From f618ab49554dba03c104ef27cea5d893063fd373 Mon Sep 17 00:00:00 2001
From: Gaurav Jain <gjn@google.com>
Date: Tue, 30 Jun 2020 16:06:49 -0700
Subject: [PATCH] Move away from deprecated asserts

- assertEquals -> assertEqual
- assertRaisesRegexp -> assertRaisesRegex
- assertRegexpMatches -> assertRegex
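
In plain unittest terms, the renames above amount to the following minimal,
illustrative sketch (the test class and values are made up for this example,
not taken from the files in this change):

    import unittest


    class RenamedAssertsExample(unittest.TestCase):

      def test_renamed_asserts(self):
        # Current spellings of the deprecated aliases assertEquals,
        # assertRaisesRegexp, and assertRegexpMatches.
        self.assertEqual(1 + 1, 2)
        with self.assertRaisesRegex(ValueError, 'bad value'):
          raise ValueError('bad value')
        self.assertRegex('abc', 'a.c')


    if __name__ == '__main__':
      unittest.main()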

PiperOrigin-RevId: 319118081
Change-Id: Ieb457128522920ab55d6b69a7f244ab798a7d689
---
 tensorflow/compiler/tests/add_n_test.py       |   4 +-
 .../compiler/tests/bucketize_op_test.py       |   6 +-
 tensorflow/compiler/tests/concat_ops_test.py  |   2 +-
 tensorflow/compiler/tests/cond_test.py        |   8 +-
 tensorflow/compiler/tests/eager_test.py       |   8 +-
 .../compiler/tests/ensure_shape_op_test.py    |   4 +-
 tensorflow/compiler/tests/fifo_queue_test.py  |   2 +-
 tensorflow/compiler/tests/image_ops_test.py   |   2 +-
 tensorflow/compiler/tests/momentum_test.py    |   8 +-
 .../compiler/tests/tensor_array_ops_test.py   |   5 +-
 .../compiler/tests/tensor_list_ops_test.py    |   6 +-
 .../tests/tridiagonal_solve_ops_test.py       |   2 +-
 .../compiler/tests/variable_ops_test.py       |   4 +-
 tensorflow/compiler/tests/xla_ops_test.py     |   4 +-
 tensorflow/lite/python/interpreter_test.py    |  35 +--
 tensorflow/lite/schema/upgrade_schema_test.py |   6 +-
 .../autograph/converters/asserts_test.py      |   2 +-
 .../autograph/converters/directives_test.py   |   4 +-
 tensorflow/python/autograph/impl/api_test.py  |   2 +-
 .../autograph/lang/special_functions_test.py  |   6 +-
 .../operators/conditional_expressions_test.py |   2 +-
 .../autograph/operators/control_flow_test.py  |   4 +-
 .../autograph/operators/exceptions_test.py    |  10 +-
 .../python/autograph/pyct/transformer_test.py |   2 +-
 .../python/client/events_writer_test.py       |   2 +-
 .../python/client/session_partial_run_test.py |  22 +-
 tensorflow/python/client/session_test.py      |  40 +--
 tensorflow/python/compiler/mlir/mlir_test.py  |   4 +-
 .../compiler/tensorrt/trt_convert_test.py     |  14 +-
 .../compiler/xla/experimental_compile_test.py |   4 +-
 tensorflow/python/compiler/xla/jit_test.py    |   4 +-
 tensorflow/python/compiler/xla/xla_test.py    |   6 +-
 .../kernel_tests/assert_cardinality_test.py   |   3 +-
 .../dense_to_sparse_batch_test.py             |  10 +-
 .../directed_interleave_dataset_test.py       |  14 +-
 .../kernel_tests/get_single_element_test.py   |   2 +-
 .../kernel_tests/group_by_reducer_test.py     |   6 +-
 .../kernel_tests/group_by_window_test.py      |   2 +-
 .../make_batched_features_dataset_test.py     |   2 +-
 .../kernel_tests/make_csv_dataset_test.py     |   4 +-
 .../kernel_tests/map_and_batch_test.py        |   2 +-
 .../kernel_tests/map_defun_op_test.py         |   4 +-
 .../choose_fastest_branch_dataset_test.py     |   4 +-
 .../optimization/map_vectorization_test.py    |   4 +-
 .../kernel_tests/rebatch_dataset_test.py      |   4 +-
 .../experimental/kernel_tests/scan_test.py    |   6 +-
 .../sequence_dataset_serialization_test.py    |  12 +-
 .../stats_dataset_serialization_test.py       |   8 +-
 .../data/kernel_tests/concatenate_test.py     |   6 +-
 .../python/data/kernel_tests/dataset_test.py  |   6 +-
 .../data/kernel_tests/from_generator_test.py  |   2 +-
 .../python/data/kernel_tests/iterator_test.py |  12 +-
 .../python/data/kernel_tests/map_test.py      |   2 +-
 .../python/data/kernel_tests/options_test.py  |   3 +-
 .../data/kernel_tests/padded_batch_test.py    |  14 +-
 tensorflow/python/data/util/convert_test.py   |   4 +-
 tensorflow/python/data/util/nest_test.py      |  63 ++--
 tensorflow/python/data/util/structure_test.py |  52 ++--
 .../python/debug/cli/analyzer_cli_test.py     |  16 +-
 .../python/debug/cli/cli_shared_test.py       |   2 +-
 .../python/debug/cli/command_parser_test.py   | 113 ++++---
 tensorflow/python/debug/cli/curses_ui_test.py |   4 +-
 .../python/debug/cli/curses_widgets_test.py   |   6 +-
 .../debug/cli/debugger_cli_common_test.py     |  46 +--
 tensorflow/python/debug/cli/evaluator_test.py |  16 +-
 .../debug/cli/profile_analyzer_cli_test.py    |  14 +-
 .../python/debug/cli/readline_ui_test.py      |   4 +-
 .../python/debug/cli/tensor_format_test.py    |  49 ++-
 .../python/debug/lib/debug_data_test.py       |  12 +-
 .../debug/lib/debug_events_writer_test.py     |   4 +-
 .../python/debug/lib/debug_gradients_test.py  |  12 +-
 .../python/debug/lib/debug_graphs_test.py     |   8 +-
 .../python/debug/lib/debug_v2_ops_test.py     |   6 +-
 .../python/debug/lib/dumping_callback_test.py |  22 +-
 .../debug/lib/session_debug_grpc_test.py      |  27 +-
 .../python/debug/lib/source_utils_test.py     |   8 +-
 .../debug/wrappers/dumping_wrapper_test.py    |   8 +-
 .../python/debug/wrappers/framework_test.py   |   8 +-
 .../debug/wrappers/local_cli_wrapper_test.py  |   8 +-
 .../python/distribute/all_reduce_test.py      |   3 +-
 .../gce_cluster_resolver_test.py              |   2 +-
 .../kubernetes_cluster_resolver_test.py       |   2 +-
 .../tpu/tpu_cluster_resolver_test.py          |   4 +-
 .../custom_training_loop_input_test.py        |   4 +-
 .../python/distribute/distribute_lib_test.py  |  27 +-
 .../distribute/mirrored_strategy_test.py      |   6 +-
 .../distribute/mirrored_variable_test.py      |  10 +-
 .../multi_process_runner_no_init_test.py      |   4 +-
 .../distribute/multi_process_runner_test.py   |   6 +-
 .../distribute/multi_worker_util_test.py      |  20 +-
 .../distribute/sharded_variable_test.py       |   8 +-
 .../shared_variable_creator_test.py           |  12 +-
 .../distribute/strategy_combinations_test.py  |   2 +-
 tensorflow/python/eager/backprop_test.py      |  28 +-
 tensorflow/python/eager/core_test.py          |  10 +-
 tensorflow/python/eager/custom_device_test.py |   2 +-
 tensorflow/python/eager/def_function_test.py  |  18 +-
 .../python/eager/def_function_xla_jit_test.py |  13 +-
 tensorflow/python/eager/forwardprop_test.py   |  12 +-
 .../eager/function_defun_collection_test.py   |  12 +-
 .../python/eager/function_gradients_test.py   |   4 +-
 tensorflow/python/eager/function_test.py      | 194 ++++++------
 tensorflow/python/eager/pywrap_tfe_test.py    |  17 +-
 .../python/eager/remote_cluster_test.py       |   4 +-
 tensorflow/python/eager/tensor_test.py        |  46 ++-
 .../feature_column/feature_column_test.py     | 288 +++++++++---------
 .../feature_column/feature_column_v2_test.py  | 256 ++++++++--------
 .../sequence_feature_column_test.py           |  24 +-
 .../feature_column/serialization_test.py      |  10 +-
 .../framework/auto_control_deps_test.py       |   4 +-
 tensorflow/python/framework/config_test.py    |  48 +--
 .../python/framework/device_spec_test.py      |   4 +-
 tensorflow/python/framework/device_test.py    |   8 +-
 tensorflow/python/framework/dtypes_test.py    |  54 ++--
 .../framework/error_interpolation_test.py     |  14 +-
 tensorflow/python/framework/function_test.py  |  54 ++--
 .../python/framework/graph_util_test.py       |   2 +-
 tensorflow/python/framework/importer_test.py  |  58 ++--
 .../python/framework/memory_checker_test.py   |   2 +-
 .../python/framework/meta_graph_test.py       |   8 +-
 tensorflow/python/framework/ops_test.py       | 127 ++++----
 tensorflow/python/framework/registry_test.py  |   2 +-
 tensorflow/python/framework/subscribe_test.py |   2 +-
 .../python/framework/tensor_shape_div_test.py |   2 +-
 .../python/framework/tensor_shape_test.py     |  12 +-
 .../python/framework/tensor_spec_test.py      |   8 +-
 .../python/framework/tensor_util_test.py      |   2 +-
 .../framework/test_combinations_test.py       |   2 +-
 tensorflow/python/framework/test_util_test.py |  42 +--
 tensorflow/python/framework/versions_test.py  |   6 +-
 tensorflow/python/keras/backend_test.py       |   4 +-
 tensorflow/python/keras/callbacks_test.py     |  31 +-
 .../distribute/distribute_strategy_test.py    |  31 +-
 .../distributed_training_utils_test.py        |   4 +-
 .../distribute/keras_dnn_correctness_test.py  |  10 +-
 ...as_stateful_lstm_model_correctness_test.py |   5 +-
 .../keras/distribute/keras_utils_test.py      |  18 +-
 .../python/keras/engine/base_layer_test.py    |  50 +--
 .../keras/engine/base_layer_utils_test.py     |   4 +-
 .../python/keras/engine/data_adapter_test.py  |  27 +-
 .../python/keras/engine/functional_test.py    |  28 +-
 .../python/keras/engine/input_spec_test.py    |   8 +-
 .../python/keras/engine/sequential_test.py    |  22 +-
 .../keras/engine/training_dataset_test.py     |  10 +-
 .../keras/engine/training_generator_test.py   |   6 +-
 .../python/keras/engine/training_test.py      |  44 ++-
 .../keras/engine/training_utils_test.py       |   9 +-
 .../feature_column/dense_features_test.py     |  28 +-
 .../feature_column/dense_features_v2_test.py  |  24 +-
 .../sequence_feature_column_test.py           |  10 +-
 .../keras/integration_test/function_test.py   |   4 +-
 .../keras/layers/advanced_activations_test.py |   4 +-
 .../python/keras/layers/convolutional_test.py |   4 +-
 tensorflow/python/keras/layers/core_test.py   |  10 +-
 .../keras/layers/dense_attention_test.py      |  20 +-
 .../python/keras/layers/einsum_dense_test.py  |  10 +-
 .../python/keras/layers/kernelized_test.py    |  14 +-
 tensorflow/python/keras/layers/merge_test.py  |  40 +--
 .../python/keras/layers/normalization_test.py |  20 +-
 .../layers/preprocessing/hashing_test.py      |  14 +-
 .../preprocessing/image_preprocessing_test.py |   4 +-
 .../layers/preprocessing/table_utils_test.py  |   2 +-
 .../python/keras/layers/recurrent_test.py     |  14 +-
 .../keras/layers/rnn_cell_wrapper_v2_test.py  |   4 +-
 .../python/keras/layers/wrappers_test.py      |  17 +-
 .../keras/legacy_tf_layers/base_test.py       |  20 +-
 .../legacy_tf_layers/convolutional_test.py    |  80 ++---
 .../keras/legacy_tf_layers/pooling_test.py    |  10 +-
 tensorflow/python/keras/losses_test.py        |  18 +-
 .../keras/metrics_confusion_matrix_test.py    |  34 +--
 tensorflow/python/keras/metrics_test.py       |   4 +-
 .../experimental/autocast_variable_test.py    |  17 +-
 .../device_compatibility_check_test.py        |   4 +-
 .../experimental/get_layer_policy_test.py     |   2 +-
 .../experimental/keras_test.py                |  16 +-
 .../experimental/loss_scale_optimizer_test.py |  10 +-
 .../mixed_precision_graph_rewrite_test.py     |  10 +-
 .../experimental/policy_test.py               |  59 ++--
 tensorflow/python/keras/models_test.py        |  13 +-
 .../optimizer_v2/gradient_descent_test.py     |   2 +-
 .../keras/optimizer_v2/optimizer_v2_test.py   |  18 +-
 .../python/keras/saving/hdf5_format_test.py   |  33 +-
 tensorflow/python/keras/saving/save_test.py   |   4 +-
 .../saving/saved_model/saved_model_test.py    |   7 +-
 .../saving/saved_model_experimental_test.py   |   4 +-
 .../python/keras/saving/saving_utils_test.py  |   8 +-
 .../keras/tests/add_loss_correctness_test.py  |   4 +-
 .../keras/tests/model_subclassing_test.py     |  30 +-
 tensorflow/python/keras/tests/saver_test.py   |   4 +-
 .../python/keras/tests/summary_ops_test.py    |   4 +-
 .../python/keras/tests/tracking_test.py       |   8 +-
 .../python/keras/tests/tracking_util_test.py  |   8 +-
 .../tracking_util_with_v1_optimizers_test.py  |   6 +-
 .../python/keras/utils/version_utils_test.py  |   4 +-
 .../python/kernel_tests/array_ops_test.py     |  62 ++--
 .../python/kernel_tests/barrier_ops_test.py   |  46 +--
 .../python/kernel_tests/base64_ops_test.py    |   2 +-
 .../python/kernel_tests/benchmark_test.py     |   6 +-
 .../python/kernel_tests/betainc_op_test.py    |   2 +-
 .../python/kernel_tests/bincount_op_test.py   |   6 +-
 .../python/kernel_tests/bitcast_op_test.py    |   2 +-
 .../boosted_trees/stats_ops_test.py           |   2 +-
 .../boosted_trees/training_ops_test.py        |   4 +-
 .../kernel_tests/broadcast_to_ops_test.py     |   4 +-
 .../python/kernel_tests/bucketize_op_test.py  |   7 +-
 .../python/kernel_tests/check_ops_test.py     | 159 +++++-----
 .../python/kernel_tests/cholesky_op_test.py   |   2 +-
 .../python/kernel_tests/concat_op_test.py     |  16 +-
 .../python/kernel_tests/cond_v2_test.py       |  36 ++-
 .../kernel_tests/confusion_matrix_test.py     |   6 +-
 .../kernel_tests/constant_op_eager_test.py    |  28 +-
 .../python/kernel_tests/constant_op_test.py   |  37 ++-
 .../kernel_tests/control_flow_ops_py_test.py  |  60 ++--
 .../python/kernel_tests/conv_ops_test.py      |  22 +-
 .../kernel_tests/critical_section_test.py     |  31 +-
 .../kernel_tests/ctc_decoder_ops_test.py      |   6 +-
 .../python/kernel_tests/ctc_loss_op_test.py   |   9 +-
 .../kernel_tests/cwise_ops_binary_test.py     |   8 +-
 .../python/kernel_tests/cwise_ops_test.py     |   8 +-
 .../kernel_tests/depthtospace_op_test.py      |   2 +-
 .../python/kernel_tests/diag_op_test.py       |  10 +-
 .../distributions/bijector_test.py            |  32 +-
 .../distributions/kullback_leibler_test.py    |   4 +-
 .../distributions/student_t_test.py           |   8 +-
 .../kernel_tests/distributions/util_test.py   |   2 +-
 .../python/kernel_tests/fifo_queue_test.py    | 105 +++----
 .../kernel_tests/functional_ops_test.py       |  19 +-
 .../kernel_tests/identity_n_op_py_test.py     |   6 +-
 .../python/kernel_tests/init_ops_test.py      |   6 +-
 .../python/kernel_tests/inplace_ops_test.py   |  12 +-
 .../linalg/linear_operator_addition_test.py   |  12 +-
 .../linalg/linear_operator_adjoint_test.py    |   4 +-
 .../linalg/linear_operator_algebra_test.py    |  26 +-
 .../linalg/linear_operator_block_diag_test.py |  10 +-
 ...ar_operator_block_lower_triangular_test.py |  16 +-
 .../linalg/linear_operator_circulant_test.py  |   6 +-
 .../linear_operator_composition_test.py       |   6 +-
 .../linalg/linear_operator_diag_test.py       |   2 +-
 .../linear_operator_full_matrix_test.py       |   2 +-
 .../linear_operator_householder_test.py       |   2 +-
 .../linalg/linear_operator_identity_test.py   |  26 +-
 .../linalg/linear_operator_inversion_test.py  |   8 +-
 .../linalg/linear_operator_kronecker_test.py  |   6 +-
 .../linear_operator_low_rank_update_test.py   |  10 +-
 .../linear_operator_lower_triangular_test.py  |   2 +-
 .../linear_operator_permutation_test.py       |   8 +-
 .../linalg/linear_operator_test.py            |  14 +-
 .../linalg/linear_operator_toeplitz_test.py   |   6 +-
 .../linalg/linear_operator_util_test.py       |  10 +-
 .../linalg/linear_operator_zeros_test.py      |  26 +-
 .../python/kernel_tests/list_ops_test.py      | 132 ++++----
 .../python/kernel_tests/lookup_ops_test.py    |  72 +++--
 tensorflow/python/kernel_tests/losses_test.py |  62 ++--
 .../python/kernel_tests/manip_ops_test.py     |  38 +--
 tensorflow/python/kernel_tests/map_fn_test.py |   8 +-
 .../python/kernel_tests/matmul_op_test.py     |   4 +-
 .../python/kernel_tests/metrics_test.py       |   8 +-
 .../python/kernel_tests/norm_op_test.py       |  14 +-
 .../kernel_tests/nth_element_op_test.py       |  12 +-
 .../python/kernel_tests/numerics_test.py      |  10 +-
 tensorflow/python/kernel_tests/pad_op_test.py |   6 +-
 .../kernel_tests/padding_fifo_queue_test.py   |  47 +--
 .../python/kernel_tests/parsing_ops_test.py   |   4 +-
 .../partitioned_variables_test.py             |   2 +-
 .../python/kernel_tests/pooling_ops_test.py   |   8 +-
 .../kernel_tests/priority_queue_test.py       |   4 +-
 .../python/kernel_tests/py_func_test.py       |  14 +-
 tensorflow/python/kernel_tests/qr_op_test.py  |   8 +-
 .../kernel_tests/random/random_ops_test.py    |   4 +-
 .../random/random_shuffle_queue_test.py       |  44 +--
 .../kernel_tests/reduce_join_op_test.py       |  12 +-
 .../python/kernel_tests/relu_op_test.py       |   4 +-
 .../python/kernel_tests/reshape_op_test.py    |   6 +-
 .../resource_variable_ops_test.py             |  18 +-
 .../kernel_tests/reverse_sequence_op_test.py  |  30 +-
 .../python/kernel_tests/rnn_cell_test.py      |  11 +-
 tensorflow/python/kernel_tests/rnn_test.py    |  18 +-
 .../segment_reduction_ops_test.py             |   2 +-
 tensorflow/python/kernel_tests/sets_test.py   |  10 +-
 .../python/kernel_tests/softplus_op_test.py   |   2 +-
 .../python/kernel_tests/softsign_op_test.py   |   2 +-
 .../python/kernel_tests/sparse_add_op_test.py |   4 +-
 .../sparse_conditional_accumulator_test.py    |  48 +--
 .../kernel_tests/sparse_cross_op_test.py      |  34 +--
 .../python/kernel_tests/sparse_ops_test.py    |   4 +-
 .../kernel_tests/sparse_reshape_op_test.py    |   6 +-
 .../kernel_tests/sparse_split_op_test.py      |   8 +-
 .../sparse_tensor_dense_matmul_op_test.py     |   2 +-
 .../kernel_tests/sparse_xent_op_test.py       |   8 +-
 .../python/kernel_tests/split_op_test.py      |   8 +-
 .../python/kernel_tests/stack_op_test.py      |   4 +-
 .../string_bytes_split_op_test.py             |   4 +-
 .../kernel_tests/string_format_op_test.py     |  12 +-
 .../kernel_tests/string_length_op_test.py     |   2 +-
 .../kernel_tests/string_split_op_test.py      |   4 +-
 .../python/kernel_tests/summary_ops_test.py   |  11 +-
 tensorflow/python/kernel_tests/svd_op_test.py |   8 +-
 .../python/kernel_tests/template_test.py      |  50 +--
 .../kernel_tests/tensor_array_ops_test.py     |  12 +-
 .../python/kernel_tests/tensordot_op_test.py  |   4 +-
 .../python/kernel_tests/topk_op_test.py       |   4 +-
 .../kernel_tests/unicode_decode_op_test.py    |   8 +-
 .../kernel_tests/unicode_encode_op_test.py    |   2 +-
 .../kernel_tests/unicode_transcode_op_test.py |   4 +-
 .../python/kernel_tests/unstack_op_test.py    |  12 +-
 .../kernel_tests/variable_scope_test.py       |  42 ++-
 .../python/kernel_tests/variables_test.py     |  21 +-
 .../kernel_tests/weights_broadcast_test.py    |   8 +-
 .../python/kernel_tests/while_v2_test.py      |  18 +-
 .../python/kernel_tests/xent_op_test.py       |   6 +-
 tensorflow/python/lib/io/tf_record_test.py    |   8 +-
 tensorflow/python/module/module_test.py       |   4 +-
 tensorflow/python/ops/batch_ops_test.py       |   4 +-
 tensorflow/python/ops/bincount_ops_test.py    |  34 +--
 tensorflow/python/ops/clustering_ops_test.py  |   2 +-
 .../python/ops/collective_ops_gpu_test.py     |  10 +-
 tensorflow/python/ops/collective_ops_test.py  |  12 +-
 .../python/ops/control_flow_ops_test.py       |  61 ++--
 .../python/ops/gradient_checker_test.py       |   6 +-
 .../python/ops/gradient_checker_v2_test.py    |   4 +-
 tensorflow/python/ops/gradients_test.py       |  19 +-
 tensorflow/python/ops/histogram_ops_test.py   |  15 +-
 tensorflow/python/ops/image_ops_test.py       |  48 +--
 tensorflow/python/ops/init_ops_v2_test.py     |   6 +-
 tensorflow/python/ops/math_ops_test.py        |  27 +-
 tensorflow/python/ops/nccl_ops_test.py        |   6 +-
 tensorflow/python/ops/nn_batchnorm_test.py    |   8 +-
 .../ops/nn_loss_scaling_utilities_test.py     |   6 +-
 tensorflow/python/ops/nn_test.py              |   2 +-
 tensorflow/python/ops/nn_xent_test.py         |   4 +-
 .../python/ops/numpy_ops/np_utils_test.py     |   8 +-
 .../python/ops/parallel_for/array_test.py     |   4 +-
 .../ops/parallel_for/control_flow_ops_test.py |  16 +-
 .../python/ops/parallel_for/gradients_test.py |   6 +-
 ...vert_to_tensor_or_ragged_tensor_op_test.py |   8 +-
 .../ops/ragged/ragged_batch_gather_op_test.py |  12 +-
 .../ops/ragged/ragged_boolean_mask_op_test.py |  37 +--
 .../ops/ragged/ragged_concat_op_test.py       |   8 +-
 .../python/ops/ragged/ragged_const_op_test.py |  10 +-
 .../ragged/ragged_constant_value_op_test.py   |   2 +-
 .../python/ops/ragged/ragged_cross_op_test.py |   4 +-
 .../python/ops/ragged/ragged_dispatch_test.py |   4 +-
 .../ragged_dynamic_partition_op_test.py       |  12 +-
 .../ops/ragged/ragged_from_sparse_op_test.py  |  24 +-
 .../ops/ragged/ragged_from_tensor_op_test.py  |   4 +-
 .../ops/ragged/ragged_gather_nd_op_test.py    |   8 +-
 .../ops/ragged/ragged_gather_op_test.py       |  18 +-
 .../python/ops/ragged/ragged_getitem_test.py  |   4 +-
 .../ragged/ragged_map_flat_values_op_test.py  |  19 +-
 .../ops/ragged/ragged_map_fn_op_test.py       |   4 +-
 .../ops/ragged/ragged_merge_dims_op_test.py   |   2 +-
 .../ops/ragged/ragged_one_hot_op_test.py      |   2 +-
 .../ops/ragged/ragged_operators_test.py       |   8 +-
 .../python/ops/ragged/ragged_range_op_test.py |   4 +-
 .../ops/ragged/ragged_reduce_op_test.py       |  10 +-
 .../ops/ragged/ragged_reverse_op_test.py      |   5 +-
 .../ops/ragged/ragged_row_lengths_op_test.py  |   2 +-
 ...agged_row_splits_to_segment_ids_op_test.py |  19 +-
 ...agged_segment_ids_to_row_splits_op_test.py |  16 +-
 .../ops/ragged/ragged_segment_op_test.py      |   6 +-
 .../python/ops/ragged/ragged_stack_op_test.py |   4 +-
 .../ops/ragged/ragged_tensor_shape_test.py    |   5 +-
 .../ops/ragged/ragged_to_sparse_op_test.py    |  14 +-
 .../ops/ragged/ragged_to_tensor_op_test.py    |   4 +-
 .../python/ops/ragged/ragged_util_test.py     |   2 +-
 .../python/ops/ragged/ragged_where_op_test.py |   2 +-
 .../python/ops/ragged/row_partition_test.py   |  40 ++-
 .../ops/ragged/string_ngrams_op_test.py       |   4 +-
 tensorflow/python/ops/raw_ops_test.py         |   4 +-
 .../structured_tensor_slice_test.py           |   4 +-
 .../structured/structured_tensor_spec_test.py |   2 +-
 .../ops/structured/structured_tensor_test.py  |  24 +-
 .../python/profiler/pprof_profiler_test.py    |  22 +-
 .../python/profiler/tfprof_logger_test.py     |   8 +-
 tensorflow/python/saved_model/load_test.py    |  32 +-
 .../python/saved_model/load_v1_in_v2_test.py  |   8 +-
 tensorflow/python/saved_model/loader_test.py  |   2 +-
 .../model_utils/export_output_test.py         |  26 +-
 .../saved_model/model_utils/mode_keys_test.py |   4 +-
 .../nested_structure_coder_test.py            |   4 +-
 tensorflow/python/saved_model/save_test.py    |  27 +-
 .../python/saved_model/saved_model_test.py    |  25 +-
 tensorflow/python/saved_model/utils_test.py   |   2 +-
 tensorflow/python/summary/summary_test.py     |  20 +-
 .../python/summary/writer/writer_test.py      |  50 +--
 .../python/tools/saved_model_cli_test.py      |  10 +-
 .../python/tools/saved_model_utils_test.py    |   2 +-
 .../python/tpu/feature_column_v2_test.py      |   9 +-
 tensorflow/python/tpu/tpu_test.py             |   4 +-
 tensorflow/python/training/adadelta_test.py   |   8 +-
 tensorflow/python/training/adagrad_test.py    |   4 +-
 .../python/training/basic_loops_test.py       |   6 +-
 .../training/basic_session_run_hooks_test.py  |  38 ++-
 .../training/checkpoint_management_test.py    |   4 +-
 .../python/training/coordinator_test.py       |  20 +-
 .../experimental/loss_scale_optimizer_test.py |   2 +-
 .../loss_scaling_gradient_tape_test.py        |   8 +-
 .../experimental/mixed_precision_test.py      |   8 +-
 tensorflow/python/training/input_test.py      |  56 ++--
 tensorflow/python/training/momentum_test.py   |  20 +-
 .../python/training/monitored_session_test.py |  48 +--
 tensorflow/python/training/optimizer_test.py  |  12 +-
 .../python/training/queue_runner_test.py      |   6 +-
 .../training/saver_large_variable_test.py     |   4 +-
 tensorflow/python/training/saver_test.py      |  36 +--
 tensorflow/python/training/server_lib_test.py |  30 +-
 .../python/training/session_manager_test.py   |  64 ++--
 tensorflow/python/training/supervisor_test.py |  16 +-
 .../training/sync_replicas_optimizer_test.py  |   3 +-
 .../python/training/tracking/base_test.py     |   6 +-
 .../training/tracking/data_structures_test.py |   8 +-
 .../python/training/tracking/tracking_test.py |   8 +-
 .../python/training/tracking/util_test.py     |   9 +-
 .../tracking/util_with_v1_optimizers_test.py  |   2 +-
 .../python/training/training_util_test.py     |  24 +-
 tensorflow/python/util/deprecation_test.py    |  85 +++---
 tensorflow/python/util/dispatch_test.py       |  14 +-
 tensorflow/python/util/function_utils_test.py |  19 +-
 tensorflow/python/util/keyword_args_test.py   |   4 +-
 tensorflow/python/util/nest_test.py           |  93 +++---
 .../python/util/protobuf/compare_test.py      |  68 ++---
 tensorflow/python/util/tf_export_test.py      |  64 ++--
 tensorflow/python/util/tf_inspect_test.py     |   4 +-
 tensorflow/python/util/tf_should_use_test.py  |   4 +-
 .../tools/compatibility/ast_edits_test.py     |   5 +-
 425 files changed, 3462 insertions(+), 3619 deletions(-)

diff --git a/tensorflow/compiler/tests/add_n_test.py b/tensorflow/compiler/tests/add_n_test.py
index 40e6bea0cc5..69c7737ad12 100644
--- a/tensorflow/compiler/tests/add_n_test.py
+++ b/tensorflow/compiler/tests/add_n_test.py
@@ -50,7 +50,7 @@ class XlaAddNTest(xla_test.XLATestCase):
       l2 = list_ops.tensor_list_reserve(
           element_shape=[], element_dtype=dtypes.float32, num_elements=3)
       l = math_ops.add_n([l1, l2])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "TensorList arguments to AddN must all have the same shape"):
         list_ops.tensor_list_stack(l, element_dtype=dtypes.float32).eval()
@@ -70,7 +70,7 @@ class XlaAddNTest(xla_test.XLATestCase):
           element_dtype=dtypes.float32,
           num_elements=3)
       l = math_ops.add_n([l1, l2])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "TensorList arguments to AddN must all have the same shape"):
         session.run(
diff --git a/tensorflow/compiler/tests/bucketize_op_test.py b/tensorflow/compiler/tests/bucketize_op_test.py
index f6b6d773135..a1272fe045a 100644
--- a/tensorflow/compiler/tests/bucketize_op_test.py
+++ b/tensorflow/compiler/tests/bucketize_op_test.py
@@ -64,13 +64,13 @@ class BucketizationOpTest(xla_test.XLATestCase):
       p = array_ops.placeholder(dtypes.int32)
       with self.test_scope():
         op = math_ops._bucketize(p, boundaries=[0, 8, 3, 11])
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Expected sorted boundaries"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Expected sorted boundaries"):
         sess.run(op, {p: [-5, 0]})
 
   def testBoundariesNotList(self):
     with self.session():
-      with self.assertRaisesRegexp(TypeError, "Expected list.*"):
+      with self.assertRaisesRegex(TypeError, "Expected list.*"):
         p = array_ops.placeholder(dtypes.int32)
         with self.test_scope():
           math_ops._bucketize(p, boundaries=0)
diff --git a/tensorflow/compiler/tests/concat_ops_test.py b/tensorflow/compiler/tests/concat_ops_test.py
index f35ded924d5..310be97f2d9 100644
--- a/tensorflow/compiler/tests/concat_ops_test.py
+++ b/tensorflow/compiler/tests/concat_ops_test.py
@@ -288,7 +288,7 @@ class ConcatTest(xla_test.XLATestCase):
       with self.test_scope():
         scalar = constant_op.constant(7)
         dim = array_ops.placeholder(dtypes.int32)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
           array_ops.concat([scalar, scalar, scalar], dim)
 
diff --git a/tensorflow/compiler/tests/cond_test.py b/tensorflow/compiler/tests/cond_test.py
index a28c2c5ca88..701181f577c 100644
--- a/tensorflow/compiler/tests/cond_test.py
+++ b/tensorflow/compiler/tests/cond_test.py
@@ -175,8 +175,8 @@ class CondTest(xla_test.XLATestCase):
       output = control_flow_ops.cond(
           constant_op.constant(True), if_true, if_false)
 
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "must be a compile-time constant"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "must be a compile-time constant"):
         sess.run(
             output, feed_dict={
                 x: [0., 1., 2.],
@@ -209,8 +209,8 @@ class CondTest(xla_test.XLATestCase):
 
       output = xla.compile(f)
 
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "must be a compile-time constant"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "must be a compile-time constant"):
         sess.run(
             output, feed_dict={
                 x: [0., 1., 2.],
diff --git a/tensorflow/compiler/tests/eager_test.py b/tensorflow/compiler/tests/eager_test.py
index 0ed81b7e9e5..520348e0f8a 100644
--- a/tensorflow/compiler/tests/eager_test.py
+++ b/tensorflow/compiler/tests/eager_test.py
@@ -704,8 +704,8 @@ class EagerFunctionTest(xla_test.XLATestCase):
       self.assertAllEqual([0.0, 4.0], r_y)
       if context.executing_eagerly():
         # backing_device is only available for eager tensors.
-        self.assertRegexpMatches(r_x.backing_device, self.device)
-        self.assertRegexpMatches(r_y.backing_device, self.device)
+        self.assertRegex(r_x.backing_device, self.device)
+        self.assertRegex(r_y.backing_device, self.device)
 
       # When function is executed op-by-op, requested devices will be
       # respected.
@@ -714,8 +714,8 @@ class EagerFunctionTest(xla_test.XLATestCase):
       self.assertAllEqual([0.0, 4.0], r_y)
       if context.executing_eagerly():
         # backing_device is only available for eager tensors.
-        self.assertRegexpMatches(r_x.backing_device, self.device)
-        self.assertRegexpMatches(r_y.backing_device, 'device:CPU:0')
+        self.assertRegex(r_x.backing_device, self.device)
+        self.assertRegex(r_y.backing_device, 'device:CPU:0')
 
 
 class ExcessivePaddingTest(xla_test.XLATestCase):
diff --git a/tensorflow/compiler/tests/ensure_shape_op_test.py b/tensorflow/compiler/tests/ensure_shape_op_test.py
index 95de5a9c49b..328d0bb6c01 100644
--- a/tensorflow/compiler/tests/ensure_shape_op_test.py
+++ b/tensorflow/compiler/tests/ensure_shape_op_test.py
@@ -42,8 +42,8 @@ class EnsureShapeOpTest(xla_test.XLATestCase):
       p = array_ops.placeholder(dtypes.int32)
       with self.test_scope():
         op = check_ops.ensure_shape(p, (None, 3, 3))
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "is not compatible with expected shape"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "is not compatible with expected shape"):
         sess.run(op, {p: [[0, 1, 2], [3, 4, 5], [6, 7, 8]]})
 
 
diff --git a/tensorflow/compiler/tests/fifo_queue_test.py b/tensorflow/compiler/tests/fifo_queue_test.py
index ba80fa0a0b2..ef24b927ad4 100644
--- a/tensorflow/compiler/tests/fifo_queue_test.py
+++ b/tensorflow/compiler/tests/fifo_queue_test.py
@@ -66,7 +66,7 @@ class FIFOQueueTest(xla_test.XLATestCase):
   def testEnqueueDictWithoutNames(self):
     with self.session(), self.test_scope():
       q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
-      with self.assertRaisesRegexp(ValueError, "must have names"):
+      with self.assertRaisesRegex(ValueError, "must have names"):
         q.enqueue({"a": 12.0})
 
   def testParallelEnqueue(self):
diff --git a/tensorflow/compiler/tests/image_ops_test.py b/tensorflow/compiler/tests/image_ops_test.py
index 81779203955..9590688fda7 100644
--- a/tensorflow/compiler/tests/image_ops_test.py
+++ b/tensorflow/compiler/tests/image_ops_test.py
@@ -297,7 +297,7 @@ class AdjustHueTest(xla_test.XLATestCase):
     x_np = np.random.rand(2, 3) * 255.
     delta_h = np.random.rand() * 2.0 - 1.0
     fused = False
-    with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
+    with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"):
       self._adjustHueTf(x_np, delta_h)
     x_np = np.random.rand(4, 2, 4) * 255.
     delta_h = np.random.rand() * 2.0 - 1.0
diff --git a/tensorflow/compiler/tests/momentum_test.py b/tensorflow/compiler/tests/momentum_test.py
index dc4ccd52624..5f061fa0595 100644
--- a/tensorflow/compiler/tests/momentum_test.py
+++ b/tensorflow/compiler/tests/momentum_test.py
@@ -54,10 +54,10 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
         # Check we have slots
         self.assertEqual(["momentum"], mom_opt.get_slot_names())
         slot0 = mom_opt.get_slot(var0, "momentum")
-        self.assertEquals(slot0.get_shape(), var0.get_shape())
+        self.assertEqual(slot0.get_shape(), var0.get_shape())
         self.assertFalse(slot0 in variables.trainable_variables())
         slot1 = mom_opt.get_slot(var1, "momentum")
-        self.assertEquals(slot1.get_shape(), var1.get_shape())
+        self.assertEqual(slot1.get_shape(), var1.get_shape())
         self.assertFalse(slot1 in variables.trainable_variables())
 
         # Fetch params to validate initial values
@@ -140,10 +140,10 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
         # Check we have slots
         self.assertEqual(["momentum"], mom_opt.get_slot_names())
         slot0 = mom_opt.get_slot(var0, "momentum")
-        self.assertEquals(slot0.get_shape(), var0.get_shape())
+        self.assertEqual(slot0.get_shape(), var0.get_shape())
         self.assertFalse(slot0 in variables.trainable_variables())
         slot1 = mom_opt.get_slot(var1, "momentum")
-        self.assertEquals(slot1.get_shape(), var1.get_shape())
+        self.assertEqual(slot1.get_shape(), var1.get_shape())
         self.assertFalse(slot1 in variables.trainable_variables())
 
         # Fetch params to validate initial values
diff --git a/tensorflow/compiler/tests/tensor_array_ops_test.py b/tensorflow/compiler/tests/tensor_array_ops_test.py
index c07bcbef5b4..665d396182a 100644
--- a/tensorflow/compiler/tests/tensor_array_ops_test.py
+++ b/tensorflow/compiler/tests/tensor_array_ops_test.py
@@ -393,9 +393,8 @@ class TensorArrayTest(xla_test.XLATestCase):
       # Test writing the wrong datatype.
       # TODO(b/129870929): Remove InvalidArgumentError/second regexp after all
       # callers provide proper init dtype.
-      with self.assertRaisesRegexp(
-          (ValueError, errors.InvalidArgumentError),
-          r"("
+      with self.assertRaisesRegex(
+          (ValueError, errors.InvalidArgumentError), r"("
           r"conversion requested dtype float32 for Tensor with dtype int32"
           r"|"
           r"TensorArray dtype is float but op has dtype int32"
diff --git a/tensorflow/compiler/tests/tensor_list_ops_test.py b/tensorflow/compiler/tests/tensor_list_ops_test.py
index d49a6a37785..4a6cb4a11f8 100644
--- a/tensorflow/compiler/tests/tensor_list_ops_test.py
+++ b/tensorflow/compiler/tests/tensor_list_ops_test.py
@@ -103,8 +103,8 @@ class ListOpsTest(parameterized.TestCase, xla_test.XLATestCase):
       l = list_ops.tensor_list_push_back(
           l, constant_op.constant(1.0, shape=(7, 15)))
       _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Set the max number of elements"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Set the max number of elements"):
         self.assertAllEqual(sess.run(e), 1.0 * np.ones((7, 15)))
 
   def testEmptyTensorListMax(self):
@@ -174,7 +174,7 @@ class ListOpsTest(parameterized.TestCase, xla_test.XLATestCase):
           element_dtype=dtypes.float32, element_shape=None, max_num_elements=2)
       l = list_ops.tensor_list_push_back(l, [3.0, 4.0])
       # Pushing an element with a different shape should raise an error.
-      with self.assertRaisesRegexp(errors.InternalError, "shape"):
+      with self.assertRaisesRegex(errors.InternalError, "shape"):
         l = list_ops.tensor_list_push_back(l, 5.)
         self.evaluate(
             list_ops.tensor_list_stack(l, element_dtype=dtypes.float32))
diff --git a/tensorflow/compiler/tests/tridiagonal_solve_ops_test.py b/tensorflow/compiler/tests/tridiagonal_solve_ops_test.py
index 0fe745f869a..e462211e5dd 100644
--- a/tensorflow/compiler/tests/tridiagonal_solve_ops_test.py
+++ b/tensorflow/compiler/tests/tridiagonal_solve_ops_test.py
@@ -223,7 +223,7 @@ class TridiagonalSolveOpsTest(xla_test.XLATestCase):
                                     num_rhs)).astype(np.float32)
 
     with self.session() as sess, self.test_scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.UnimplementedError,
           "Current implementation does not yet support pivoting."):
         diags = array_ops.placeholder(
diff --git a/tensorflow/compiler/tests/variable_ops_test.py b/tensorflow/compiler/tests/variable_ops_test.py
index 2514a0a9dc4..ad7e29c6def 100644
--- a/tensorflow/compiler/tests/variable_ops_test.py
+++ b/tensorflow/compiler/tests/variable_ops_test.py
@@ -485,8 +485,8 @@ class SliceAssignTest(xla_test.XLATestCase):
       checker2[None] = [6]  # new axis
 
   def testUninitialized(self):
-    with self.assertRaisesRegexp(errors.FailedPreconditionError,
-                                 "uninitialized"):
+    with self.assertRaisesRegex(errors.FailedPreconditionError,
+                                "uninitialized"):
       with self.session() as sess, self.test_scope():
         v = resource_variable_ops.ResourceVariable([1, 2])
         sess.run(v[:].assign([1, 2]))
diff --git a/tensorflow/compiler/tests/xla_ops_test.py b/tensorflow/compiler/tests/xla_ops_test.py
index 35d36315464..0d6ae81ef6e 100644
--- a/tensorflow/compiler/tests/xla_ops_test.py
+++ b/tensorflow/compiler/tests/xla_ops_test.py
@@ -343,7 +343,7 @@ class XlaOpsNumericalTest(xla_test.XLATestCase, parameterized.TestCase):
             np.array([5, 7]), np.array([2, 3, 4]))
       with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
         session.run(output)
-      self.assertRegexpMatches(
+      self.assertRegex(
           invalid_arg_error.exception.message,
           (r'start_indices must be a vector with length equal to input rank, '
            r'but input rank is 3 and start_indices has shape \[2\].*'))
@@ -357,7 +357,7 @@ class XlaOpsNumericalTest(xla_test.XLATestCase, parameterized.TestCase):
             np.array([5, 7, 3]), np.array([2, 3]))
       with self.assertRaises(errors.InvalidArgumentError) as invalid_arg_error:
         session.run(output)
-      self.assertRegexpMatches(
+      self.assertRegex(
           invalid_arg_error.exception.message,
           (r'size_indices must be a vector with length equal to input rank, '
            r'but input rank is 3 and size_indices has shape \[2\].*'))
diff --git a/tensorflow/lite/python/interpreter_test.py b/tensorflow/lite/python/interpreter_test.py
index 770c9dc3090..cc74f4d8fbc 100644
--- a/tensorflow/lite/python/interpreter_test.py
+++ b/tensorflow/lite/python/interpreter_test.py
@@ -52,7 +52,7 @@ class InterpreterCustomOpsTest(test_util.TensorFlowTestCase):
 
   def testRegistererFailure(self):
     bogus_name = 'CompletelyBogusRegistererName'
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Looking up symbol \'' + bogus_name + '\' failed'):
       interpreter_wrapper.InterpreterWithCustomOps(
           model_path=resource_loader.get_path_to_datafile(
@@ -69,15 +69,14 @@ class InterpreterTest(test_util.TensorFlowTestCase):
     self.assertEqual(quantized_dimension, params['quantized_dimension'])
 
   def testThreads_NegativeValue(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'num_threads should >= 1'):
+    with self.assertRaisesRegex(ValueError, 'num_threads should >= 1'):
       interpreter_wrapper.Interpreter(
           model_path=resource_loader.get_path_to_datafile(
               'testdata/permute_float.tflite'), num_threads=-1)
 
   def testThreads_WrongType(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'type of num_threads should be int'):
+    with self.assertRaisesRegex(ValueError,
+                                'type of num_threads should be int'):
       interpreter_wrapper.Interpreter(
           model_path=resource_loader.get_path_to_datafile(
               'testdata/permute_float.tflite'), num_threads=4.2)
@@ -261,13 +260,13 @@ class InterpreterTest(test_util.TensorFlowTestCase):
 class InterpreterTestErrorPropagation(test_util.TensorFlowTestCase):
 
   def testInvalidModelContent(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'Model provided has model identifier \''):
+    with self.assertRaisesRegex(ValueError,
+                                'Model provided has model identifier \''):
       interpreter_wrapper.Interpreter(model_content=six.b('garbage'))
 
   def testInvalidModelFile(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'Could not open \'totally_invalid_file_name\''):
+    with self.assertRaisesRegex(ValueError,
+                                'Could not open \'totally_invalid_file_name\''):
       interpreter_wrapper.Interpreter(
           model_path='totally_invalid_file_name')
 
@@ -275,12 +274,12 @@ class InterpreterTestErrorPropagation(test_util.TensorFlowTestCase):
     interpreter = interpreter_wrapper.Interpreter(
         model_path=resource_loader.get_path_to_datafile(
             'testdata/permute_float.tflite'))
-    with self.assertRaisesRegexp(RuntimeError,
-                                 'Invoke called on model that is not ready'):
+    with self.assertRaisesRegex(RuntimeError,
+                                'Invoke called on model that is not ready'):
       interpreter.invoke()
 
   def testInvalidModelFileContent(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, '`model_path` or `model_content` must be specified.'):
       interpreter_wrapper.Interpreter(model_path=None, model_content=None)
 
@@ -290,9 +289,9 @@ class InterpreterTestErrorPropagation(test_util.TensorFlowTestCase):
             'testdata/permute_float.tflite'))
     interpreter.allocate_tensors()
     # Invalid tensor index passed.
-    with self.assertRaisesRegexp(ValueError, 'Tensor with no shape found.'):
+    with self.assertRaisesRegex(ValueError, 'Tensor with no shape found.'):
       interpreter._get_tensor_details(4)
-    with self.assertRaisesRegexp(ValueError, 'Invalid node index'):
+    with self.assertRaisesRegex(ValueError, 'Invalid node index'):
       interpreter._get_op_details(4)
 
 
@@ -339,12 +338,10 @@ class InterpreterTensorAccessorTest(test_util.TensorFlowTestCase):
   def testBaseProtectsFunctions(self):
     in0 = self.interpreter.tensor(self.input0)()
     # Make sure we get an exception if we try to run an unsafe operation
-    with self.assertRaisesRegexp(
-        RuntimeError, 'There is at least 1 reference'):
+    with self.assertRaisesRegex(RuntimeError, 'There is at least 1 reference'):
       _ = self.interpreter.allocate_tensors()
     # Make sure we get an exception if we try to run an unsafe operation
-    with self.assertRaisesRegexp(
-        RuntimeError, 'There is at least 1 reference'):
+    with self.assertRaisesRegex(RuntimeError, 'There is at least 1 reference'):
       _ = self.interpreter.invoke()
     # Now test that we can run
     del in0  # this is our only buffer reference, so now it is safe to change
@@ -483,7 +480,7 @@ class InterpreterDelegateTest(test_util.TensorFlowTestCase):
     self.assertEqual(lib.get_options_counter(), 2)
 
   def testFail(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         # Due to exception chaining in PY3, we can't be more specific here and check that
         # the phrase 'Fail argument sent' is present.
         ValueError,
diff --git a/tensorflow/lite/schema/upgrade_schema_test.py b/tensorflow/lite/schema/upgrade_schema_test.py
index 922968c65aa..e55925053e0 100644
--- a/tensorflow/lite/schema/upgrade_schema_test.py
+++ b/tensorflow/lite/schema/upgrade_schema_test.py
@@ -255,17 +255,17 @@ class TestSchemaUpgrade(test_util.TensorFlowTestCase):
   def testNonExistentFile(self):
     converter = upgrade_schema_lib.Converter()
     non_existent = tempfile.mktemp(suffix=".json")
-    with self.assertRaisesRegexp(IOError, "No such file or directory"):
+    with self.assertRaisesRegex(IOError, "No such file or directory"):
       converter.Convert(non_existent, non_existent)
 
   def testInvalidExtension(self):
     converter = upgrade_schema_lib.Converter()
     invalid_extension = tempfile.mktemp(suffix=".foo")
-    with self.assertRaisesRegexp(ValueError, "Invalid extension on input"):
+    with self.assertRaisesRegex(ValueError, "Invalid extension on input"):
       converter.Convert(invalid_extension, invalid_extension)
     with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as in_json:
       JsonDumpAndFlush(EMPTY_TEST_SCHEMA_V1, in_json)
-      with self.assertRaisesRegexp(ValueError, "Invalid extension on output"):
+      with self.assertRaisesRegex(ValueError, "Invalid extension on output"):
         converter.Convert(in_json.name, invalid_extension)
 
   def CheckConversion(self, data_old, data_expected):
diff --git a/tensorflow/python/autograph/converters/asserts_test.py b/tensorflow/python/autograph/converters/asserts_test.py
index bf063829e42..2e71c709e90 100644
--- a/tensorflow/python/autograph/converters/asserts_test.py
+++ b/tensorflow/python/autograph/converters/asserts_test.py
@@ -38,7 +38,7 @@ class AssertsTest(converter_testing.TestCase):
     tr = self.transform(f, (functions, asserts, return_statements))
 
     op = tr(constant_op.constant(False))
-    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, 'testmsg'):
+    with self.assertRaisesRegex(errors_impl.InvalidArgumentError, 'testmsg'):
       self.evaluate(op)
 
 
diff --git a/tensorflow/python/autograph/converters/directives_test.py b/tensorflow/python/autograph/converters/directives_test.py
index ac8730fe185..0e39edfd8f7 100644
--- a/tensorflow/python/autograph/converters/directives_test.py
+++ b/tensorflow/python/autograph/converters/directives_test.py
@@ -77,7 +77,7 @@ class DirectivesTest(converter_testing.TestCase):
       directives.set_loop_options()
       pass
 
-    with self.assertRaisesRegexp(ValueError, 'must be used inside a statement'):
+    with self.assertRaisesRegex(ValueError, 'must be used inside a statement'):
       self.transform(f, directives_converter, include_ast=True)
 
   def test_loop_target_not_first(self):
@@ -88,7 +88,7 @@ class DirectivesTest(converter_testing.TestCase):
         a = 2
         directives.set_loop_options(parallel_iterations=10, back_prop=a)
 
-    with self.assertRaisesRegexp(ValueError, 'must be the first statement'):
+    with self.assertRaisesRegex(ValueError, 'must be the first statement'):
       self.transform(f, directives_converter, include_ast=True)
 
   def test_value_verification_does_not_trigger_properties(self):
diff --git a/tensorflow/python/autograph/impl/api_test.py b/tensorflow/python/autograph/impl/api_test.py
index 146cca2f2eb..118258b3b91 100644
--- a/tensorflow/python/autograph/impl/api_test.py
+++ b/tensorflow/python/autograph/impl/api_test.py
@@ -587,7 +587,7 @@ class ApiTest(test.TestCase):
     opts = converter.ConversionOptions(internal_convert_user_code=False)
 
     # f should not be converted, causing len to error out.
-    with self.assertRaisesRegexp(Exception, 'len is not well defined'):
+    with self.assertRaisesRegex(Exception, 'len is not well defined'):
       api.converted_call(f, (constant_op.constant([0]),), None, options=opts)
 
     # len on the other hand should work fine.
diff --git a/tensorflow/python/autograph/lang/special_functions_test.py b/tensorflow/python/autograph/lang/special_functions_test.py
index 8d40f4036c5..ff72468d6f3 100644
--- a/tensorflow/python/autograph/lang/special_functions_test.py
+++ b/tensorflow/python/autograph/lang/special_functions_test.py
@@ -62,12 +62,12 @@ class SpecialFunctionsTest(test.TestCase):
       self.assertAllEqual(self.evaluate(sl), [])
 
   def test_tensor_list_unsupported_initializer(self):
-    with self.assertRaisesRegexp(ValueError, 'unknown type'):
+    with self.assertRaisesRegex(ValueError, 'unknown type'):
       special_functions.tensor_list(np.array([1, 2, 3]))
 
   def test_tensor_list_empty_list_no_type(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'element_dtype and element_shape are required'):
+    with self.assertRaisesRegex(ValueError,
+                                'element_dtype and element_shape are required'):
       special_functions.tensor_list([])
 
   def test_tensor_list_from_elements(self):
diff --git a/tensorflow/python/autograph/operators/conditional_expressions_test.py b/tensorflow/python/autograph/operators/conditional_expressions_test.py
index 3f126116023..2e28e1794f8 100644
--- a/tensorflow/python/autograph/operators/conditional_expressions_test.py
+++ b/tensorflow/python/autograph/operators/conditional_expressions_test.py
@@ -48,7 +48,7 @@ class IfExpTest(test.TestCase):
       conditional_expressions.if_exp(
           constant_op.constant(True), lambda: 1.0, lambda: 2, 'expr_repr')
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         "'expr_repr' has dtype float32 in the main.*int32 in the else"):
       test_fn()
diff --git a/tensorflow/python/autograph/operators/control_flow_test.py b/tensorflow/python/autograph/operators/control_flow_test.py
index 57288be9a9f..ce9b1181e05 100644
--- a/tensorflow/python/autograph/operators/control_flow_test.py
+++ b/tensorflow/python/autograph/operators/control_flow_test.py
@@ -685,7 +685,7 @@ class WhileLoopTest(test.TestCase):
     if not __debug__:
       self.skipTest('Feature disabled in optimized mode.')
     with test.mock.patch.object(control_flow, 'PYTHON_MAX_ITERATIONS', 100):
-      with self.assertRaisesRegexp(ValueError, 'iteration limit'):
+      with self.assertRaisesRegex(ValueError, 'iteration limit'):
         control_flow.while_stmt(
             test=lambda: True,
             body=lambda: None,
@@ -698,7 +698,7 @@ class WhileLoopTest(test.TestCase):
     if not __debug__:
       self.skipTest('Feature disabled in optimized mode.')
     with test.mock.patch.object(control_flow, 'PYTHON_MAX_ITERATIONS', 100):
-      with self.assertRaisesRegexp(ValueError, 'iteration limit'):
+      with self.assertRaisesRegex(ValueError, 'iteration limit'):
         control_flow.for_stmt(
             iter_=range(101),
             extra_test=None,
diff --git a/tensorflow/python/autograph/operators/exceptions_test.py b/tensorflow/python/autograph/operators/exceptions_test.py
index 21ba76bb952..4218db4312b 100644
--- a/tensorflow/python/autograph/operators/exceptions_test.py
+++ b/tensorflow/python/autograph/operators/exceptions_test.py
@@ -40,8 +40,8 @@ class ExceptionsTest(test.TestCase):
           constant_op.constant(False),
           lambda: constant_op.constant('test message'))
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   'test message'):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  'test message'):
         self.evaluate(t)
 
   @test_util.run_deprecated_v1
@@ -54,8 +54,8 @@ class ExceptionsTest(test.TestCase):
       t = exceptions.assert_stmt(
           constant_op.constant(False), lambda: two_tensors)
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   'test message.*another message'):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  'test message.*another message'):
         self.evaluate(t)
 
   def test_assert_python_untriggered(self):
@@ -81,7 +81,7 @@ class ExceptionsTest(test.TestCase):
       side_effect_trace.append(tracer)
       return 'test message'
 
-    with self.assertRaisesRegexp(AssertionError, 'test message'):
+    with self.assertRaisesRegex(AssertionError, 'test message'):
       exceptions.assert_stmt(False, expression_with_side_effects)
     self.assertListEqual(side_effect_trace, [tracer])
 
diff --git a/tensorflow/python/autograph/pyct/transformer_test.py b/tensorflow/python/autograph/pyct/transformer_test.py
index 30284ba5634..05d4664dcae 100644
--- a/tensorflow/python/autograph/pyct/transformer_test.py
+++ b/tensorflow/python/autograph/pyct/transformer_test.py
@@ -211,7 +211,7 @@ class TransformerTest(test.TestCase):
       node = tr.visit(node)
     obtained_message = str(cm.exception)
     expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
-    self.assertRegexpMatches(obtained_message, expected_message)
+    self.assertRegex(obtained_message, expected_message)
 
   def test_robust_error_on_ast_corruption(self):
     # A child class should not be able to be so broken that it causes the error
diff --git a/tensorflow/python/client/events_writer_test.py b/tensorflow/python/client/events_writer_test.py
index 20fd4e5f3e1..b4082c1acbd 100644
--- a/tensorflow/python/client/events_writer_test.py
+++ b/tensorflow/python/client/events_writer_test.py
@@ -73,7 +73,7 @@ class PywrapeventsWriterTest(test_util.TensorFlowTestCase):
       def __str__(self):
         return "Invalid"
 
-    with self.assertRaisesRegexp(TypeError, "Invalid"):
+    with self.assertRaisesRegex(TypeError, "Invalid"):
       _pywrap_events_writer.EventsWriter(b"foo").WriteEvent(_Invalid())
 
 
diff --git a/tensorflow/python/client/session_partial_run_test.py b/tensorflow/python/client/session_partial_run_test.py
index ed9a85b03c8..c1a521112a4 100644
--- a/tensorflow/python/client/session_partial_run_test.py
+++ b/tensorflow/python/client/session_partial_run_test.py
@@ -119,8 +119,8 @@ class PartialRunTest(test_util.TensorFlowTestCase):
     x = array_ops.placeholder(dtypes.float32, shape=())
     fetches = [x * 2, x * 3]
     handle = sess.partial_run_setup(fetches=fetches, feeds=[])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'You must feed a value for placeholder'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'You must feed a value for placeholder'):
       sess.partial_run(handle, fetches[0])
 
   def RunTestPartialRunUnspecifiedFeed(self, sess):
@@ -130,8 +130,8 @@ class PartialRunTest(test_util.TensorFlowTestCase):
     r1 = math_ops.add(a, b)
 
     h = sess.partial_run_setup([r1], [a, b])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'was not specified in partial_run_setup.$'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'was not specified in partial_run_setup.$'):
       sess.partial_run(h, r1, feed_dict={a: 1, b: 2, c: 3})
 
   def RunTestPartialRunUnspecifiedFetch(self, sess):
@@ -142,8 +142,8 @@ class PartialRunTest(test_util.TensorFlowTestCase):
     r2 = math_ops.multiply(a, c)
 
     h = sess.partial_run_setup([r1], [a, b, c])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'was not specified in partial_run_setup.$'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'was not specified in partial_run_setup.$'):
       sess.partial_run(h, r2, feed_dict={a: 1, c: 3})
 
   def RunTestPartialRunAlreadyFed(self, sess):
@@ -155,8 +155,8 @@ class PartialRunTest(test_util.TensorFlowTestCase):
 
     h = sess.partial_run_setup([r1, r2], [a, b, c])
     sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'has already been fed.$'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'has already been fed.$'):
       sess.partial_run(h, r2, feed_dict={a: 1, c: 3})
 
   def RunTestPartialRunAlreadyFetched(self, sess):
@@ -168,8 +168,8 @@ class PartialRunTest(test_util.TensorFlowTestCase):
 
     h = sess.partial_run_setup([r1, r2], [a, b, c])
     sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'has already been fetched.$'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'has already been fetched.$'):
       sess.partial_run(h, r1, feed_dict={c: 3})
 
   def RunTestPartialRunEmptyFetches(self, sess):
@@ -185,7 +185,7 @@ class PartialRunTest(test_util.TensorFlowTestCase):
   def testInvalidPartialRunSetup(self):
     sess = session.Session()
     x = array_ops.placeholder(dtypes.float32, shape=[])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         'specify at least one target to fetch or execute.'):
       sess.partial_run_setup(fetches=[], feeds=[x])
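
The hunks above are representative of the whole patch: unittest renamed assertRaisesRegexp to assertRaisesRegex in Python 3.2, leaving the old spelling as a deprecated alias that emits a DeprecationWarning, and the new name takes the same (exception type, pattern) arguments. Because the pattern is applied with re.search against str(exception), end anchors such as the '$' in 'has already been fed.$' above remain meaningful. A minimal self-contained sketch of both points (illustrative only, standard library only; not part of the patch):

    import unittest


    class RegexAssertExamples(unittest.TestCase):

        def test_pattern_is_searched_in_message(self):
            # assertRaisesRegex checks the exception type and that the pattern
            # is found (via re.search) somewhere in str(exception).
            with self.assertRaisesRegex(ValueError, r"invalid literal .* 'abc'"):
                int("abc")

        def test_dollar_anchors_to_end_of_message(self):
            # With re.search, '$' pins the match to the end of the message
            # while any text may precede it.
            with self.assertRaisesRegex(KeyError, r"'missing'$"):
                {}["missing"]


    if __name__ == "__main__":
        unittest.main()
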
diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py
index 074b50bf69b..696353a5781 100644
--- a/tensorflow/python/client/session_test.py
+++ b/tensorflow/python/client/session_test.py
@@ -1269,11 +1269,11 @@ class SessionTest(test_util.TensorFlowTestCase):
 
   def testUseEmptyGraph(self):
     with session.Session() as sess:
-      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
+      with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
         sess.run([])
-      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
+      with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
         sess.run(())
-      with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
+      with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
         sess.run({})
 
   @test_util.run_v1_only('b/120545219')
@@ -1516,11 +1516,11 @@ class SessionTest(test_util.TensorFlowTestCase):
       feed_t = array_ops.placeholder(dtype=dtypes.float32)
       out_t = array_ops.identity(feed_t)
       feed_val = constant_op.constant(5.0)
-      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
+      with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
         sess.run(out_t, feed_dict={feed_t: feed_val})
-      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
+      with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
         out_t.eval(feed_dict={feed_t: feed_val})
-      with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
+      with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
         out_t.op.run(feed_dict={feed_t: feed_val})
 
   def testFeedPrecisionLossError(self):
@@ -1532,11 +1532,11 @@ class SessionTest(test_util.TensorFlowTestCase):
 
       out_t = constant_op.constant(1.0)
 
-      with self.assertRaisesRegexp(TypeError,
-                                   'is not compatible with Tensor type'):
+      with self.assertRaisesRegex(TypeError,
+                                  'is not compatible with Tensor type'):
         sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
-      with self.assertRaisesRegexp(TypeError,
-                                   'is not compatible with Tensor type'):
+      with self.assertRaisesRegex(TypeError,
+                                  'is not compatible with Tensor type'):
         sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
 
   def testStringFetch(self):
@@ -1598,7 +1598,7 @@ class SessionTest(test_util.TensorFlowTestCase):
         self.assertEqual(c_list[i], out[i].decode('utf-8'))
 
   def testInvalidTargetFails(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.NotFoundError,
         'No session factory registered for the given session options'):
       session.Session('INVALID_TARGET')
@@ -1662,7 +1662,7 @@ class SessionTest(test_util.TensorFlowTestCase):
   def testFeedDictKeyException(self):
     with session.Session() as sess:
       a = constant_op.constant(1.0, dtypes.float32, name='a')
-      with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
+      with self.assertRaisesRegex(TypeError, 'Cannot interpret feed_dict'):
         sess.run(a, feed_dict={'a': [2.0]})
 
   def testPerStepTrace(self):
@@ -1717,10 +1717,10 @@ class SessionTest(test_util.TensorFlowTestCase):
       new_shape = constant_op.constant([2, 2])
       reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
 
-      with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
+      with self.assertRaisesRegex(ValueError, 'Cannot feed value of shape'):
         sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           'Input to reshape is a tensor with 4 values, '
           'but the requested shape has 21'):
@@ -1794,7 +1794,7 @@ class SessionTest(test_util.TensorFlowTestCase):
     sess2_controller = sess2.as_default()
     sess2_controller.__enter__()
 
-    with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
+    with self.assertRaisesRegex(AssertionError, 'Nesting violated'):
       sess1_controller.__exit__(None, None, None)
 
     ops._default_session_stack.reset()
@@ -1818,17 +1818,17 @@ class SessionTest(test_util.TensorFlowTestCase):
 
   def testReentry(self):
     sess = session.Session()
-    with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
+    with self.assertRaisesRegex(RuntimeError, 'not re-entrant'):
       with sess:
         with sess:
           pass
 
   def testInvalidArgument(self):
-    with self.assertRaisesRegexp(TypeError, 'target must be a string'):
+    with self.assertRaisesRegex(TypeError, 'target must be a string'):
       session.Session(37)
-    with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
+    with self.assertRaisesRegex(TypeError, 'config must be a tf.ConfigProto'):
       session.Session(config=37)
-    with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
+    with self.assertRaisesRegex(TypeError, 'graph must be a tf.Graph'):
       session.Session(graph=37)
 
   @test_util.run_v1_only('b/120545219')
@@ -2061,7 +2061,7 @@ class SessionTest(test_util.TensorFlowTestCase):
   def testAutoConvertAndCheckData(self):
     with self.cached_session() as sess:
       a = array_ops.placeholder(dtype=dtypes.string)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
         sess.run(a, feed_dict={a: 1})
 
diff --git a/tensorflow/python/compiler/mlir/mlir_test.py b/tensorflow/python/compiler/mlir/mlir_test.py
index 8fd5952fd76..2a2362d9f6b 100644
--- a/tensorflow/python/compiler/mlir/mlir_test.py
+++ b/tensorflow/python/compiler/mlir/mlir_test.py
@@ -32,8 +32,8 @@ class MLIRImportTest(test.TestCase):
     self.assertIn('func @main', mlir_module)
 
   def test_invalid_pbtxt(self):
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'Could not parse input proto'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'Could not parse input proto'):
       mlir.convert_graph_def('some invalid proto')
 
 
diff --git a/tensorflow/python/compiler/tensorrt/trt_convert_test.py b/tensorflow/python/compiler/tensorrt/trt_convert_test.py
index 05ff6fcaebe..8c5c925f026 100644
--- a/tensorflow/python/compiler/tensorrt/trt_convert_test.py
+++ b/tensorflow/python/compiler/tensorrt/trt_convert_test.py
@@ -563,7 +563,7 @@ class TrtConvertTest(test_util.TensorFlowTestCase, parameterized.TestCase):
               {_SAVED_MODEL_SIGNATURE_KEY: root.run})
 
     # Run TRT conversion.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"Option is_dynamic_op=False is not supported in TF 2.0, "
         "please set it to True instead."):
       self._CreateConverterV2(input_saved_model_dir, is_dynamic_op=False)
@@ -684,16 +684,16 @@ class TrtConvertTest(test_util.TensorFlowTestCase, parameterized.TestCase):
         gen_resource_variable_ops.destroy_resource_op(
             handle, ignore_lookup_error=False)
 
-    with self.assertRaisesRegexp(errors.NotFoundError,
-                                 r"Resource .* does not exist."):
+    with self.assertRaisesRegex(errors.NotFoundError,
+                                r"Resource .* does not exist."):
       _DestroyCache()
 
     # Load the converted model and make sure the engine cache is populated by
     # default.
     root = load.load(output_saved_model_dir)
     _DestroyCache()
-    with self.assertRaisesRegexp(errors.NotFoundError,
-                                 r"Resource .* does not exist."):
+    with self.assertRaisesRegex(errors.NotFoundError,
+                                r"Resource .* does not exist."):
       _DestroyCache()
 
     # Load the converted model again and make sure the engine cache is destroyed
@@ -701,8 +701,8 @@ class TrtConvertTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     root = load.load(output_saved_model_dir)
     del root
     gc.collect()  # Force GC to destroy the TRT engine cache.
-    with self.assertRaisesRegexp(errors.NotFoundError,
-                                 r"Resource .* does not exist."):
+    with self.assertRaisesRegex(errors.NotFoundError,
+                                r"Resource .* does not exist."):
       _DestroyCache()
 
   def _CompareSavedModel(self, model_class):
diff --git a/tensorflow/python/compiler/xla/experimental_compile_test.py b/tensorflow/python/compiler/xla/experimental_compile_test.py
index c0a1c4bf307..963a92d4384 100644
--- a/tensorflow/python/compiler/xla/experimental_compile_test.py
+++ b/tensorflow/python/compiler/xla/experimental_compile_test.py
@@ -103,8 +103,8 @@ class ExperimentalCompileTest(test.TestCase):
       x = xla_func(inputs)
       # XLA support is not yet enabled for TF ROCm
       if not test.is_built_with_rocm():
-        with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                     "not compilable"):
+        with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                    "not compilable"):
           with session.Session(graph=g) as sess:
             sess.run(x, feed_dict={inputs: [1, 2, 2, 3, 3]})
 
diff --git a/tensorflow/python/compiler/xla/jit_test.py b/tensorflow/python/compiler/xla/jit_test.py
index 14fb611dc7f..5294d970a9b 100644
--- a/tensorflow/python/compiler/xla/jit_test.py
+++ b/tensorflow/python/compiler/xla/jit_test.py
@@ -57,7 +57,7 @@ class JITTest(test.TestCase, parameterized.TestCase):
   @test_util.run_v2_only
   def testJITInEager(self):
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         RuntimeError, "xla.experimental.jit_scope is not supported when eager "
         "execution is enabled. Try use it inside tf.function."):
       with jit.experimental_jit_scope(True):
@@ -204,7 +204,7 @@ class CompilationEnabledInGradientTest(test.TestCase, parameterized.TestCase):
       for cg in c_grad_ops:
         self.assertTrue(cg.get_attr("_XlaCompile"))
       for ncg in nc_grad_ops:
-        with self.assertRaisesRegexp(ValueError, "[Nn]o attr named"):
+        with self.assertRaisesRegex(ValueError, "[Nn]o attr named"):
           ncg.get_attr("_XlaCompile")
 
       # d/dx (x ** 4) = 4 * (x ** 3)
diff --git a/tensorflow/python/compiler/xla/xla_test.py b/tensorflow/python/compiler/xla/xla_test.py
index 6dc0789ba4f..af18abf727a 100644
--- a/tensorflow/python/compiler/xla/xla_test.py
+++ b/tensorflow/python/compiler/xla/xla_test.py
@@ -112,7 +112,7 @@ class XLACompileContextTest(test.TestCase, parameterized.TestCase):
 
     context = self.create_test_xla_compile_context()
     context.Enter()
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         NotImplementedError, 'Non-resource Variables are not supported inside '
         r'XLA computations \(operator name: Assign\)'):
       state_ops.assign(a, a + 1)
@@ -126,8 +126,8 @@ class XLACompileContextTest(test.TestCase, parameterized.TestCase):
 
     context2 = self.create_test_xla_compile_context()
     context2.Enter()
-    with self.assertRaisesRegexp(ValueError,
-                                 'XLA compiled computations cannot be nested'):
+    with self.assertRaisesRegex(ValueError,
+                                'XLA compiled computations cannot be nested'):
       constant_op.constant(1)
     context2.Exit()
     context1.Exit()
diff --git a/tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py b/tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py
index ecda3a7e0f9..362495744dc 100644
--- a/tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py
@@ -69,8 +69,7 @@ class AssertCardinalityTest(test_base.DatasetTestBase, parameterized.TestCase):
     dataset = dataset.apply(
         cardinality.assert_cardinality(asserted_cardinality))
     get_next = self.getNext(dataset)
-    with self.assertRaisesRegexp(errors.FailedPreconditionError,
-                                 expected_error):
+    with self.assertRaisesRegex(errors.FailedPreconditionError, expected_error):
       while True:
         self.evaluate(get_next())
 
diff --git a/tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py b/tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py
index 5dd1bb0532c..a32d61c667e 100644
--- a/tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py
@@ -85,7 +85,7 @@ class DenseToSparseBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
   @combinations.generate(test_base.default_test_combinations())
   def testDenseToSparseBatchDatasetWithInvalidShape(self):
     input_tensor = array_ops.constant([[1]])
-    with self.assertRaisesRegexp(ValueError, "Dimension -2 must be >= 0"):
+    with self.assertRaisesRegex(ValueError, "Dimension -2 must be >= 0"):
       dataset_ops.Dataset.from_tensors(input_tensor).apply(
           batching.dense_to_sparse_batch(4, [-2]))
 
@@ -98,14 +98,14 @@ class DenseToSparseBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
 
     # Initialize with an input tensor of incompatible rank.
     get_next = self.getNext(dataset_fn([[1]]))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "incompatible with the row shape"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "incompatible with the row shape"):
       self.evaluate(get_next())
 
     # Initialize with an input tensor that is larger than `row_shape`.
     get_next = self.getNext(dataset_fn(np.int32(range(13))))
-    with self.assertRaisesRegexp(errors.DataLossError,
-                                 "larger than the row shape"):
+    with self.assertRaisesRegex(errors.DataLossError,
+                                "larger than the row shape"):
       self.evaluate(get_next())
 
 
diff --git a/tensorflow/python/data/experimental/kernel_tests/directed_interleave_dataset_test.py b/tensorflow/python/data/experimental/kernel_tests/directed_interleave_dataset_test.py
index fc18afaa842..f6ccc5163a4 100644
--- a/tensorflow/python/data/experimental/kernel_tests/directed_interleave_dataset_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/directed_interleave_dataset_test.py
@@ -113,38 +113,38 @@ class DirectedInterleaveDatasetTest(test_base.DatasetTestBase,
 
   @combinations.generate(test_base.default_test_combinations())
   def testErrors(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r"vector of length `len\(datasets\)`"):
+    with self.assertRaisesRegex(ValueError,
+                                r"vector of length `len\(datasets\)`"):
       interleave_ops.sample_from_datasets(
           [dataset_ops.Dataset.range(10),
            dataset_ops.Dataset.range(20)],
           weights=[0.25, 0.25, 0.25, 0.25])
 
-    with self.assertRaisesRegexp(TypeError, "`tf.float32` or `tf.float64`"):
+    with self.assertRaisesRegex(TypeError, "`tf.float32` or `tf.float64`"):
       interleave_ops.sample_from_datasets(
           [dataset_ops.Dataset.range(10),
            dataset_ops.Dataset.range(20)],
           weights=[1, 1])
 
-    with self.assertRaisesRegexp(TypeError, "must have the same type"):
+    with self.assertRaisesRegex(TypeError, "must have the same type"):
       interleave_ops.sample_from_datasets([
           dataset_ops.Dataset.from_tensors(0),
           dataset_ops.Dataset.from_tensors(0.0)
       ])
 
-    with self.assertRaisesRegexp(TypeError, "tf.int64"):
+    with self.assertRaisesRegex(TypeError, "tf.int64"):
       interleave_ops.choose_from_datasets([
           dataset_ops.Dataset.from_tensors(0),
           dataset_ops.Dataset.from_tensors(1)
       ], choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
 
-    with self.assertRaisesRegexp(TypeError, "scalar"):
+    with self.assertRaisesRegex(TypeError, "scalar"):
       interleave_ops.choose_from_datasets([
           dataset_ops.Dataset.from_tensors(0),
           dataset_ops.Dataset.from_tensors(1)
       ], choice_dataset=dataset_ops.Dataset.from_tensors([1.0]))
 
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, "out of range"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, "out of range"):
       dataset = interleave_ops.choose_from_datasets(
           [dataset_ops.Dataset.from_tensors(0)],
           choice_dataset=dataset_ops.Dataset.from_tensors(
diff --git a/tensorflow/python/data/experimental/kernel_tests/get_single_element_test.py b/tensorflow/python/data/experimental/kernel_tests/get_single_element_test.py
index 59c2ef68d99..b508b78da58 100644
--- a/tensorflow/python/data/experimental/kernel_tests/get_single_element_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/get_single_element_test.py
@@ -64,7 +64,7 @@ class GetSingleElementTest(test_base.DatasetTestBase, parameterized.TestCase):
       self.assertAllEqual([skip], sparse_val.values)
       self.assertAllEqual([skip], sparse_val.dense_shape)
     else:
-      with self.assertRaisesRegexp(error, error_msg):
+      with self.assertRaisesRegex(error, error_msg):
         self.evaluate(get_single_element.get_single_element(dataset))
 
   @combinations.generate(test_base.default_test_combinations())
diff --git a/tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py b/tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py
index bf823143d57..8671dec1745 100644
--- a/tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py
@@ -143,7 +143,7 @@ class GroupByReducerTest(test_base.DatasetTestBase, parameterized.TestCase):
         finalize_func=lambda x: x)
 
     dataset = dataset_ops.Dataset.range(10)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         "The element types for the new state must match the initial state."):
       dataset.apply(
@@ -158,7 +158,7 @@ class GroupByReducerTest(test_base.DatasetTestBase, parameterized.TestCase):
         finalize_func=lambda x: x)
 
     dataset = dataset_ops.Dataset.range(10)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "`key_func` must return a single tf.int64 tensor."):
       dataset.apply(
           grouping.group_by_reducer(lambda _: np.int64((0, 0)), reducer))
@@ -172,7 +172,7 @@ class GroupByReducerTest(test_base.DatasetTestBase, parameterized.TestCase):
         finalize_func=lambda x: x)
 
     dataset = dataset_ops.Dataset.range(10)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "`key_func` must return a single tf.int64 tensor."):
       dataset.apply(
           grouping.group_by_reducer(lambda _: "wrong", reducer))
diff --git a/tensorflow/python/data/experimental/kernel_tests/group_by_window_test.py b/tensorflow/python/data/experimental/kernel_tests/group_by_window_test.py
index 581d8f42792..a35327c7b70 100644
--- a/tensorflow/python/data/experimental/kernel_tests/group_by_window_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/group_by_window_test.py
@@ -265,7 +265,7 @@ class GroupByWindowTest(test_base.DatasetTestBase, parameterized.TestCase):
         grouping.group_by_window(lambda _: 0, lambda _, xs: xs, 0))
 
     get_next = self.getNext(dataset)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Window size must be greater than zero, but got 0."):
       print(self.evaluate(get_next()))
diff --git a/tensorflow/python/data/experimental/kernel_tests/make_batched_features_dataset_test.py b/tensorflow/python/data/experimental/kernel_tests/make_batched_features_dataset_test.py
index 980fd03b073..4016fbbed66 100644
--- a/tensorflow/python/data/experimental/kernel_tests/make_batched_features_dataset_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/make_batched_features_dataset_test.py
@@ -223,7 +223,7 @@ class MakeBatchedFeaturesDatasetTest(
 
   @combinations.generate(test_base.default_test_combinations())
   def testOldStyleReader(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r"The `reader` argument must return a `Dataset` object. "
         r"`tf.ReaderBase` subclasses are not supported."):
       _ = readers.make_batched_features_dataset(
diff --git a/tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py b/tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py
index 5f8382f43c4..23063b13f66 100644
--- a/tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py
@@ -258,8 +258,8 @@ class MakeCsvDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
         compression_type="GZIP",
     )
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "compression_type .ZLIB. is not supported"):
+    with self.assertRaisesRegex(ValueError,
+                                "compression_type .ZLIB. is not supported"):
       self._test_dataset(
           inputs,
           expected_output=expected_output,
diff --git a/tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py b/tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py
index 83eb210206e..525c6c22295 100644
--- a/tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py
@@ -226,7 +226,7 @@ class MapAndBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
   def testMapAndBatchFails(self):
     """Test a dataset that maps a TF function across its input elements."""
 
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, "oops"):
       dataset = dataset_ops.Dataset.from_tensors(
           array_ops.check_numerics(
               constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
diff --git a/tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py b/tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py
index f12b702a68e..a6efe989ee3 100644
--- a/tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py
@@ -185,8 +185,8 @@ class MapDefunTest(test_base.DatasetTestBase, parameterized.TestCase):
             constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0),
         [100, 1])
     map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0]
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r"indices = 10 is not in \[0, 5\)"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r"indices = 10 is not in \[0, 5\)"):
       self.evaluate(map_defun_op)
 
   @combinations.generate(_test_combinations())
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_branch_dataset_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_branch_dataset_test.py
index bb7849fb213..6370cb8a3df 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_branch_dataset_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_branch_dataset_test.py
@@ -150,8 +150,8 @@ class ChooseFastestBranchDatasetTest(test_base.DatasetTestBase,
     expected_error_msg = ("`num_elements_per_branch` must be divisible by "
                           "`ratio_denominator`")
     if context.executing_eagerly():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   expected_error_msg):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  expected_error_msg):
         make_dataset()
     else:
       choose_fastest = make_dataset()
diff --git a/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py b/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
index a806e745ef9..080a03c76dd 100644
--- a/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
@@ -466,8 +466,8 @@ class MapVectorizationTest(test_base.DatasetTestBase, parameterized.TestCase):
       # x has leading dimension 5, this will raise an error
       return array_ops.gather(x, 10)
 
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r"indices = 10 is not in \[0, 5\)"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r"indices = 10 is not in \[0, 5\)"):
       base_dataset = dataset_ops.Dataset.range(5).repeat(5).batch(
           5, drop_remainder=True)
       _, optimized = self._get_test_datasets(base_dataset, map_fn)
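
Hunks like the one just above keep regex metacharacters escaped in the expected message (r"indices = 10 is not in \[0, 5\)"), since unescaped brackets and parentheses would be read as character classes and groups. When the whole message is known, re.escape is an equivalent way to match it literally. A brief sketch (illustrative only; the IndexError below is a stand-in, not a TensorFlow error):

    import re
    import unittest


    class EscapedPatternExample(unittest.TestCase):

        def test_literal_brackets_and_parens(self):
            message = "indices = 10 is not in [0, 5)"
            # re.escape turns every metacharacter into a literal, so the full
            # message is matched verbatim by assertRaisesRegex.
            with self.assertRaisesRegex(IndexError, re.escape(message)):
                raise IndexError(message)


    if __name__ == "__main__":
        unittest.main()
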
diff --git a/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py b/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py
index 61d0e5eb0bb..3f8b40be508 100644
--- a/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py
@@ -79,8 +79,8 @@ class RebatchDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
   def testScalarInputError(self):
     dataset = dataset_ops.Dataset.range(1024)
     distribute._RebatchDataset(dataset.batch(4), num_replicas=4)
-    with self.assertRaisesRegexp(ValueError, ("You can fix the issue "
-                                              "by adding the `batch`")):
+    with self.assertRaisesRegex(ValueError, ("You can fix the issue "
+                                             "by adding the `batch`")):
       distribute._RebatchDataset(dataset, num_replicas=4)
 
   @combinations.generate(
diff --git a/tensorflow/python/data/experimental/kernel_tests/scan_test.py b/tensorflow/python/data/experimental/kernel_tests/scan_test.py
index dd97e57c700..a5fe5a7c62f 100644
--- a/tensorflow/python/data/experimental/kernel_tests/scan_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/scan_test.py
@@ -184,7 +184,7 @@ class ScanTest(test_base.DatasetTestBase, parameterized.TestCase):
     start = empty_ta
     start = start.write(0, -1)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         NotImplementedError,
         r"construct a new TensorArray inside the function"):
       dataset_ops.Dataset.range(6).apply(scan_ops.scan(start, scan_fn))
@@ -226,7 +226,7 @@ class ScanTest(test_base.DatasetTestBase, parameterized.TestCase):
       return constant_op.constant(1, dtype=dtypes.int64), state
 
     dataset = dataset_ops.Dataset.range(10)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         "The element types for the new state must match the initial state."):
       dataset.apply(
@@ -239,7 +239,7 @@ class ScanTest(test_base.DatasetTestBase, parameterized.TestCase):
       return constant_op.constant(1, dtype=dtypes.int64)
 
     dataset = dataset_ops.Dataset.range(10)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         "The scan function must return a pair comprising the new state and the "
         "output value."):
diff --git a/tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py b/tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py
index 22fe264e8be..bab6c594072 100644
--- a/tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py
@@ -53,8 +53,8 @@ class SkipDatasetSerializationTest(
 
   @combinations.generate(test_base.default_test_combinations())
   def testInvalidSkip(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'Shape must be rank 0 but is rank 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'Shape must be rank 0 but is rank 1'):
       self.run_core_tests(lambda: self._build_skip_dataset([1, 2]), 0)
 
 
@@ -83,8 +83,8 @@ class TakeDatasetSerializationTest(
     self.run_core_tests(lambda: self._build_take_dataset(0), 0)
 
   def testInvalidTake(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'Shape must be rank 0 but is rank 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'Shape must be rank 0 but is rank 1'):
       self.run_core_tests(lambda: self._build_take_dataset([1, 2]), 0)
 
 
@@ -120,8 +120,8 @@ class RepeatDatasetSerializationTest(
 
   @combinations.generate(test_base.default_test_combinations())
   def testInvalidRepeat(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'Shape must be rank 0 but is rank 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'Shape must be rank 0 but is rank 1'):
       self.run_core_tests(lambda: self._build_repeat_dataset([1, 2], 0), 0)
 
 
diff --git a/tensorflow/python/data/experimental/kernel_tests/serialization/stats_dataset_serialization_test.py b/tensorflow/python/data/experimental/kernel_tests/serialization/stats_dataset_serialization_test.py
index 66658ea0a5b..68bfd2aba35 100644
--- a/tensorflow/python/data/experimental/kernel_tests/serialization/stats_dataset_serialization_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/serialization/stats_dataset_serialization_test.py
@@ -44,8 +44,8 @@ class StatsDatasetSerializationTest(
 
   @combinations.generate(test_base.default_test_combinations())
   def test_bytes_produced_stats_invalid_tag_shape(self):
-    with self.assertRaisesRegexp(
-        ValueError, "Shape must be rank 0 but is rank 1"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 0 but is rank 1"):
       # pylint: disable=g-long-lambda
       self.run_core_tests(
           lambda: dataset_ops.Dataset.range(100).apply(
@@ -71,8 +71,8 @@ class StatsDatasetSerializationTest(
 
   @combinations.generate(test_base.default_test_combinations())
   def test_latency_stats_invalid_tag_shape(self):
-    with self.assertRaisesRegexp(
-        ValueError, "Shape must be rank 0 but is rank 1"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 0 but is rank 1"):
       # pylint: disable=g-long-lambda
       self.run_core_tests(
           lambda: dataset_ops.Dataset.range(100).apply(
diff --git a/tensorflow/python/data/kernel_tests/concatenate_test.py b/tensorflow/python/data/kernel_tests/concatenate_test.py
index bf726607681..203cefab32e 100644
--- a/tensorflow/python/data/kernel_tests/concatenate_test.py
+++ b/tensorflow/python/data/kernel_tests/concatenate_test.py
@@ -110,7 +110,7 @@ class ConcatenateTest(test_base.DatasetTestBase, parameterized.TestCase):
     dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
         to_concatenate_components)
 
-    with self.assertRaisesRegexp(TypeError, "have different types"):
+    with self.assertRaisesRegex(TypeError, "have different types"):
       input_dataset.concatenate(dataset_to_concatenate)
 
   @combinations.generate(test_base.default_test_combinations())
@@ -128,7 +128,7 @@ class ConcatenateTest(test_base.DatasetTestBase, parameterized.TestCase):
     dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
         to_concatenate_components)
 
-    with self.assertRaisesRegexp(TypeError, "have different types"):
+    with self.assertRaisesRegex(TypeError, "have different types"):
       input_dataset.concatenate(dataset_to_concatenate)
 
   @combinations.generate(test_base.default_test_combinations())
@@ -144,7 +144,7 @@ class ConcatenateTest(test_base.DatasetTestBase, parameterized.TestCase):
     dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
         to_concatenate_components)
 
-    with self.assertRaisesRegexp(TypeError, "have different types"):
+    with self.assertRaisesRegex(TypeError, "have different types"):
       input_dataset.concatenate(dataset_to_concatenate)
 
 
diff --git a/tensorflow/python/data/kernel_tests/dataset_test.py b/tensorflow/python/data/kernel_tests/dataset_test.py
index 3e474dd2511..32184d1905f 100644
--- a/tensorflow/python/data/kernel_tests/dataset_test.py
+++ b/tensorflow/python/data/kernel_tests/dataset_test.py
@@ -351,7 +351,7 @@ class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
   def testSameGraphError(self):
     dataset = dataset_ops.Dataset.range(10)
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
+      with self.assertRaisesRegex(ValueError, "must be from the same graph"):
         dataset = dataset.batch(2)
 
   @combinations.generate(
@@ -359,7 +359,7 @@ class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
   def testSameGraphErrorOneShot(self):
     dataset = dataset_ops.Dataset.range(10)
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Please ensure that all datasets in the pipeline are "
           "created in the same graph as the iterator."):
         _ = dataset_ops.make_one_shot_iterator(dataset)
@@ -369,7 +369,7 @@ class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
   def testSameGraphErrorInitializable(self):
     dataset = dataset_ops.Dataset.range(10)
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Please ensure that all datasets in the pipeline are "
           "created in the same graph as the iterator."):
         _ = dataset_ops.make_initializable_iterator(dataset)
diff --git a/tensorflow/python/data/kernel_tests/from_generator_test.py b/tensorflow/python/data/kernel_tests/from_generator_test.py
index 386108f0de7..8643228f267 100644
--- a/tensorflow/python/data/kernel_tests/from_generator_test.py
+++ b/tensorflow/python/data/kernel_tests/from_generator_test.py
@@ -453,7 +453,7 @@ class FromGeneratorTest(test_base.DatasetTestBase, parameterized.TestCase):
       for _ in range(10):
         yield [20]
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r"Cannot convert value \[tf.int64\] to a TensorFlow DType"):
       dataset_ops.Dataset.from_generator(
           generator, output_types=[dtypes.int64])
diff --git a/tensorflow/python/data/kernel_tests/iterator_test.py b/tensorflow/python/data/kernel_tests/iterator_test.py
index 36689ed75fb..060014652ec 100644
--- a/tensorflow/python/data/kernel_tests/iterator_test.py
+++ b/tensorflow/python/data/kernel_tests/iterator_test.py
@@ -72,7 +72,7 @@ class IteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
     dataset = (
         dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
         .map(lambda x: x + var))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
         "datasets that capture stateful objects.+myvar"):
       dataset_ops.make_one_shot_iterator(dataset)
@@ -213,17 +213,17 @@ class IteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
     next_element = iterator.get_next()
 
     with self.cached_session() as sess:
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, ""):
         sess.run(next_element)
 
       # Test that subsequent attempts to use the iterator also fail.
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, ""):
         sess.run(next_element)
 
     with self.cached_session() as sess:
 
       def consumer_thread():
-        with self.assertRaisesRegexp(errors.InvalidArgumentError, ""):
+        with self.assertRaisesRegex(errors.InvalidArgumentError, ""):
           sess.run(next_element)
 
       num_threads = 8
@@ -293,8 +293,8 @@ class IteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
     get_next = iterator.get_next()
 
     with self.cached_session() as sess:
-      with self.assertRaisesRegexp(errors.FailedPreconditionError,
-                                   "iterator has not been initialized"):
+      with self.assertRaisesRegex(errors.FailedPreconditionError,
+                                  "iterator has not been initialized"):
         sess.run(get_next)
 
   @combinations.generate(test_base.graph_only_combinations())
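
Several of the hunks above pass an empty pattern, e.g. assertRaisesRegex(errors.InvalidArgumentError, ""). An empty regex matches any string, so such assertions only check the exception type, behaving like a plain assertRaises. A tiny sketch (illustrative only, standard library only):

    import unittest


    class EmptyPatternExample(unittest.TestCase):

        def test_empty_pattern_checks_type_only(self):
            # re.search("", message) always succeeds, so only the exception
            # type is actually asserted here.
            with self.assertRaisesRegex(ZeroDivisionError, ""):
                1 / 0


    if __name__ == "__main__":
        unittest.main()
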
diff --git a/tensorflow/python/data/kernel_tests/map_test.py b/tensorflow/python/data/kernel_tests/map_test.py
index 03df41cd662..275be3ea635 100644
--- a/tensorflow/python/data/kernel_tests/map_test.py
+++ b/tensorflow/python/data/kernel_tests/map_test.py
@@ -1012,7 +1012,7 @@ class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
   @combinations.generate(_test_combinations())
   def testReturnValueError(self, apply_map):
     dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r"Unsupported return value from function passed to "
         r"Dataset.map\(\)"):
       _ = apply_map(dataset, lambda x: Foo)
diff --git a/tensorflow/python/data/kernel_tests/options_test.py b/tensorflow/python/data/kernel_tests/options_test.py
index 27b5a336a6c..6869306e0d6 100644
--- a/tensorflow/python/data/kernel_tests/options_test.py
+++ b/tensorflow/python/data/kernel_tests/options_test.py
@@ -68,8 +68,7 @@ class OptionsTest(test_base.DatasetTestBase, parameterized.TestCase):
     options1.experimental_optimization.autotune = True
     options2 = dataset_ops.Options()
     options2.experimental_optimization.autotune = False
-    with self.assertRaisesRegexp(ValueError,
-                                 "Cannot merge incompatible values"):
+    with self.assertRaisesRegex(ValueError, "Cannot merge incompatible values"):
       dataset_ops.Dataset.range(0).with_options(options1).with_options(options2)
 
   @combinations.generate(test_base.default_test_combinations())
diff --git a/tensorflow/python/data/kernel_tests/padded_batch_test.py b/tensorflow/python/data/kernel_tests/padded_batch_test.py
index b4c5fdb2a1b..effbaad8c39 100644
--- a/tensorflow/python/data/kernel_tests/padded_batch_test.py
+++ b/tensorflow/python/data/kernel_tests/padded_batch_test.py
@@ -243,14 +243,14 @@ class PaddedBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
 
   @combinations.generate(test_base.default_test_combinations())
   def testPaddedBatchShapeErrorWrongRank(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'The padded shape \(1,\) is not compatible with the '
         r'corresponding input component shape \(\).'):
       _ = dataset_ops.Dataset.range(10).padded_batch(5, padded_shapes=[1])
 
   @combinations.generate(test_base.default_test_combinations())
   def testPaddedBatchShapeErrorTooSmall(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'The padded shape \(1,\) is not compatible with the '
         r'corresponding input component shape \(3,\).'):
       _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
@@ -258,7 +258,7 @@ class PaddedBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
 
   @combinations.generate(test_base.default_test_combinations())
   def testPaddedBatchShapeErrorShapeNotRank1(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'Padded shape .* must be a 1-D tensor '
         r'of tf.int64 values, but its shape was \(2, 2\).'):
       _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
@@ -266,7 +266,7 @@ class PaddedBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
 
   @combinations.generate(test_base.default_test_combinations())
   def testPaddedBatchShapeErrorShapeNotInt(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r'Padded shape .* must be a 1-D tensor '
         r'of tf.int64 values, but its element type was float32.'):
       _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch(
@@ -274,7 +274,7 @@ class PaddedBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
 
   @combinations.generate(test_base.default_test_combinations())
   def testPaddedBatchShapeErrorWrongRankFromTensor(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'The padded shape \(1,\) is not compatible with the '
         r'corresponding input component shape \(\).'):
       shape_as_tensor = constant_op.constant([1], dtype=dtypes.int64)
@@ -283,14 +283,14 @@ class PaddedBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
 
   @combinations.generate(test_base.default_test_combinations())
   def testPaddedBatchShapeErrorDefaultShapeWithUnknownRank(self):
-    with self.assertRaisesRegexp(ValueError, r'`padded_shapes`.*unknown rank'):
+    with self.assertRaisesRegex(ValueError, r'`padded_shapes`.*unknown rank'):
       ds = dataset_ops.Dataset.from_generator(
           lambda: iter([1, 2, 3]), output_types=dtypes.int32)
       ds.padded_batch(2)
 
   @combinations.generate(test_base.graph_only_combinations())
   def testPaddedBatchShapeErrorPlaceholder(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'The padded shape \((\?|None), (\?|None)\) is not compatible with the '
         r'corresponding input component shape \(\).'):
diff --git a/tensorflow/python/data/util/convert_test.py b/tensorflow/python/data/util/convert_test.py
index 78ca6e95139..7ec41d70879 100644
--- a/tensorflow/python/data/util/convert_test.py
+++ b/tensorflow/python/data/util/convert_test.py
@@ -79,13 +79,13 @@ class ConvertTest(test.TestCase):
                                 constant_op.constant([-1],
                                                      dtype=dtypes.int64))))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"The given shape .* must be a 1-D tensor of tf.int64 "
         r"values, but the shape was \(2, 2\)."):
       convert.partial_shape_to_tensor(constant_op.constant(
           [[1, 1], [1, 1]], dtype=dtypes.int64))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r"The given shape .* must be a 1-D tensor of tf.int64 "
         r"values, but the element type was float32."):
       convert.partial_shape_to_tensor(constant_op.constant([1., 1.]))
diff --git a/tensorflow/python/data/util/nest_test.py b/tensorflow/python/data/util/nest_test.py
index e53753e1525..5e42fae2ad7 100644
--- a/tensorflow/python/data/util/nest_test.py
+++ b/tensorflow/python/data/util/nest_test.py
@@ -58,10 +58,10 @@ class NestTest(test.TestCase):
     self.assertEqual(
         np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
 
-    with self.assertRaisesRegexp(ValueError, "Structure is a scalar"):
+    with self.assertRaisesRegex(ValueError, "Structure is a scalar"):
       nest.pack_sequence_as("scalar", [4, 5])
 
-    with self.assertRaisesRegexp(TypeError, "flat_sequence"):
+    with self.assertRaisesRegex(TypeError, "flat_sequence"):
       nest.pack_sequence_as([4, 5], "bad_sequence")
 
     with self.assertRaises(ValueError):
@@ -191,20 +191,20 @@ class NestTest(test.TestCase):
     nest.assert_same_structure("abc", np.array([0, 1]))
     nest.assert_same_structure("abc", constant_op.constant([0, 1]))
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "don't have the same nested structure"):
+    with self.assertRaisesRegex(ValueError,
+                                "don't have the same nested structure"):
       nest.assert_same_structure(structure1, structure_different_num_elements)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "don't have the same nested structure"):
+    with self.assertRaisesRegex(ValueError,
+                                "don't have the same nested structure"):
       nest.assert_same_structure((0, 1), np.array([0, 1]))
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "don't have the same nested structure"):
+    with self.assertRaisesRegex(ValueError,
+                                "don't have the same nested structure"):
       nest.assert_same_structure(0, (0, 1))
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "don't have the same nested structure"):
+    with self.assertRaisesRegex(ValueError,
+                                "don't have the same nested structure"):
       nest.assert_same_structure(structure1, structure_different_nesting)
 
     named_type_0 = collections.namedtuple("named_0", ("a", "b"))
@@ -217,24 +217,23 @@ class NestTest(test.TestCase):
     self.assertRaises(TypeError, nest.assert_same_structure,
                       named_type_0(3, 4), named_type_1(3, 4))
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "don't have the same nested structure"):
+    with self.assertRaisesRegex(ValueError,
+                                "don't have the same nested structure"):
       nest.assert_same_structure(named_type_0(3, 4), named_type_0((3,), 4))
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "don't have the same nested structure"):
+    with self.assertRaisesRegex(ValueError,
+                                "don't have the same nested structure"):
       nest.assert_same_structure(((3,), 4), (3, (4,)))
 
     structure1_list = {"a": ((1, 2), 3), "b": 4, "c": (5, 6)}
     structure2_list = {"a": ((1, 2), 3), "b": 4, "d": (5, 6)}
-    with self.assertRaisesRegexp(TypeError,
-                                 "don't have the same sequence type"):
+    with self.assertRaisesRegex(TypeError, "don't have the same sequence type"):
       nest.assert_same_structure(structure1, structure1_list)
     nest.assert_same_structure(structure1, structure2, check_types=False)
     nest.assert_same_structure(structure1, structure1_list, check_types=False)
-    with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
+    with self.assertRaisesRegex(ValueError, "don't have the same set of keys"):
       nest.assert_same_structure(structure1_list, structure2_list)
-    with self.assertRaisesRegexp(ValueError, "don't have the same set of keys"):
+    with self.assertRaisesRegex(ValueError, "don't have the same set of keys"):
       nest.assert_same_structure(structure_dictionary,
                                  structure_dictionary_diff_nested)
     nest.assert_same_structure(
@@ -262,26 +261,26 @@ class NestTest(test.TestCase):
 
     self.assertEqual(7, nest.map_structure(lambda x, y: x + y, 3, 4))
 
-    with self.assertRaisesRegexp(TypeError, "callable"):
+    with self.assertRaisesRegex(TypeError, "callable"):
       nest.map_structure("bad", structure1_plus1)
 
-    with self.assertRaisesRegexp(ValueError, "same nested structure"):
+    with self.assertRaisesRegex(ValueError, "same nested structure"):
       nest.map_structure(lambda x, y: None, 3, (3,))
 
-    with self.assertRaisesRegexp(TypeError, "same sequence type"):
+    with self.assertRaisesRegex(TypeError, "same sequence type"):
       nest.map_structure(lambda x, y: None, ((3, 4), 5), {"a": (3, 4), "b": 5})
 
-    with self.assertRaisesRegexp(ValueError, "same nested structure"):
+    with self.assertRaisesRegex(ValueError, "same nested structure"):
       nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
 
-    with self.assertRaisesRegexp(ValueError, "same nested structure"):
+    with self.assertRaisesRegex(ValueError, "same nested structure"):
       nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
                          check_types=False)
 
-    with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
+    with self.assertRaisesRegex(ValueError, "Only valid keyword argument"):
       nest.map_structure(lambda x: None, structure1, foo="a")
 
-    with self.assertRaisesRegexp(ValueError, "Only valid keyword argument"):
+    with self.assertRaisesRegex(ValueError, "Only valid keyword argument"):
       nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
 
   def testAssertShallowStructure(self):
@@ -290,7 +289,7 @@ class NestTest(test.TestCase):
     expected_message = (
         "The two structures don't have the same sequence length. Input "
         "structure has length 2, while shallow structure has length 3.")
-    with self.assertRaisesRegexp(ValueError, expected_message):
+    with self.assertRaisesRegex(ValueError, expected_message):
       nest.assert_shallow_structure(inp_abc, inp_ab)
 
     inp_ab1 = ((1, 1), (2, 2))
@@ -299,7 +298,7 @@ class NestTest(test.TestCase):
         "The two structures don't have the same sequence type. Input structure "
         "has type <(type|class) 'tuple'>, while shallow structure has type "
         "<(type|class) 'dict'>.")
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       nest.assert_shallow_structure(inp_ab2, inp_ab1)
     nest.assert_shallow_structure(inp_ab2, inp_ab1, check_types=False)
 
@@ -309,7 +308,7 @@ class NestTest(test.TestCase):
         r"The two structures don't have the same keys. Input "
         r"structure has keys \['c'\], while shallow structure has "
         r"keys \['d'\].")
-    with self.assertRaisesRegexp(ValueError, expected_message):
+    with self.assertRaisesRegex(ValueError, expected_message):
       nest.assert_shallow_structure(inp_ab2, inp_ab1)
 
     inp_ab = collections.OrderedDict([("a", 1), ("b", (2, 3))])
@@ -387,14 +386,14 @@ class NestTest(test.TestCase):
     shallow_tree = ("shallow_tree",)
     expected_message = ("If shallow structure is a sequence, input must also "
                         "be a sequence. Input has type: <(type|class) 'str'>.")
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, list(shallow_tree))
 
     input_tree = "input_tree"
     shallow_tree = ("shallow_tree_9", "shallow_tree_8")
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, list(shallow_tree))
@@ -404,14 +403,14 @@ class NestTest(test.TestCase):
     shallow_tree = (9,)
     expected_message = ("If shallow structure is a sequence, input must also "
                         "be a sequence. Input has type: <(type|class) 'int'>.")
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, list(shallow_tree))
 
     input_tree = 0
     shallow_tree = (9, 8)
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, list(shallow_tree))
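
The edits in this file, as in the rest of the patch, are a mechanical rename. As a rough sketch of how such a rename could be scripted (an assumption about tooling, shown for illustration only; not the tool actually used to produce this change):

    import pathlib
    import re

    OLD, NEW = "assertRaisesRegexp", "assertRaisesRegex"


    def rewrite(path: pathlib.Path) -> None:
        text = path.read_text()
        # \b keeps identifiers that merely contain the old name untouched.
        new_text = re.sub(rf"\b{OLD}\b", NEW, text)
        if new_text != text:
            path.write_text(new_text)


    for test_file in pathlib.Path("tensorflow").rglob("*_test.py"):
        rewrite(test_file)

A real migration would also reflow continuation lines to the one-character-shorter call name, as the hunks throughout this patch do.
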
diff --git a/tensorflow/python/data/util/structure_test.py b/tensorflow/python/data/util/structure_test.py
index c4f9af69eb4..f44f3342799 100644
--- a/tensorflow/python/data/util/structure_test.py
+++ b/tensorflow/python/data/util/structure_test.py
@@ -417,46 +417,46 @@ class StructureTest(test_base.DatasetTestBase, parameterized.TestCase,
     s_nest = structure.type_spec_from_value(value_nest)
     flat_nest = structure.to_tensor_list(s_nest, value_nest)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"SparseTensor.* is not convertible to a tensor with "
         r"dtype.*float32.* and shape \(\)"):
       structure.to_tensor_list(s_tensor, value_sparse_tensor)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_tensor, value_nest)
 
-    with self.assertRaisesRegexp(
-        TypeError, "Neither a SparseTensor nor SparseTensorValue"):
+    with self.assertRaisesRegex(TypeError,
+                                "Neither a SparseTensor nor SparseTensorValue"):
       structure.to_tensor_list(s_sparse_tensor, value_tensor)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_sparse_tensor, value_nest)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_nest, value_tensor)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_nest, value_sparse_tensor)
 
-    with self.assertRaisesRegexp(ValueError, r"Incompatible input:"):
+    with self.assertRaisesRegex(ValueError, r"Incompatible input:"):
       structure.from_tensor_list(s_tensor, flat_sparse_tensor)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 1 tensors but got 2."):
+    with self.assertRaisesRegex(ValueError, "Expected 1 tensors but got 2."):
       structure.from_tensor_list(s_tensor, flat_nest)
 
-    with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
+    with self.assertRaisesRegex(ValueError, "Incompatible input: "):
       structure.from_tensor_list(s_sparse_tensor, flat_tensor)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 1 tensors but got 2."):
+    with self.assertRaisesRegex(ValueError, "Expected 1 tensors but got 2."):
       structure.from_tensor_list(s_sparse_tensor, flat_nest)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 1."):
+    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 1."):
       structure.from_tensor_list(s_nest, flat_tensor)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 1."):
+    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 1."):
       structure.from_tensor_list(s_nest, flat_sparse_tensor)
 
   def testIncompatibleNestedStructure(self):
@@ -498,20 +498,20 @@ class StructureTest(test_base.DatasetTestBase, parameterized.TestCase,
     s_2 = structure.type_spec_from_value(value_2)
     flat_s_2 = structure.to_tensor_list(s_2, value_2)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"SparseTensor.* is not convertible to a tensor with "
         r"dtype.*int32.* and shape \(3,\)"):
       structure.to_tensor_list(s_0, value_1)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_0, value_2)
 
-    with self.assertRaisesRegexp(
-        TypeError, "Neither a SparseTensor nor SparseTensorValue"):
+    with self.assertRaisesRegex(TypeError,
+                                "Neither a SparseTensor nor SparseTensorValue"):
       structure.to_tensor_list(s_1, value_0)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_1, value_2)
 
@@ -519,30 +519,30 @@ class StructureTest(test_base.DatasetTestBase, parameterized.TestCase,
     # needs to account for "a" coming before or after "b". It might be worth
     # adding a deterministic repr for these error messages (among other
     # improvements).
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_2, value_0)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The two structures don't have the same nested structure."):
       structure.to_tensor_list(s_2, value_1)
 
-    with self.assertRaisesRegexp(ValueError, r"Incompatible input:"):
+    with self.assertRaisesRegex(ValueError, r"Incompatible input:"):
       structure.from_tensor_list(s_0, flat_s_1)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 3."):
+    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 3."):
       structure.from_tensor_list(s_0, flat_s_2)
 
-    with self.assertRaisesRegexp(ValueError, "Incompatible input: "):
+    with self.assertRaisesRegex(ValueError, "Incompatible input: "):
       structure.from_tensor_list(s_1, flat_s_0)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 2 tensors but got 3."):
+    with self.assertRaisesRegex(ValueError, "Expected 2 tensors but got 3."):
       structure.from_tensor_list(s_1, flat_s_2)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 3 tensors but got 2."):
+    with self.assertRaisesRegex(ValueError, "Expected 3 tensors but got 2."):
       structure.from_tensor_list(s_2, flat_s_0)
 
-    with self.assertRaisesRegexp(ValueError, "Expected 3 tensors but got 2."):
+    with self.assertRaisesRegex(ValueError, "Expected 3 tensors but got 2."):
       structure.from_tensor_list(s_2, flat_s_1)
 
   @parameterized.named_parameters(
diff --git a/tensorflow/python/debug/cli/analyzer_cli_test.py b/tensorflow/python/debug/cli/analyzer_cli_test.py
index 58a0f3546f8..cba446e8157 100644
--- a/tensorflow/python/debug/cli/analyzer_cli_test.py
+++ b/tensorflow/python/debug/cli/analyzer_cli_test.py
@@ -1340,24 +1340,24 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
     analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
                                           _cli_config_from_temp_file())
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Input argument filter_name cannot be empty."):
+    with self.assertRaisesRegex(ValueError,
+                                "Input argument filter_name cannot be empty."):
       analyzer.add_tensor_filter("", lambda datum, tensor: True)
 
   def testAddTensorFilterNonStrName(self):
     analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
                                           _cli_config_from_temp_file())
 
-    with self.assertRaisesRegexp(
-        TypeError,
-        "Input argument filter_name is expected to be str, ""but is not"):
+    with self.assertRaisesRegex(
+        TypeError, "Input argument filter_name is expected to be str, "
+        "but is not"):
       analyzer.add_tensor_filter(1, lambda datum, tensor: True)
 
   def testAddGetTensorFilterNonCallable(self):
     analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump,
                                           _cli_config_from_temp_file())
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, "Input argument filter_callable is expected to be callable, "
         "but is not."):
       analyzer.add_tensor_filter("foo_filter", "bar")
@@ -1367,8 +1367,8 @@ class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
                                           _cli_config_from_temp_file())
 
     analyzer.add_tensor_filter("foo_filter", lambda datum, tensor: True)
-    with self.assertRaisesRegexp(ValueError,
-                                 "There is no tensor filter named \"bar\""):
+    with self.assertRaisesRegex(ValueError,
+                                "There is no tensor filter named \"bar\""):
       analyzer.get_tensor_filter("bar")
 
   def _findSourceLine(self, annotated_source, line_number):
diff --git a/tensorflow/python/debug/cli/cli_shared_test.py b/tensorflow/python/debug/cli/cli_shared_test.py
index a7ccd846267..3182d29d125 100644
--- a/tensorflow/python/debug/cli/cli_shared_test.py
+++ b/tensorflow/python/debug/cli/cli_shared_test.py
@@ -101,7 +101,7 @@ class TimeToReadableStrTest(test_util.TensorFlowTestCase):
                      cli_shared.time_to_readable_str(
                          0, force_time_unit=cli_shared.TIME_UNIT_S))
 
-    with self.assertRaisesRegexp(ValueError, r"Invalid time unit: ks"):
+    with self.assertRaisesRegex(ValueError, r"Invalid time unit: ks"):
       cli_shared.time_to_readable_str(100, force_time_unit="ks")
 
 
diff --git a/tensorflow/python/debug/cli/command_parser_test.py b/tensorflow/python/debug/cli/command_parser_test.py
index ae7468fd09d..156aad27f38 100644
--- a/tensorflow/python/debug/cli/command_parser_test.py
+++ b/tensorflow/python/debug/cli/command_parser_test.py
@@ -121,7 +121,7 @@ class ExtractOutputFilePathTest(test_util.TensorFlowTestCase):
     self.assertEqual(output_path, "/tmp/foo.txt")
 
   def testHasGreaterThanSignButNoFileNameCausesSyntaxError(self):
-    with self.assertRaisesRegexp(SyntaxError, "Redirect file path is empty"):
+    with self.assertRaisesRegex(SyntaxError, "Redirect file path is empty"):
       command_parser.extract_output_file_path(
           ["pt", "a:0", ">"])
 
@@ -256,15 +256,15 @@ class ParseIndicesTest(test_util.TensorFlowTestCase):
     self.assertEqual([3, 4, -5], command_parser.parse_indices("3,4,-5"))
 
   def testParseInvalidIndicesStringsWithoutBrackets(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"invalid literal for int\(\) with base 10: 'a'"):
       self.assertEqual([0], command_parser.parse_indices("0,a"))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"invalid literal for int\(\) with base 10: '2\]'"):
       self.assertEqual([0], command_parser.parse_indices("1, 2]"))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"invalid literal for int\(\) with base 10: ''"):
       self.assertEqual([0], command_parser.parse_indices("3, 4,"))
 
@@ -296,20 +296,20 @@ class ParseRangesTest(test_util.TensorFlowTestCase):
     with self.assertRaises(SyntaxError):
       command_parser.parse_ranges("[[1,2]")
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Incorrect number of elements in range"):
+    with self.assertRaisesRegex(ValueError,
+                                "Incorrect number of elements in range"):
       command_parser.parse_ranges("[1,2,3]")
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Incorrect number of elements in range"):
+    with self.assertRaisesRegex(ValueError,
+                                "Incorrect number of elements in range"):
       command_parser.parse_ranges("[inf]")
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Incorrect type in the 1st element of range"):
+    with self.assertRaisesRegex(ValueError,
+                                "Incorrect type in the 1st element of range"):
       command_parser.parse_ranges("[1j, 1]")
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Incorrect type in the 2nd element of range"):
+    with self.assertRaisesRegex(ValueError,
+                                "Incorrect type in the 2nd element of range"):
       command_parser.parse_ranges("[1, 1j]")
 
 
@@ -350,11 +350,11 @@ class ParseReadableSizeStrTest(test_util.TensorFlowTestCase):
                      command_parser.parse_readable_size_str("0.25G"))
 
   def testParseUnsupportedUnitRaisesException(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Failed to parsed human-readable byte size str: \"0foo\""):
       command_parser.parse_readable_size_str("0foo")
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Failed to parsed human-readable byte size str: \"2E\""):
       command_parser.parse_readable_size_str("2EB")
 
@@ -377,15 +377,13 @@ class ParseReadableTimeStrTest(test_util.TensorFlowTestCase):
     self.assertEqual(2e3, command_parser.parse_readable_time_str("2ms"))
 
   def testParseUnsupportedUnitRaisesException(self):
-    with self.assertRaisesRegexp(
-        ValueError, r".*float.*2us.*"):
+    with self.assertRaisesRegex(ValueError, r".*float.*2us.*"):
       command_parser.parse_readable_time_str("2uss")
 
-    with self.assertRaisesRegexp(
-        ValueError, r".*float.*2m.*"):
+    with self.assertRaisesRegex(ValueError, r".*float.*2m.*"):
       command_parser.parse_readable_time_str("2m")
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"Invalid time -1. Time value must be positive."):
       command_parser.parse_readable_time_str("-1s")
 
@@ -393,103 +391,104 @@ class ParseReadableTimeStrTest(test_util.TensorFlowTestCase):
 class ParseInterval(test_util.TensorFlowTestCase):
 
   def testParseTimeInterval(self):
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(10, True, 1e3, True),
         command_parser.parse_time_interval("[10us, 1ms]"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(10, False, 1e3, False),
         command_parser.parse_time_interval("(10us, 1ms)"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(10, False, 1e3, True),
         command_parser.parse_time_interval("(10us, 1ms]"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(10, True, 1e3, False),
         command_parser.parse_time_interval("[10us, 1ms)"))
-    self.assertEquals(command_parser.Interval(0, False, 1e3, True),
-                      command_parser.parse_time_interval("<=1ms"))
-    self.assertEquals(
+    self.assertEqual(
+        command_parser.Interval(0, False, 1e3, True),
+        command_parser.parse_time_interval("<=1ms"))
+    self.assertEqual(
         command_parser.Interval(1e3, True, float("inf"), False),
         command_parser.parse_time_interval(">=1ms"))
-    self.assertEquals(command_parser.Interval(0, False, 1e3, False),
-                      command_parser.parse_time_interval("<1ms"))
-    self.assertEquals(
+    self.assertEqual(
+        command_parser.Interval(0, False, 1e3, False),
+        command_parser.parse_time_interval("<1ms"))
+    self.assertEqual(
         command_parser.Interval(1e3, False, float("inf"), False),
         command_parser.parse_time_interval(">1ms"))
 
   def testParseTimeGreaterLessThanWithInvalidValueStrings(self):
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after >= "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after >= "):
       command_parser.parse_time_interval(">=wms")
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after > "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after > "):
       command_parser.parse_time_interval(">Yms")
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after <= "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after <= "):
       command_parser.parse_time_interval("<= _ms")
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after < "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after < "):
       command_parser.parse_time_interval("<-ms")
 
   def testParseTimeIntervalsWithInvalidValueStrings(self):
-    with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
+    with self.assertRaisesRegex(ValueError, "Invalid first item in interval:"):
       command_parser.parse_time_interval("[wms, 10ms]")
-    with self.assertRaisesRegexp(ValueError,
-                                 "Invalid second item in interval:"):
+    with self.assertRaisesRegex(ValueError, "Invalid second item in interval:"):
       command_parser.parse_time_interval("[ 0ms, _ms]")
-    with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
+    with self.assertRaisesRegex(ValueError, "Invalid first item in interval:"):
       command_parser.parse_time_interval("(xms, _ms]")
-    with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
+    with self.assertRaisesRegex(ValueError, "Invalid first item in interval:"):
       command_parser.parse_time_interval("((3ms, _ms)")
 
   def testInvalidTimeIntervalRaisesException(self):
-    with self.assertRaisesRegexp(
-        ValueError,
-        r"Invalid interval format: \[10us, 1ms. Valid formats are: "
+    with self.assertRaisesRegex(
+        ValueError, r"Invalid interval format: \[10us, 1ms. Valid formats are: "
         r"\[min, max\], \(min, max\), <max, >min"):
       command_parser.parse_time_interval("[10us, 1ms")
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"Incorrect interval format: \[10us, 1ms, 2ms\]. Interval should "
         r"specify two values: \[min, max\] or \(min, max\)"):
       command_parser.parse_time_interval("[10us, 1ms, 2ms]")
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"Invalid interval \[1s, 1ms\]. Start must be before end of interval."):
       command_parser.parse_time_interval("[1s, 1ms]")
 
   def testParseMemoryInterval(self):
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(1024, True, 2048, True),
         command_parser.parse_memory_interval("[1k, 2k]"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(1024, False, 2048, False),
         command_parser.parse_memory_interval("(1kB, 2kB)"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(1024, False, 2048, True),
         command_parser.parse_memory_interval("(1k, 2k]"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(1024, True, 2048, False),
         command_parser.parse_memory_interval("[1k, 2k)"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(0, False, 2048, True),
         command_parser.parse_memory_interval("<=2k"))
-    self.assertEquals(
+    self.assertEqual(
         command_parser.Interval(11, True, float("inf"), False),
         command_parser.parse_memory_interval(">=11"))
-    self.assertEquals(command_parser.Interval(0, False, 2048, False),
-                      command_parser.parse_memory_interval("<2k"))
-    self.assertEquals(
+    self.assertEqual(
+        command_parser.Interval(0, False, 2048, False),
+        command_parser.parse_memory_interval("<2k"))
+    self.assertEqual(
         command_parser.Interval(11, False, float("inf"), False),
         command_parser.parse_memory_interval(">11"))
 
   def testParseMemoryIntervalsWithInvalidValueStrings(self):
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after >= "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after >= "):
       command_parser.parse_time_interval(">=wM")
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after > "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after > "):
       command_parser.parse_time_interval(">YM")
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after <= "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after <= "):
       command_parser.parse_time_interval("<= _MB")
-    with self.assertRaisesRegexp(ValueError, "Invalid value string after < "):
+    with self.assertRaisesRegex(ValueError, "Invalid value string after < "):
       command_parser.parse_time_interval("<-MB")
 
   def testInvalidMemoryIntervalRaisesException(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"Invalid interval \[5k, 3k\]. Start of interval must be less than or "
         "equal to end of interval."):
diff --git a/tensorflow/python/debug/cli/curses_ui_test.py b/tensorflow/python/debug/cli/curses_ui_test.py
index 3c09ad64876..3ffa031923d 100644
--- a/tensorflow/python/debug/cli/curses_ui_test.py
+++ b/tensorflow/python/debug/cli/curses_ui_test.py
@@ -1532,8 +1532,8 @@ class CursesTest(test_util.TensorFlowTestCase):
 class ScrollBarTest(test_util.TensorFlowTestCase):
 
   def testConstructorRaisesExceptionForNotEnoughHeight(self):
-    with self.assertRaisesRegexp(
-        ValueError, r"Insufficient height for ScrollBar \(2\)"):
+    with self.assertRaisesRegex(ValueError,
+                                r"Insufficient height for ScrollBar \(2\)"):
       curses_ui.ScrollBar(0, 0, 1, 1, 0, 0)
 
   def testLayoutIsEmptyForZeroRow(self):
diff --git a/tensorflow/python/debug/cli/curses_widgets_test.py b/tensorflow/python/debug/cli/curses_widgets_test.py
index fb0d3f4a0d6..f3a89e16e74 100644
--- a/tensorflow/python/debug/cli/curses_widgets_test.py
+++ b/tensorflow/python/debug/cli/curses_widgets_test.py
@@ -43,11 +43,11 @@ class CNHTest(test_util.TensorFlowTestCase):
     self.assertFalse(nav_history.can_go_forward())
     self.assertFalse(nav_history.can_go_back())
 
-    with self.assertRaisesRegexp(ValueError, "Empty navigation history"):
+    with self.assertRaisesRegex(ValueError, "Empty navigation history"):
       nav_history.go_back()
-    with self.assertRaisesRegexp(ValueError, "Empty navigation history"):
+    with self.assertRaisesRegex(ValueError, "Empty navigation history"):
       nav_history.go_forward()
-    with self.assertRaisesRegexp(ValueError, "Empty navigation history"):
+    with self.assertRaisesRegex(ValueError, "Empty navigation history"):
       nav_history.update_scroll_position(3)
 
   def testAddOneItemWorks(self):
diff --git a/tensorflow/python/debug/cli/debugger_cli_common_test.py b/tensorflow/python/debug/cli/debugger_cli_common_test.py
index eb46a0a4062..93df845c4c5 100644
--- a/tensorflow/python/debug/cli/debugger_cli_common_test.py
+++ b/tensorflow/python/debug/cli/debugger_cli_common_test.py
@@ -64,7 +64,7 @@ class RichTextLinesTest(test_util.TensorFlowTestCase):
     self.assertEqual(2, screen_output.num_lines())
 
   def testRichTextLinesConstructorWithInvalidType(self):
-    with self.assertRaisesRegexp(ValueError, "Unexpected type in lines"):
+    with self.assertRaisesRegex(ValueError, "Unexpected type in lines"):
       debugger_cli_common.RichTextLines(123)
 
   def testRichTextLinesConstructorWithString(self):
@@ -320,7 +320,7 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
 
     # Attempt to register an empty-string as a command prefix should trigger
     # an exception.
-    with self.assertRaisesRegexp(ValueError, "Empty command prefix"):
+    with self.assertRaisesRegex(ValueError, "Empty command prefix"):
       registry.register_command_handler("", self._noop_handler, "")
 
   def testRegisterAndInvokeHandler(self):
@@ -335,11 +335,11 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
 
     # Attempt to invoke an unregistered command prefix should trigger an
     # exception.
-    with self.assertRaisesRegexp(ValueError, "No handler is registered"):
+    with self.assertRaisesRegex(ValueError, "No handler is registered"):
       registry.dispatch_command("beep", [])
 
     # Empty command prefix should trigger an exception.
-    with self.assertRaisesRegexp(ValueError, "Prefix is empty"):
+    with self.assertRaisesRegex(ValueError, "Prefix is empty"):
       registry.dispatch_command("", [])
 
   def testExitingHandler(self):
@@ -391,7 +391,7 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
 
     # If the command handler fails to return a RichTextLines instance, an error
     # should be triggered.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "Return value from command handler.*is not None or a RichTextLines "
         "instance"):
@@ -403,7 +403,7 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
 
     # Registering the same command prefix more than once should trigger an
     # exception.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "A handler is already registered for command prefix"):
       registry.register_command_handler("noop", self._noop_handler, "")
 
@@ -416,8 +416,8 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
         "noop", self._noop_handler, "", prefix_aliases=["n"])
 
     # Clash with existing alias.
-    with self.assertRaisesRegexp(ValueError,
-                                 "clashes with existing prefixes or aliases"):
+    with self.assertRaisesRegex(ValueError,
+                                "clashes with existing prefixes or aliases"):
       registry.register_command_handler(
           "cols", self._echo_screen_cols, "", prefix_aliases=["n"])
 
@@ -425,8 +425,8 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
     self.assertFalse(registry.is_registered("cols"))
 
     # Aliases can also clash with command prefixes.
-    with self.assertRaisesRegexp(ValueError,
-                                 "clashes with existing prefixes or aliases"):
+    with self.assertRaisesRegex(ValueError,
+                                "clashes with existing prefixes or aliases"):
       registry.register_command_handler(
           "cols", self._echo_screen_cols, "", prefix_aliases=["noop"])
 
@@ -451,13 +451,13 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
     registry = debugger_cli_common.CommandHandlerRegistry()
 
     # Attempt to register a non-callable handler should fail.
-    with self.assertRaisesRegexp(ValueError, "handler is not callable"):
+    with self.assertRaisesRegex(ValueError, "handler is not callable"):
       registry.register_command_handler("non_callable", 1, "")
 
   def testRegisterHandlerWithInvalidHelpInfoType(self):
     registry = debugger_cli_common.CommandHandlerRegistry()
 
-    with self.assertRaisesRegexp(ValueError, "help_info is not a str"):
+    with self.assertRaisesRegex(ValueError, "help_info is not a str"):
       registry.register_command_handler("noop", self._noop_handler, ["foo"])
 
   def testGetHelpFull(self):
@@ -629,7 +629,7 @@ class RegexFindTest(test_util.TensorFlowTestCase):
         debugger_cli_common.REGEX_MATCH_LINES_KEY])
 
   def testInvalidRegex(self):
-    with self.assertRaisesRegexp(ValueError, "Invalid regular expression"):
+    with self.assertRaisesRegex(ValueError, "Invalid regular expression"):
       debugger_cli_common.regex_find(self._orig_screen_output, "[", "yellow")
 
   def testRegexFindOnPrependedLinesWorks(self):
@@ -755,11 +755,11 @@ class WrapScreenOutputTest(test_util.TensorFlowTestCase):
     self.assertEqual(new_line_indices, [0, 2, 5])
 
   def testWrappingInvalidArguments(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 "Invalid type of input screen_output"):
+    with self.assertRaisesRegex(ValueError,
+                                "Invalid type of input screen_output"):
       debugger_cli_common.wrap_rich_text_lines("foo", 12)
 
-    with self.assertRaisesRegexp(ValueError, "Invalid type of input cols"):
+    with self.assertRaisesRegex(ValueError, "Invalid type of input cols"):
       debugger_cli_common.wrap_rich_text_lines(
           debugger_cli_common.RichTextLines(["foo", "bar"]), "12")
 
@@ -813,7 +813,7 @@ class SliceRichTextLinesTest(test_util.TensorFlowTestCase):
     self.assertEqual(1, sliced.num_lines())
 
   def testAttemptSliceWithNegativeIndex(self):
-    with self.assertRaisesRegexp(ValueError, "Encountered negative index"):
+    with self.assertRaisesRegex(ValueError, "Encountered negative index"):
       self._original.slice(0, -1)
 
 
@@ -872,8 +872,8 @@ class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
                      self._tc_reg.get_completions("node_info", "node_"))
 
   def testExtendCompletionItemsNonexistentContext(self):
-    with self.assertRaisesRegexp(
-        KeyError, "Context word \"foo\" has not been registered"):
+    with self.assertRaisesRegex(KeyError,
+                                "Context word \"foo\" has not been registered"):
       self._tc_reg.extend_comp_items("foo", ["node_A:1", "node_A:2"])
 
   def testRemoveCompletionItems(self):
@@ -891,8 +891,8 @@ class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
                      self._tc_reg.get_completions("node_info", "node_"))
 
   def testRemoveCompletionItemsNonexistentContext(self):
-    with self.assertRaisesRegexp(
-        KeyError, "Context word \"foo\" has not been registered"):
+    with self.assertRaisesRegex(KeyError,
+                                "Context word \"foo\" has not been registered"):
       self._tc_reg.remove_comp_items("foo", ["node_a:1", "node_a:2"])
 
   def testDeregisterContext(self):
@@ -921,7 +921,7 @@ class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
 
     self._tc_reg.deregister_context(["print_tensor"])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         KeyError,
         "Cannot deregister unregistered context word \"print_tensor\""):
       self._tc_reg.deregister_context(["print_tensor"])
@@ -992,7 +992,7 @@ class CommandHistoryTest(test_util.TensorFlowTestCase):
     self.assertEqual([], self._cmd_hist.lookup_prefix("print_tensor", 10))
 
   def testAddNonStrCommand(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, "Attempt to enter non-str entry to command history"):
       self._cmd_hist.add_command(["print_tensor node_a:0"])
 
diff --git a/tensorflow/python/debug/cli/evaluator_test.py b/tensorflow/python/debug/cli/evaluator_test.py
index c851ad781e7..3116ab6f957 100644
--- a/tensorflow/python/debug/cli/evaluator_test.py
+++ b/tensorflow/python/debug/cli/evaluator_test.py
@@ -102,14 +102,14 @@ class ParseDebugTensorNameTest(test_util.TensorFlowTestCase):
     self.assertEqual(0, exec_index)
 
   def testParseMalformedDebugTensorName(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"The debug tensor name in the to-be-evaluated expression is "
         r"malformed:"):
       evaluator._parse_debug_tensor_name(
           "/job:ps/replica:0/task:2/cpu:0:foo:1:DebugNanCount:1337")
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"The debug tensor name in the to-be-evaluated expression is "
         r"malformed:"):
@@ -184,7 +184,7 @@ class EvaluatorTest(test_util.TensorFlowTestCase):
     with test.mock.patch.object(
         dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
       ev = evaluator.ExpressionEvaluator(dump)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Eval failed due to the value of .* being unavailable"):
         ev.evaluate("np.matmul(`a:0`, `b:0`)")
 
@@ -206,7 +206,7 @@ class EvaluatorTest(test_util.TensorFlowTestCase):
     with test.mock.patch.object(
         dump, "get_tensors", side_effect=fake_get_tensors, autospec=True):
       ev = evaluator.ExpressionEvaluator(dump)
-      with self.assertRaisesRegexp(ValueError, r"multiple \(2\) devices"):
+      with self.assertRaisesRegex(ValueError, r"multiple \(2\) devices"):
         ev.evaluate("`a:0` + `a:0`")
 
       self.assertAllClose(
@@ -252,12 +252,12 @@ class EvaluatorTest(test_util.TensorFlowTestCase):
   def testEvaluateExpressionWithInvalidDebugTensorName(self):
     dump = test.mock.MagicMock()
     ev = evaluator.ExpressionEvaluator(dump)
-    with self.assertRaisesRegexp(
-        ValueError, r".* tensor name .* expression .* malformed"):
+    with self.assertRaisesRegex(ValueError,
+                                r".* tensor name .* expression .* malformed"):
       ev.evaluate("np.matmul(`a`, `b`)")
 
-    with self.assertRaisesRegexp(
-        ValueError, r".* tensor name .* expression .* malformed"):
+    with self.assertRaisesRegex(ValueError,
+                                r".* tensor name .* expression .* malformed"):
       ev.evaluate("np.matmul(`a:0:DebugIdentity:0`, `b:1:DebugNanCount:2`)")
 
     with self.assertRaises(ValueError):
diff --git a/tensorflow/python/debug/cli/profile_analyzer_cli_test.py b/tensorflow/python/debug/cli/profile_analyzer_cli_test.py
index ee4c5a1a6fc..0027c78d4d1 100644
--- a/tensorflow/python/debug/cli/profile_analyzer_cli_test.py
+++ b/tensorflow/python/debug/cli/profile_analyzer_cli_test.py
@@ -79,7 +79,7 @@ class ProfileAnalyzerListProfileTest(test_util.TensorFlowTestCase):
 
     prof_analyzer = profile_analyzer_cli.ProfileAnalyzer(graph, run_metadata)
     prof_output = prof_analyzer.list_profile([]).lines
-    self.assertEquals([""], prof_output)
+    self.assertEqual([""], prof_output)
 
   def testSingleDevice(self):
     node1 = step_stats_pb2.NodeExecStats(
@@ -211,22 +211,22 @@ class ProfileAnalyzerListProfileTest(test_util.TensorFlowTestCase):
 
     # Default sort by start time (i.e. all_start_micros).
     prof_output = prof_analyzer.list_profile([]).lines
-    self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
+    self.assertRegex("".join(prof_output), r"Mul/456.*Add/123")
     # Default sort in reverse.
     prof_output = prof_analyzer.list_profile(["-r"]).lines
-    self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
+    self.assertRegex("".join(prof_output), r"Add/123.*Mul/456")
     # Sort by name.
     prof_output = prof_analyzer.list_profile(["-s", "node"]).lines
-    self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
+    self.assertRegex("".join(prof_output), r"Add/123.*Mul/456")
     # Sort by op time (i.e. op_end_rel_micros - op_start_rel_micros).
     prof_output = prof_analyzer.list_profile(["-s", "op_time"]).lines
-    self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
+    self.assertRegex("".join(prof_output), r"Mul/456.*Add/123")
     # Sort by exec time (i.e. all_end_rel_micros).
     prof_output = prof_analyzer.list_profile(["-s", "exec_time"]).lines
-    self.assertRegexpMatches("".join(prof_output), r"Add/123.*Mul/456")
+    self.assertRegex("".join(prof_output), r"Add/123.*Mul/456")
     # Sort by line number.
     prof_output = prof_analyzer.list_profile(["-s", "line"]).lines
-    self.assertRegexpMatches("".join(prof_output), r"Mul/456.*Add/123")
+    self.assertRegex("".join(prof_output), r"Mul/456.*Add/123")
 
   def testFiltering(self):
     node1 = step_stats_pb2.NodeExecStats(
diff --git a/tensorflow/python/debug/cli/readline_ui_test.py b/tensorflow/python/debug/cli/readline_ui_test.py
index 267a158edf4..011ba23fc4d 100644
--- a/tensorflow/python/debug/cli/readline_ui_test.py
+++ b/tensorflow/python/debug/cli/readline_ui_test.py
@@ -88,13 +88,13 @@ class CursesTest(test_util.TensorFlowTestCase):
     self.assertIsInstance(ui, readline_ui.ReadlineUI)
 
   def testUIFactoryRaisesExceptionOnInvalidUIType(self):
-    with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'foobar'"):
+    with self.assertRaisesRegex(ValueError, "Invalid ui_type: 'foobar'"):
       ui_factory.get_ui(
           "foobar",
           config=cli_config.CLIConfig(config_file_path=self._tmp_config_path))
 
   def testUIFactoryRaisesExceptionOnInvalidUITypeGivenAvailable(self):
-    with self.assertRaisesRegexp(ValueError, "Invalid ui_type: 'readline'"):
+    with self.assertRaisesRegex(ValueError, "Invalid ui_type: 'readline'"):
       ui_factory.get_ui(
           "readline",
           available_ui_types=["curses"],
diff --git a/tensorflow/python/debug/cli/tensor_format_test.py b/tensorflow/python/debug/cli/tensor_format_test.py
index 804b6c0143d..99ce343d7be 100644
--- a/tensorflow/python/debug/cli/tensor_format_test.py
+++ b/tensorflow/python/debug/cli/tensor_format_test.py
@@ -373,16 +373,13 @@ class RichTextLinesTest(test_util.TensorFlowTestCase):
 
     self._checkTensorElementLocations(out, a)
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices exceed tensor dimensions"):
+    with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
       tensor_format.locate_tensor_element(out, [20])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices contain negative"):
+    with self.assertRaisesRegex(ValueError, "Indices contain negative"):
       tensor_format.locate_tensor_element(out, [-1])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Dimensions mismatch"):
+    with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
       tensor_format.locate_tensor_element(out, [0, 0])
 
   def testLocateTensorElement1DNoEllipsisBatchMode(self):
@@ -407,18 +404,17 @@ class RichTextLinesTest(test_util.TensorFlowTestCase):
         self, ["Tensor \"a\":", ""], out.lines[:2])
     self.assertEqual(repr(a).split("\n"), out.lines[2:])
 
-    with self.assertRaisesRegexp(ValueError, "Dimensions mismatch"):
+    with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
       tensor_format.locate_tensor_element(out, [[0, 0], [0]])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Indices exceed tensor dimensions"):
+    with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
       tensor_format.locate_tensor_element(out, [[0], [20]])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 r"Indices contain negative value\(s\)"):
+    with self.assertRaisesRegex(ValueError,
+                                r"Indices contain negative value\(s\)"):
       tensor_format.locate_tensor_element(out, [[0], [-1]])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Input indices sets are not in ascending order"):
       tensor_format.locate_tensor_element(out, [[5], [0]])
 
@@ -447,16 +443,13 @@ class RichTextLinesTest(test_util.TensorFlowTestCase):
 
     self._checkTensorElementLocations(out, a)
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices exceed tensor dimensions"):
+    with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
       tensor_format.locate_tensor_element(out, [1, 4])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices contain negative"):
+    with self.assertRaisesRegex(ValueError, "Indices contain negative"):
       tensor_format.locate_tensor_element(out, [-1, 2])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Dimensions mismatch"):
+    with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
       tensor_format.locate_tensor_element(out, [0])
 
   def testLocateTensorElement2DNoEllipsisWithNumericSummary(self):
@@ -479,16 +472,13 @@ class RichTextLinesTest(test_util.TensorFlowTestCase):
 
     self._checkTensorElementLocations(out, a)
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices exceed tensor dimensions"):
+    with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
       tensor_format.locate_tensor_element(out, [1, 4])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices contain negative"):
+    with self.assertRaisesRegex(ValueError, "Indices contain negative"):
       tensor_format.locate_tensor_element(out, [-1, 2])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Dimensions mismatch"):
+    with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
       tensor_format.locate_tensor_element(out, [0])
 
   def testLocateTensorElement3DWithEllipses(self):
@@ -564,16 +554,13 @@ class RichTextLinesTest(test_util.TensorFlowTestCase):
     self.assertIsNone(start_col)  # Past ellipsis.
     self.assertIsNone(end_col)
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices exceed tensor dimensions"):
+    with self.assertRaisesRegex(ValueError, "Indices exceed tensor dimensions"):
       tensor_format.locate_tensor_element(out, [11, 5, 5])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Indices contain negative"):
+    with self.assertRaisesRegex(ValueError, "Indices contain negative"):
       tensor_format.locate_tensor_element(out, [-1, 5, 5])
 
-    with self.assertRaisesRegexp(
-        ValueError, "Dimensions mismatch"):
+    with self.assertRaisesRegex(ValueError, "Dimensions mismatch"):
       tensor_format.locate_tensor_element(out, [5, 5])
 
   def testLocateTensorElement3DWithEllipsesBatchMode(self):
@@ -633,7 +620,7 @@ class RichTextLinesTest(test_util.TensorFlowTestCase):
     self.assertEqual(["Tensor \"a\":", "", "Uninitialized tensor:"],
                      out.lines[:3])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         AttributeError, "tensor_metadata is not available in annotations"):
       tensor_format.locate_tensor_element(out, [0])
 
diff --git a/tensorflow/python/debug/lib/debug_data_test.py b/tensorflow/python/debug/lib/debug_data_test.py
index 6796187ac66..d7ba5cde1f7 100644
--- a/tensorflow/python/debug/lib/debug_data_test.py
+++ b/tensorflow/python/debug/lib/debug_data_test.py
@@ -182,7 +182,7 @@ class DebugDumpDirTest(test_util.TensorFlowTestCase):
         gpu_1_dir, "node_foo_1_2_DebugIdentity_1472563253536387"), "wb")
 
   def testDebugDumpDir_nonexistentDumpRoot(self):
-    with self.assertRaisesRegexp(IOError, "does not exist"):
+    with self.assertRaisesRegex(IOError, "does not exist"):
       debug_data.DebugDumpDir(tempfile.mktemp() + "_foo")
 
   def testDebugDumpDir_invalidFileNamingPattern(self):
@@ -194,8 +194,8 @@ class DebugDumpDirTest(test_util.TensorFlowTestCase):
     os.makedirs(device_dir)
     open(os.path.join(device_dir, "node1_DebugIdentity_1234"), "wb")
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "does not conform to the naming pattern"):
+    with self.assertRaisesRegex(ValueError,
+                                "does not conform to the naming pattern"):
       debug_data.DebugDumpDir(self._dump_root)
 
   def testDebugDumpDir_validDuplicateNodeNamesWithMultipleDevices(self):
@@ -228,8 +228,7 @@ class DebugDumpDirTest(test_util.TensorFlowTestCase):
     self.assertEqual(1472563253536385, dump_dir.t0)
     self.assertEqual(3, dump_dir.size)
 
-    with self.assertRaisesRegexp(
-        ValueError, r"Invalid device name: "):
+    with self.assertRaisesRegex(ValueError, r"Invalid device name: "):
       dump_dir.nodes("/job:localhost/replica:0/task:0/device:GPU:2")
     self.assertItemsEqual(["node_foo_1", "node_foo_1", "node_foo_1"],
                           dump_dir.nodes())
@@ -259,8 +258,7 @@ class DebugDumpDirTest(test_util.TensorFlowTestCase):
     node.op = "FooOp"
     node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
 
-    with self.assertRaisesRegexp(
-        ValueError, r"Duplicate node name on device "):
+    with self.assertRaisesRegex(ValueError, r"Duplicate node name on device "):
       debug_data.DebugDumpDir(
           self._dump_root,
           partition_graphs=[graph_cpu_0, graph_gpu_0, graph_gpu_1])
diff --git a/tensorflow/python/debug/lib/debug_events_writer_test.py b/tensorflow/python/debug/lib/debug_events_writer_test.py
index 3f3f9179e5d..584a758e52d 100644
--- a/tensorflow/python/debug/lib/debug_events_writer_test.py
+++ b/tensorflow/python/debug/lib/debug_events_writer_test.py
@@ -674,8 +674,8 @@ class MultiSetReaderTest(dumping_callback_test_lib.DumpingCallbackTestBase):
           re.sub(r"(tfdbg_events\.\d+)", r"\g<1>1", os.path.basename(src_path)))
       os.rename(src_path, dst_path)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 r"Found multiple \(2\) tfdbg2 runs"):
+    with self.assertRaisesRegex(ValueError,
+                                r"Found multiple \(2\) tfdbg2 runs"):
       debug_events_reader.DebugDataReader(dump_root_0)
 
 
diff --git a/tensorflow/python/debug/lib/debug_gradients_test.py b/tensorflow/python/debug/lib/debug_gradients_test.py
index 92d31171133..95da6cb9ff8 100644
--- a/tensorflow/python/debug/lib/debug_gradients_test.py
+++ b/tensorflow/python/debug/lib/debug_gradients_test.py
@@ -119,8 +119,8 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
   def testCallingIdentifyGradientTwiceWithTheSameGradientsDebuggerErrors(self):
     grad_debugger = debug_gradients.GradientsDebugger()
     grad_debugger.identify_gradient(self.w)
-    with self.assertRaisesRegexp(ValueError,
-                                 "The graph already contains an op named .*"):
+    with self.assertRaisesRegex(ValueError,
+                                "The graph already contains an op named .*"):
       grad_debugger.identify_gradient(self.w)
 
   def testIdentifyGradientWorksOnMultipleLosses(self):
@@ -162,18 +162,18 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
     # registered.
     gradients_impl.gradients(y, [self.u, self.v])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         LookupError,
         r"This GradientsDebugger has not received any gradient tensor for "):
       grad_debugger_1.gradient_tensor(self.w)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         LookupError,
         r"This GradientsDebugger has not received any gradient tensor for "):
       grad_debugger_2.gradient_tensor(self.w)
 
   def testIdentifyGradientRaisesTypeErrorForNonTensorOrTensorNameInput(self):
     grad_debugger = debug_gradients.GradientsDebugger()
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         r"x_tensor must be a str or tf\.Tensor or tf\.Variable, but instead "
         r"has type .*Operation.*"):
@@ -370,7 +370,7 @@ class IdentifyGradientTest(test_util.TensorFlowTestCase):
     self.assertEqual(1, len(u_grad_values))
     self.assertAllClose(30.0, u_grad_values[0])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         LookupError,
         r"This GradientsDebugger has not received any gradient tensor for "
         r"x-tensor v:0"):
diff --git a/tensorflow/python/debug/lib/debug_graphs_test.py b/tensorflow/python/debug/lib/debug_graphs_test.py
index 34257794f11..2d9e859cbac 100644
--- a/tensorflow/python/debug/lib/debug_graphs_test.py
+++ b/tensorflow/python/debug/lib/debug_graphs_test.py
@@ -91,20 +91,20 @@ class ParseDebugNodeNameTest(test_util.TensorFlowTestCase):
   def testParseDebugNodeName_invalidPrefix(self):
     invalid_debug_node_name_1 = "__copy_ns_a/ns_b/node_c:1_0_DebugIdentity"
 
-    with self.assertRaisesRegexp(ValueError, "Invalid prefix"):
+    with self.assertRaisesRegex(ValueError, "Invalid prefix"):
       debug_graphs.parse_debug_node_name(invalid_debug_node_name_1)
 
   def testParseDebugNodeName_missingDebugOpIndex(self):
     invalid_debug_node_name_1 = "__dbg_node1:0_DebugIdentity"
 
-    with self.assertRaisesRegexp(ValueError, "Invalid debug node name"):
+    with self.assertRaisesRegex(ValueError, "Invalid debug node name"):
       debug_graphs.parse_debug_node_name(invalid_debug_node_name_1)
 
   def testParseDebugNodeName_invalidWatchedTensorName(self):
     invalid_debug_node_name_1 = "__dbg_node1_0_DebugIdentity"
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Invalid tensor name in debug node name"):
+    with self.assertRaisesRegex(ValueError,
+                                "Invalid tensor name in debug node name"):
       debug_graphs.parse_debug_node_name(invalid_debug_node_name_1)
 
 
diff --git a/tensorflow/python/debug/lib/debug_v2_ops_test.py b/tensorflow/python/debug/lib/debug_v2_ops_test.py
index d70c505d3fc..a3054ad9e27 100644
--- a/tensorflow/python/debug/lib/debug_v2_ops_test.py
+++ b/tensorflow/python/debug/lib/debug_v2_ops_test.py
@@ -749,7 +749,7 @@ class DebugNumericSummaryV2Test(test_util.TensorFlowTestCase):
     with self.session(graph=ops.Graph()):
       t1 = constant_op.constant([-1.0, 1.0])
       t2 = constant_op.constant([0.0, 0.0])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"pass through test.*had -Inf and \+Inf values"):
         self.evaluate(
@@ -760,7 +760,7 @@ class DebugNumericSummaryV2Test(test_util.TensorFlowTestCase):
     with self.session(graph=ops.Graph()):
       t1 = constant_op.constant([-1.0, 1.0, 0.0])
       t2 = constant_op.constant([0.0, 0.0, 0.0])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"pass through test.*had -Inf, \+Inf, and NaN values"):
         self.evaluate(
@@ -771,7 +771,7 @@ class DebugNumericSummaryV2Test(test_util.TensorFlowTestCase):
     with self.session(graph=ops.Graph()):
       t1 = constant_op.constant([0.0, 1.0])
       t2 = constant_op.constant([0.0, 0.0])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"pass through test.*had \+Inf and NaN values"):
         self.evaluate(
diff --git a/tensorflow/python/debug/lib/dumping_callback_test.py b/tensorflow/python/debug/lib/dumping_callback_test.py
index 982e57b4a81..4260707c504 100644
--- a/tensorflow/python/debug/lib/dumping_callback_test.py
+++ b/tensorflow/python/debug/lib/dumping_callback_test.py
@@ -84,9 +84,8 @@ class DumpingCallbackTest(
       return "/job:localhost/replica:0/task:0/device:CPU:0"
 
   def testInvalidTensorDebugModeCausesError(self):
-    with self.assertRaisesRegexp(
-        ValueError,
-        r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
+    with self.assertRaisesRegex(
+        ValueError, r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
         r"Valid options.*NO_TENSOR.*"):
       dumping_callback.enable_dump_debug_info(
           self.dump_root, tensor_debug_mode="NONSENSICAL")
@@ -947,19 +946,16 @@ class DumpingCallbackTest(
             tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
 
   def testIncorrectTensorDTypeArgFormatLeadsToError(self):
-    with self.assertRaisesRegexp(
-        ValueError,
-        r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
+    with self.assertRaisesRegex(
+        ValueError, r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
       dumping_callback.enable_dump_debug_info(self.dump_root,
                                               tensor_dtypes=dict())
-    with self.assertRaisesRegexp(
-        ValueError,
-        r".*expected.*list.*tuple.*callable.*but received.*"):
+    with self.assertRaisesRegex(
+        ValueError, r".*expected.*list.*tuple.*callable.*but received.*"):
       dumping_callback.enable_dump_debug_info(self.dump_root,
                                               tensor_dtypes="float32")
-    with self.assertRaisesRegexp(
-        ValueError,
-        r".*expected.*list.*tuple.*callable.*but received.*"):
+    with self.assertRaisesRegex(
+        ValueError, r".*expected.*list.*tuple.*callable.*but received.*"):
       dumping_callback.enable_dump_debug_info(
           self.dump_root, tensor_dtypes=dtypes.float32)
     with self.assertRaises(TypeError):
@@ -1220,7 +1216,7 @@ class DumpingCallbackTest(
         # array.
         self.assertAllEqual(tensor_value, [])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
       dumping_callback.enable_dump_debug_info(
           self.dump_root, tensor_debug_mode="FULL_TENSOR")
diff --git a/tensorflow/python/debug/lib/session_debug_grpc_test.py b/tensorflow/python/debug/lib/session_debug_grpc_test.py
index 6cf8d8b5a41..e80ae39828a 100644
--- a/tensorflow/python/debug/lib/session_debug_grpc_test.py
+++ b/tensorflow/python/debug/lib/session_debug_grpc_test.py
@@ -54,8 +54,8 @@ class GrpcDebugServerTest(test_util.TensorFlowTestCase):
     # The server is started asynchronously. It needs to be polled till its state
     # has become started.
 
-    with self.assertRaisesRegexp(
-        ValueError, "Server has already started running"):
+    with self.assertRaisesRegex(ValueError,
+                                "Server has already started running"):
       server.run_server()
 
     server.stop_server().wait()
@@ -68,7 +68,7 @@ class GrpcDebugServerTest(test_util.TensorFlowTestCase):
     server.stop_server().wait()
     server_thread.join()
 
-    with self.assertRaisesRegexp(ValueError, "Server has already stopped"):
+    with self.assertRaisesRegex(ValueError, "Server has already stopped"):
       server.stop_server().wait()
 
   def testRunServerAfterStopRaisesException(self):
@@ -78,7 +78,7 @@ class GrpcDebugServerTest(test_util.TensorFlowTestCase):
     server.stop_server().wait()
     server_thread.join()
 
-    with self.assertRaisesRegexp(ValueError, "Server has already stopped"):
+    with self.assertRaisesRegex(ValueError, "Server has already stopped"):
       server.run_server()
 
   def testStartServerWithoutBlocking(self):
@@ -131,14 +131,14 @@ class SessionDebugGrpcTest(session_debug_testlib.SessionDebugTestBase):
   def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException(self):
     sess = session.Session(
         config=session_debug_testlib.no_rewrite_session_config())
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, "Expected type str or list in grpc_debug_server_addresses"):
       grpc_wrapper.GrpcDebugWrapperSession(sess, 1337)
 
   def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException2(self):
     sess = session.Session(
         config=session_debug_testlib.no_rewrite_session_config())
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, "Expected type str in list grpc_debug_server_addresses"):
       grpc_wrapper.GrpcDebugWrapperSession(sess, ["localhost:1337", 1338])
 
@@ -307,11 +307,10 @@ class SessionDebugGrpcTest(session_debug_testlib.SessionDebugTestBase):
 
     # Check that the server has _not_ received any tracebacks, as a result of
     # the disabling above.
-    with self.assertRaisesRegexp(
-        ValueError, r"Op .*u/read.* does not exist"):
+    with self.assertRaisesRegex(ValueError, r"Op .*u/read.* does not exist"):
       self.assertTrue(self._server.query_op_traceback("u/read"))
-    with self.assertRaisesRegexp(
-        ValueError, r".* has not received any source file"):
+    with self.assertRaisesRegex(ValueError,
+                                r".* has not received any source file"):
       self._server.query_source_file_line(__file__, 1)
 
   def testConstructGrpcDebugHookWithOrWithouGrpcInUrlWorks(self):
@@ -693,11 +692,11 @@ class SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase):
 
         # No op traceback or source code should have been received by the debug
         # server due to the disabling above.
-        with self.assertRaisesRegexp(
-            ValueError, r"Op .*delta_1.* does not exist"):
+        with self.assertRaisesRegex(ValueError,
+                                    r"Op .*delta_1.* does not exist"):
           self.assertTrue(self._server_1.query_op_traceback("delta_1"))
-        with self.assertRaisesRegexp(
-            ValueError, r".* has not received any source file"):
+        with self.assertRaisesRegex(ValueError,
+                                    r".* has not received any source file"):
           self._server_1.query_source_file_line(__file__, 1)
 
   def testGetGrpcDebugWatchesReturnsCorrectAnswer(self):
diff --git a/tensorflow/python/debug/lib/source_utils_test.py b/tensorflow/python/debug/lib/source_utils_test.py
index c9934c4aac8..da4b9b87b7c 100644
--- a/tensorflow/python/debug/lib/source_utils_test.py
+++ b/tensorflow/python/debug/lib/source_utils_test.py
@@ -287,8 +287,8 @@ class SourceHelperTest(test_util.TensorFlowTestCase):
 
   def testLoadNonexistentNonParPathFailsWithIOError(self):
     bad_path = os.path.join(self.get_temp_dir(), "nonexistent.py")
-    with self.assertRaisesRegexp(
-        IOError, "neither exists nor can be loaded.*par.*"):
+    with self.assertRaisesRegex(IOError,
+                                "neither exists nor can be loaded.*par.*"):
       source_utils.load_source(bad_path)
 
   def testLoadingPythonSourceFileInParFileSucceeds(self):
@@ -315,8 +315,8 @@ class SourceHelperTest(test_util.TensorFlowTestCase):
       zf.write(temp_file_path, os.path.join("tensorflow_models", "model.py"))
 
     source_path = os.path.join(par_path, "tensorflow_models", "nonexistent.py")
-    with self.assertRaisesRegexp(
-        IOError, "neither exists nor can be loaded.*par.*"):
+    with self.assertRaisesRegex(IOError,
+                                "neither exists nor can be loaded.*par.*"):
       source_utils.load_source(source_path)
 
 
diff --git a/tensorflow/python/debug/wrappers/dumping_wrapper_test.py b/tensorflow/python/debug/wrappers/dumping_wrapper_test.py
index 4a14a15562d..0a0b1eb018a 100644
--- a/tensorflow/python/debug/wrappers/dumping_wrapper_test.py
+++ b/tensorflow/python/debug/wrappers/dumping_wrapper_test.py
@@ -73,7 +73,7 @@ class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
     os.mkdir(dir_path)
     self.assertTrue(os.path.isdir(dir_path))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "session_root path points to a non-empty directory"):
       dumping_wrapper.DumpingDebugWrapperSession(
           session.Session(), session_root=self.session_root, log_usage=False)
@@ -83,8 +83,8 @@ class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
     open(file_path, "a").close()  # Create the file
     self.assertTrue(gfile.Exists(file_path))
     self.assertFalse(gfile.IsDirectory(file_path))
-    with self.assertRaisesRegexp(ValueError,
-                                 "session_root path points to a file"):
+    with self.assertRaisesRegex(ValueError,
+                                "session_root path points to a file"):
       dumping_wrapper.DumpingDebugWrapperSession(
           session.Session(), session_root=file_path, log_usage=False)
 
@@ -161,7 +161,7 @@ class DumpingDebugWrapperSessionTest(test_util.TensorFlowTestCase):
 
   def testUsingNonCallableAsWatchFnRaisesTypeError(self):
     bad_watch_fn = "bad_watch_fn"
-    with self.assertRaisesRegexp(TypeError, "watch_fn is not callable"):
+    with self.assertRaisesRegex(TypeError, "watch_fn is not callable"):
       dumping_wrapper.DumpingDebugWrapperSession(
           self.sess,
           session_root=self.session_root,
diff --git a/tensorflow/python/debug/wrappers/framework_test.py b/tensorflow/python/debug/wrappers/framework_test.py
index 0265501e625..9493fa1a81e 100644
--- a/tensorflow/python/debug/wrappers/framework_test.py
+++ b/tensorflow/python/debug/wrappers/framework_test.py
@@ -273,11 +273,11 @@ class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
     """Attempt to wrap a non-Session-type object should cause an exception."""
 
     wrapper = TestDebugWrapperSessionBadAction(self._sess)
-    with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
+    with self.assertRaisesRegex(TypeError, "Expected type .*; got type .*"):
       TestDebugWrapperSessionBadAction(wrapper)
 
   def testSessionInitBadActionValue(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
       TestDebugWrapperSessionBadAction(
           self._sess, bad_init_action="nonsense_action")
@@ -286,7 +286,7 @@ class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
     wrapper = TestDebugWrapperSessionBadAction(
         self._sess, bad_run_start_action="nonsense_action")
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Invalid OnRunStartAction value: nonsense_action"):
       wrapper.run(self._s)
 
@@ -296,7 +296,7 @@ class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
     wrapper = TestDebugWrapperSessionBadAction(
         self._sess, bad_debug_urls="file://foo")
 
-    with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
+    with self.assertRaisesRegex(TypeError, "Expected type .*; got type .*"):
       wrapper.run(self._s)
 
   def testErrorDuringRun(self):
diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
index ab33a4af030..0d930b6e7e0 100644
--- a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
+++ b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py
@@ -191,7 +191,7 @@ class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
     os.mkdir(dir_path)
     self.assertTrue(os.path.isdir(dir_path))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "dump_root path points to a non-empty directory"):
       local_cli_wrapper.LocalCLIDebugWrapperSession(
           session.Session(), dump_root=self._tmp_dir, log_usage=False)
@@ -201,7 +201,7 @@ class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
     file_path = os.path.join(self._tmp_dir, "foo")
     open(file_path, "a").close()  # Create the file
     self.assertTrue(os.path.isfile(file_path))
-    with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
+    with self.assertRaisesRegex(ValueError, "dump_root path points to a file"):
       local_cli_wrapper.LocalCLIDebugWrapperSession(
           session.Session(), dump_root=file_path, log_usage=False)
 
@@ -540,7 +540,7 @@ class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
 
     wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
         [["run"]], self.sess, dump_root=self._tmp_dir)
-    with self.assertRaisesRegexp(errors.OpError, r".*[Dd]evice.*1337.*"):
+    with self.assertRaisesRegex(errors.OpError, r".*[Dd]evice.*1337.*"):
       wrapped_sess.run(w)
 
   def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
@@ -811,7 +811,7 @@ class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
   def testCallingShouldStopMethodOnNonWrappedNonMonitoredSessionErrors(self):
     wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
         [["run"], ["run"]], self.sess)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"The wrapped session .* does not have a method .*should_stop.*"):
       wrapped_sess.should_stop()
diff --git a/tensorflow/python/distribute/all_reduce_test.py b/tensorflow/python/distribute/all_reduce_test.py
index ee97d43b476..c738fa2f855 100644
--- a/tensorflow/python/distribute/all_reduce_test.py
+++ b/tensorflow/python/distribute/all_reduce_test.py
@@ -40,8 +40,7 @@ class AllReduceTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testFlattenTensorsShapesDefined(self):
     x = array_ops.placeholder(types_pb2.DT_FLOAT, [None])
-    with self.assertRaisesRegexp(ValueError,
-                                 "must have statically known shape"):
+    with self.assertRaisesRegex(ValueError, "must have statically known shape"):
       ar._flatten_tensors([x, x])
 
   def testRingPermutations(self):
diff --git a/tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver_test.py b/tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver_test.py
index d8037497cb9..b04c67d76ec 100644
--- a/tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver_test.py
+++ b/tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver_test.py
@@ -335,7 +335,7 @@ class GCEClusterResolverTest(test.TestCase):
         credentials=None,
         service=self.gen_standard_mock_service_client(name_to_ip))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         RuntimeError, 'You cannot reset the task_type '
         'of the GCEClusterResolver after it has '
         'been created.'):
diff --git a/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py b/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py
index 598c3da4642..d9d6076be13 100644
--- a/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py
+++ b/tensorflow/python/distribute/cluster_resolver/kubernetes_cluster_resolver_test.py
@@ -134,7 +134,7 @@ class KubernetesClusterResolverTest(test.TestCase):
             {'job-name=tensorflow': ret}))
 
     error_msg = 'Pod "tensorflow-abc123" is not running; phase: "Failed"'
-    with self.assertRaisesRegexp(RuntimeError, error_msg):
+    with self.assertRaisesRegex(RuntimeError, error_msg):
       cluster_resolver.cluster_spec()
 
   def testMultiplePodSelectorsAndWorkers(self):
diff --git a/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py b/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py
index 1dc9a73fd74..51abc850bb2 100644
--- a/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py
+++ b/tensorflow/python/distribute/cluster_resolver/tpu/tpu_cluster_resolver_test.py
@@ -140,8 +140,8 @@ class TPUClusterResolverTest(test.TestCase):
 
   @mock.patch.object(resolver, 'is_running_in_gce', mock_is_running_in_gce)
   def testCheckRunningInGceWithNoTpuName(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'Please provide a TPU Name to connect to.*'):
+    with self.assertRaisesRegex(ValueError,
+                                'Please provide a TPU Name to connect to.*'):
       resolver.TPUClusterResolver(tpu='')
 
   @mock.patch.object(six.moves.urllib.request, 'urlopen',
diff --git a/tensorflow/python/distribute/custom_training_loop_input_test.py b/tensorflow/python/distribute/custom_training_loop_input_test.py
index e2c4076f3f1..9251721f7d0 100644
--- a/tensorflow/python/distribute/custom_training_loop_input_test.py
+++ b/tensorflow/python/distribute/custom_training_loop_input_test.py
@@ -191,8 +191,8 @@ class InputIterationTest(test.TestCase, parameterized.TestCase,
 
     input_iterator = iter(distribution.experimental_distribute_dataset(dataset))
 
-    with self.assertRaisesRegexp(NotImplementedError,
-                                 "does not support pure eager execution"):
+    with self.assertRaisesRegex(NotImplementedError,
+                                "does not support pure eager execution"):
       distribution.run(train_step, args=(next(input_iterator),))
 
   @combinations.generate(
diff --git a/tensorflow/python/distribute/distribute_lib_test.py b/tensorflow/python/distribute/distribute_lib_test.py
index b5924ec3b67..04193d03d0c 100644
--- a/tensorflow/python/distribute/distribute_lib_test.py
+++ b/tensorflow/python/distribute/distribute_lib_test.py
@@ -150,7 +150,7 @@ def _run_in_and_out_of_scope(unbound_test_method):
     # When run under a different strategy the test method should fail.
     another_strategy = _TestStrategy()
     msg = "Mixing different .*Strategy objects"
-    with test_case.assertRaisesRegexp(RuntimeError, msg):
+    with test_case.assertRaisesRegex(RuntimeError, msg):
       with another_strategy.scope():
         unbound_test_method(test_case, dist)
   return wrapper
@@ -206,7 +206,7 @@ class TestStrategyTest(test.TestCase):
     scope.__enter__()
     self.assertIs(dist, ds_context.get_strategy())
     with ops.device("/device:CPU:0"):
-      with self.assertRaisesRegexp(RuntimeError, "Device scope nesting error"):
+      with self.assertRaisesRegex(RuntimeError, "Device scope nesting error"):
         scope.__exit__(None, None, None)
     scope.__exit__(None, None, None)
     _assert_in_default_state(self)
@@ -222,8 +222,8 @@ class TestStrategyTest(test.TestCase):
     scope.__enter__()
     self.assertIs(dist, ds_context.get_strategy())
     with variable_scope.variable_creator_scope(creator):
-      with self.assertRaisesRegexp(RuntimeError,
-                                   "Variable creator scope nesting error"):
+      with self.assertRaisesRegex(RuntimeError,
+                                  "Variable creator scope nesting error"):
         scope.__exit__(None, None, None)
     scope.__exit__(None, None, None)
     _assert_in_default_state(self)
@@ -239,8 +239,8 @@ class TestStrategyTest(test.TestCase):
       scope.__enter__()
       self.assertIs(dist, ds_context.get_strategy())
       with variable_scope.variable_scope("AA"):
-        with self.assertRaisesRegexp(RuntimeError,
-                                     "Variable scope nesting error"):
+        with self.assertRaisesRegex(RuntimeError,
+                                    "Variable scope nesting error"):
           scope.__exit__(None, None, None)
     _assert_in_default_state(self)
 
@@ -284,15 +284,15 @@ class TestStrategyTest(test.TestCase):
     _assert_in_default_state(self)
     dist = _TestStrategy()
     with dist.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError,
           "Must not be called inside a `tf.distribute.Strategy` scope"):
         ds_context.experimental_set_strategy(_TestStrategy())
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError,
           "Must not be called inside a `tf.distribute.Strategy` scope"):
         ds_context.experimental_set_strategy(dist)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError,
           "Must not be called inside a `tf.distribute.Strategy` scope"):
         ds_context.experimental_set_strategy(None)
@@ -313,9 +313,8 @@ class TestStrategyTest(test.TestCase):
       self.assertIs(dist, ds_context.get_strategy())
       dist2 = _TestStrategy()
       scope2 = dist2.scope()
-      with self.assertRaisesRegexp(
-          RuntimeError,
-          "Mixing different tf.distribute.Strategy objects"):
+      with self.assertRaisesRegex(
+          RuntimeError, "Mixing different tf.distribute.Strategy objects"):
         with scope2:
           pass
     _assert_in_default_state(self)
@@ -496,7 +495,7 @@ class DefaultDistributionStrategyTest(test.TestCase, parameterized.TestCase):
       _assert_in_default_state(self)
 
       with test_strategy.scope():
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             RuntimeError, "Mixing different tf.distribute.Strategy objects"):
           variable_scope.variable(1.0, name="error")
 
@@ -504,7 +503,7 @@ class DefaultDistributionStrategyTest(test.TestCase, parameterized.TestCase):
         _assert_in_default_state(self)
 
         with test_strategy.scope():
-          with self.assertRaisesRegexp(
+          with self.assertRaisesRegex(
               RuntimeError, "Mixing different tf.distribute.Strategy objects"):
             variable_scope.variable(1.0, name="also_error")
 
diff --git a/tensorflow/python/distribute/mirrored_strategy_test.py b/tensorflow/python/distribute/mirrored_strategy_test.py
index 39cc7f3a48f..d2a567589b9 100644
--- a/tensorflow/python/distribute/mirrored_strategy_test.py
+++ b/tensorflow/python/distribute/mirrored_strategy_test.py
@@ -438,7 +438,7 @@ class MirroredStrategyCallForEachReplicaTest(test.TestCase):
       return control_flow_ops.while_loop_v2(lambda i: i < 2, body_fn, [0])
 
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError, "`merge_call` called while defining a new graph."):
         distribution.extended.call_for_each_replica(model_fn)
 
@@ -457,7 +457,7 @@ class MirroredStrategyCallForEachReplicaTest(test.TestCase):
       return model_fn_nested()
 
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError, "`merge_call` called while defining a new graph."):
         distribution.extended.call_for_each_replica(model_fn)
 
@@ -706,7 +706,7 @@ class MirroredVariableUpdateTest(test.TestCase):
       def model_fn():
         return mirrored_var.assign(5.0)
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
           "with the given reduce op ReduceOp.SUM."):
         self.evaluate(distribution.experimental_local_results(
diff --git a/tensorflow/python/distribute/mirrored_variable_test.py b/tensorflow/python/distribute/mirrored_variable_test.py
index df32a6babea..8e7d674947e 100644
--- a/tensorflow/python/distribute/mirrored_variable_test.py
+++ b/tensorflow/python/distribute/mirrored_variable_test.py
@@ -377,7 +377,7 @@ class MirroredVariableCreationTest(test.TestCase):
 
   def testNoneSynchronizationWithGetVariable(self, distribution):
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "`NONE` variable synchronization mode is not "
           "supported with `Mirrored` distribution strategy. Please change "
           "the `synchronization` for variable: v"):
@@ -387,7 +387,7 @@ class MirroredVariableCreationTest(test.TestCase):
 
   def testNoneSynchronizationWithVariable(self, distribution):
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "`NONE` variable synchronization mode is not "
           "supported with `Mirrored` distribution strategy. Please change "
           "the `synchronization` for variable: v"):
@@ -398,14 +398,14 @@ class MirroredVariableCreationTest(test.TestCase):
 
   def testInvalidSynchronizationWithVariable(self, distribution):
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Invalid variable synchronization mode: Invalid for "
           "variable: v"):
         variable_scope.variable(1.0, name="v", synchronization="Invalid")
 
   def testInvalidAggregationWithGetVariable(self, distribution):
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Invalid variable aggregation mode: invalid for "
           "variable: v"):
         variable_scope.get_variable(
@@ -415,7 +415,7 @@ class MirroredVariableCreationTest(test.TestCase):
 
   def testInvalidAggregationWithVariable(self, distribution):
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Invalid variable aggregation mode: invalid for "
           "variable: v"):
         variable_scope.variable(
diff --git a/tensorflow/python/distribute/multi_process_runner_no_init_test.py b/tensorflow/python/distribute/multi_process_runner_no_init_test.py
index 475255d5e0a..2a1fe2551b9 100644
--- a/tensorflow/python/distribute/multi_process_runner_no_init_test.py
+++ b/tensorflow/python/distribute/multi_process_runner_no_init_test.py
@@ -30,8 +30,8 @@ class MultiProcessRunnerNoInitTest(test.TestCase):
     def simple_func():
       return 'foobar'
 
-    with self.assertRaisesRegexp(RuntimeError,
-                                 '`multi_process_runner` is not initialized.'):
+    with self.assertRaisesRegex(RuntimeError,
+                                '`multi_process_runner` is not initialized.'):
       multi_process_runner.run(
           simple_func,
           multi_worker_test_base.create_cluster_spec(num_workers=1))
diff --git a/tensorflow/python/distribute/multi_process_runner_test.py b/tensorflow/python/distribute/multi_process_runner_test.py
index a6219dc5322..c6266a5be26 100644
--- a/tensorflow/python/distribute/multi_process_runner_test.py
+++ b/tensorflow/python/distribute/multi_process_runner_test.py
@@ -97,7 +97,7 @@ class MultiProcessRunnerTest(test.TestCase):
         multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
         max_run_time=20)
     runner.start()
-    with self.assertRaisesRegexp(ValueError, 'This is an error.'):
+    with self.assertRaisesRegex(ValueError, 'This is an error.'):
       runner.join()
 
   def test_multi_process_runner_queue_emptied_between_runs(self):
@@ -287,7 +287,7 @@ class MultiProcessRunnerTest(test.TestCase):
     mpr.start()
     time.sleep(60)
     mpr.terminate_all()
-    with self.assertRaisesRegexp(ValueError, 'This is an error.'):
+    with self.assertRaisesRegex(ValueError, 'This is an error.'):
       mpr.join()
 
   def test_barrier(self):
@@ -402,7 +402,7 @@ class MultiProcessPoolRunnerTest(test.TestCase):
     cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
     runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec)
     pid = runner.run(proc_func_that_returns_pid)
-    with self.assertRaisesRegexp(ValueError, 'This is an error.'):
+    with self.assertRaisesRegex(ValueError, 'This is an error.'):
       runner.run(proc_func_that_errors)
     self.assertAllEqual(runner.run(proc_func_that_returns_pid), pid)
 
diff --git a/tensorflow/python/distribute/multi_worker_util_test.py b/tensorflow/python/distribute/multi_worker_util_test.py
index 6a51e71ded7..d5dc6d7eb91 100644
--- a/tensorflow/python/distribute/multi_worker_util_test.py
+++ b/tensorflow/python/distribute/multi_worker_util_test.py
@@ -71,7 +71,7 @@ class NormalizeClusterSpecTest(test.TestCase):
   def testUnexpectedInput(self):
     cluster_spec = ["127.0.0.1:8964", "127.0.0.1:2333"]
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
         "`tf.train.ClusterDef` object"):
@@ -94,11 +94,11 @@ class IsChiefTest(test.TestCase):
     self.assertTrue(multi_worker_util.is_chief(cluster_spec, "worker", 0))
     self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 1))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "`task_type` 'chief' not found in cluster_spec."):
       multi_worker_util.is_chief(cluster_spec, "chief", 0)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "The `task_id` 2 exceeds the maximum id of worker."):
       multi_worker_util.is_chief(cluster_spec, "worker", 2)
 
@@ -135,7 +135,7 @@ class NumWorkersTest(test.TestCase):
 
   def testTaskTypeNotFound(self):
     cluster_spec = {}
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "`task_type` 'worker' not found in cluster_spec."):
       multi_worker_util.worker_count(cluster_spec, task_type="worker")
 
@@ -145,7 +145,7 @@ class NumWorkersTest(test.TestCase):
         "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
     }
     # A "ps" job shouldn't call this method.
-    with self.assertRaisesRegexp(ValueError, "Unexpected `task_type` 'ps'"):
+    with self.assertRaisesRegex(ValueError, "Unexpected `task_type` 'ps'"):
       multi_worker_util.worker_count(cluster_spec, task_type="ps")
 
 
@@ -187,16 +187,16 @@ class IdInClusterTest(test.TestCase):
 
   def testPsId(self):
     cluster_spec = {"chief": ["127.0.0.1:1234"], "ps": ["127.0.0.1:7566"]}
-    with self.assertRaisesRegexp(ValueError,
-                                 "There is no id for task_type 'ps'"):
+    with self.assertRaisesRegex(ValueError,
+                                "There is no id for task_type 'ps'"):
       multi_worker_util.id_in_cluster(cluster_spec, "ps", 0)
 
   def testMultipleChiefs(self):
     cluster_spec = {
         "chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
     }
-    with self.assertRaisesRegexp(ValueError,
-                                 "There must be at most one 'chief' job."):
+    with self.assertRaisesRegex(ValueError,
+                                "There must be at most one 'chief' job."):
       multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
 
 
@@ -257,7 +257,7 @@ class ClusterSpecValidationTest(test.TestCase):
         "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
     }
     multi_worker_util._validate_cluster_spec(cluster_spec, "evaluator", 0)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "`task_type` 'worker' not found in cluster_spec."):
       multi_worker_util._validate_cluster_spec(cluster_spec, "worker", 0)
 
diff --git a/tensorflow/python/distribute/sharded_variable_test.py b/tensorflow/python/distribute/sharded_variable_test.py
index b42c8314e98..64ed3d03717 100644
--- a/tensorflow/python/distribute/sharded_variable_test.py
+++ b/tensorflow/python/distribute/sharded_variable_test.py
@@ -173,23 +173,23 @@ class ShardedVariableTest(test.TestCase):
     self.assertAllEqual([3., 2.], root.train([0, 1]).numpy())
 
   def test_validation_errors(self):
-    with self.assertRaisesRegexp(ValueError, 'Expected a list of '):
+    with self.assertRaisesRegex(ValueError, 'Expected a list of '):
       sharded_variable.ShardedVariable(
           [variables_lib.Variable([0]), 'not-a-variable'])
 
-    with self.assertRaisesRegexp(ValueError, 'must have the same dtype'):
+    with self.assertRaisesRegex(ValueError, 'must have the same dtype'):
       sharded_variable.ShardedVariable([
           variables_lib.Variable([0], dtype='int64'),
           variables_lib.Variable([1], dtype='int32')
       ])
 
-    with self.assertRaisesRegexp(ValueError, 'the same shapes except'):
+    with self.assertRaisesRegex(ValueError, 'the same shapes except'):
       sharded_variable.ShardedVariable([
           variables_lib.Variable(array_ops.ones((5, 10))),
           variables_lib.Variable(array_ops.ones((5, 20)))
       ])
 
-    with self.assertRaisesRegexp(ValueError, '`SaveSliceInfo` should not'):
+    with self.assertRaisesRegex(ValueError, '`SaveSliceInfo` should not'):
       v = variables_lib.Variable([0])
       v._set_save_slice_info(
           variables_lib.Variable.SaveSliceInfo(
diff --git a/tensorflow/python/distribute/shared_variable_creator_test.py b/tensorflow/python/distribute/shared_variable_creator_test.py
index 4ddc29f2567..151abc0355a 100644
--- a/tensorflow/python/distribute/shared_variable_creator_test.py
+++ b/tensorflow/python/distribute/shared_variable_creator_test.py
@@ -30,18 +30,18 @@ class CanonicalizeVariableNameTest(test.TestCase):
     return shared_variable_creator._canonicalize_variable_name(name)
 
   def testNoName(self):
-    self.assertEquals("Variable", self._canonicalize(None))
+    self.assertEqual("Variable", self._canonicalize(None))
 
   def testPatternInMiddle(self):
-    self.assertEquals("foo/bar/baz", self._canonicalize("foo_1/bar_1/baz"))
+    self.assertEqual("foo/bar/baz", self._canonicalize("foo_1/bar_1/baz"))
 
   def testPatternAtEnd(self):
-    self.assertEquals("foo", self._canonicalize("foo_1"))
+    self.assertEqual("foo", self._canonicalize("foo_1"))
 
   def testWrongPatterns(self):
-    self.assertEquals("foo_1:0", self._canonicalize("foo_1:0"))
-    self.assertEquals("foo1", self._canonicalize("foo1"))
-    self.assertEquals("foo_a", self._canonicalize("foo_a"))
+    self.assertEqual("foo_1:0", self._canonicalize("foo_1:0"))
+    self.assertEqual("foo1", self._canonicalize("foo1"))
+    self.assertEqual("foo_a", self._canonicalize("foo_a"))
 
 
 class SharedVariableCreatorTest(test.TestCase):
diff --git a/tensorflow/python/distribute/strategy_combinations_test.py b/tensorflow/python/distribute/strategy_combinations_test.py
index 8b5ea27f512..38ace7da42d 100644
--- a/tensorflow/python/distribute/strategy_combinations_test.py
+++ b/tensorflow/python/distribute/strategy_combinations_test.py
@@ -52,7 +52,7 @@ class VirtualDevicesTest(test.TestCase, parameterized.TestCase):
   def testSetVirtualCPUsErrors(self):
     with self.assertRaises(ValueError):
       strategy_combinations.set_virtual_cpus_to_at_least(0)
-    with self.assertRaisesRegexp(RuntimeError, "with 3 < 5 virtual CPUs"):
+    with self.assertRaisesRegex(RuntimeError, "with 3 < 5 virtual CPUs"):
       strategy_combinations.set_virtual_cpus_to_at_least(5)
 
   @combinations.generate(combinations.combine(
diff --git a/tensorflow/python/eager/backprop_test.py b/tensorflow/python/eager/backprop_test.py
index 20c6e05adda..d21146e0b73 100644
--- a/tensorflow/python/eager/backprop_test.py
+++ b/tensorflow/python/eager/backprop_test.py
@@ -799,7 +799,7 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
       y = control_flow_ops.cond(x < x, true_fn, false_fn)
 
     if not context.executing_eagerly():
-      with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
+      with self.assertRaisesRegex(NotImplementedError, 'tf.gradients'):
         dy = g.gradient(y, [x])[0]
     else:
       dy = g.gradient(y, [x])[0]
@@ -822,7 +822,7 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
       _, y = control_flow_ops.while_loop(cond, body, [i, x])
 
     if not context.executing_eagerly():
-      with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'):
+      with self.assertRaisesRegex(NotImplementedError, 'tf.gradients'):
         dy = g.gradient(y, [x])[0]
     else:
       dy = g.gradient(y, [x])[0]
@@ -836,7 +836,7 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
       y = x * x
       z = y * y
     g.gradient(z, [x])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         RuntimeError, 'GradientTape.gradient can only be called once'):
       g.gradient(y, [x])
 
@@ -958,7 +958,7 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
     with backprop.GradientTape() as g:
       g.watch([x, y])
       z = y * 2
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
       g.gradient(z, x, unconnected_gradients='nonsense')
 
@@ -989,8 +989,8 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
     with backprop.GradientTape() as g:
       g.watch(x)
       tape_lib.record_operation('InvalidBackprop', [y], [x], lambda dy: [])
-    with self.assertRaisesRegexp(errors_impl.InternalError,
-                                 'InvalidBackprop.*too few gradients'):
+    with self.assertRaisesRegex(errors_impl.InternalError,
+                                'InvalidBackprop.*too few gradients'):
       g.gradient(y, x)
 
   @test_util.assert_no_new_tensors
@@ -1295,13 +1295,13 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
     y = constant_op.constant(2)
 
     loss_grads_fn = backprop.implicit_val_and_grad(fn)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Cannot differentiate a function that returns None; '
         'did you forget to return a value from fn?'):
       loss_grads_fn(x, y)
 
     val_and_grads_fn = backprop.val_and_grad_function(fn)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Cannot differentiate a function that returns None; '
         'did you forget to return a value from fn?'):
       val_and_grads_fn(x, y)
@@ -1504,7 +1504,7 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def testWatchBadThing(self):
     g = backprop.GradientTape()
-    with self.assertRaisesRegexp(ValueError, 'ndarray'):
+    with self.assertRaisesRegex(ValueError, 'ndarray'):
       g.watch(np.array(1.))
 
   def testWatchComposite(self):
@@ -1659,7 +1659,7 @@ class JacobianTest(test.TestCase):
       x = constant_op.constant([1.0, 2.0])
       g.watch(x)
       y = x * x
-    with self.assertRaisesRegexp(RuntimeError, 'persistent'):
+    with self.assertRaisesRegex(RuntimeError, 'persistent'):
       g.jacobian(y, x, experimental_use_pfor=False)
 
   @test_util.run_v1_only('b/120545219')
@@ -1749,28 +1749,28 @@ class BatchJacobianTest(test.TestCase, parameterized.TestCase):
       x = constant_op.constant([[1.0, 2.0]])
       g.watch(x)
       y = x * x
-    with self.assertRaisesRegexp(RuntimeError, 'persistent'):
+    with self.assertRaisesRegex(RuntimeError, 'persistent'):
       g.batch_jacobian(y, x, experimental_use_pfor=False)
 
   def testBadShape(self):
     x = random_ops.random_uniform([2, 3])
     with backprop.GradientTape() as g:
       y = array_ops.concat([x, x], axis=0)
-    with self.assertRaisesRegexp(ValueError, 'Need first dimension'):
+    with self.assertRaisesRegex(ValueError, 'Need first dimension'):
       g.batch_jacobian(y, x)
 
   def testBadInputRank(self):
     x = random_ops.random_uniform([2])
     with backprop.GradientTape() as g:
       y = random_ops.random_uniform([2, 2])
-    with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'):
+    with self.assertRaisesRegex(ValueError, 'must have rank at least 2'):
       g.batch_jacobian(y, x)
 
   def testBadOutputRank(self):
     x = random_ops.random_uniform([2, 2])
     with backprop.GradientTape() as g:
       y = random_ops.random_uniform([2])
-    with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'):
+    with self.assertRaisesRegex(ValueError, 'must have rank at least 2'):
       g.batch_jacobian(y, x)
 
   def test_parallel_iterations(self):
diff --git a/tensorflow/python/eager/core_test.py b/tensorflow/python/eager/core_test.py
index c1401fc56ee..d756827f44f 100644
--- a/tensorflow/python/eager/core_test.py
+++ b/tensorflow/python/eager/core_test.py
@@ -89,7 +89,7 @@ class TFETest(test_util.TensorFlowTestCase):
     else:
       # TODO(gjn): Figure out how to make this work for tf.Tensor
       # self.assertNotIsInstance(b, collections.Hashable)
-      with self.assertRaisesRegexp(TypeError, 'unhashable'):
+      with self.assertRaisesRegex(TypeError, 'unhashable'):
         set([a, b])
 
   def testEquality(self):
@@ -464,7 +464,7 @@ class TFETest(test_util.TensorFlowTestCase):
   def testContextConfig(self):
     ctx = context.Context(config=config_pb2.ConfigProto(
         device_count={'GPU': 0}))
-    self.assertEquals(0, ctx.num_gpus())
+    self.assertEqual(0, ctx.num_gpus())
 
   def testPickle(self):
     tmp_dir = self.get_temp_dir()
@@ -485,7 +485,7 @@ class TFETest(test_util.TensorFlowTestCase):
     self.assertEndsWith(current_device(), 'CPU:0')
     gpu.__enter__()
     self.assertEndsWith(current_device(), 'GPU:0')
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         RuntimeError, 'Exiting device scope without proper scope nesting'):
       cpu.__exit__()
       self.assertEndsWith(current_device(), 'GPU:0')
@@ -926,7 +926,7 @@ class TFETest(test_util.TensorFlowTestCase):
 
     x = constant_op.constant(1)
     three_x = add(add(x, x), x)
-    self.assertEquals(dtypes.int32, three_x.dtype)
+    self.assertEqual(dtypes.int32, three_x.dtype)
     self.assertAllEqual(3, three_x)
 
   @test_util.run_gpu_only
@@ -953,7 +953,7 @@ class TFETest(test_util.TensorFlowTestCase):
     types, tensors = execute_lib.convert_to_mixed_eager_tensors(
         [array, tensor], context.context())
     for typ, t in zip(types, tensors):
-      self.assertEquals(typ, dtypes.float32)
+      self.assertEqual(typ, dtypes.float32)
       self.assertIsInstance(t, ops.EagerTensor)
 
   def testConvertMixedEagerTensorsWithVariables(self):
diff --git a/tensorflow/python/eager/custom_device_test.py b/tensorflow/python/eager/custom_device_test.py
index 9a24383a13c..c9cca7eb040 100644
--- a/tensorflow/python/eager/custom_device_test.py
+++ b/tensorflow/python/eager/custom_device_test.py
@@ -41,7 +41,7 @@ class CustomDeviceTest(test.TestCase):
     # There was no copy onto the device. Actually I'm not sure how to trigger
     # that from Python.
     self.assertFalse(custom_device_testutil.FlagValue(arrived_flag))
-    with self.assertRaisesRegexp(errors.InternalError, 'Trying to copy'):
+    with self.assertRaisesRegex(errors.InternalError, 'Trying to copy'):
       y.numpy()
 
 
diff --git a/tensorflow/python/eager/def_function_test.py b/tensorflow/python/eager/def_function_test.py
index 6dc4e322bbd..0ae69fa0b8c 100644
--- a/tensorflow/python/eager/def_function_test.py
+++ b/tensorflow/python/eager/def_function_test.py
@@ -218,8 +218,8 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
           state.append(variables.Variable(2.0 * x))
         return state[0] * x
 
-      with self.assertRaisesRegexp(
-          lift_to_graph.UnliftableError, r'transitively.* mul .* x'):
+      with self.assertRaisesRegex(lift_to_graph.UnliftableError,
+                                  r'transitively.* mul .* x'):
         fn(constant_op.constant(3.0))
 
   @test_util.disable_tfrt('Variable argument is not supported')
@@ -393,8 +393,8 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
         outputs.append(inputs[t])
       return outputs
 
-    with self.assertRaisesRegexp(errors.InaccessibleTensorError,
-                                 'defined in another function or code block'):
+    with self.assertRaisesRegex(errors.InaccessibleTensorError,
+                                'defined in another function or code block'):
       f(array_ops.zeros(shape=(8, 42, 3)))
 
   @test_util.disable_tfrt('Control flow is not supported')
@@ -472,7 +472,7 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
       with ops.init_scope():
         _ = a + a
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         re.compile('An op outside of the function.*passed.*Const', re.DOTALL)):
       failing_function()
@@ -627,7 +627,7 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
       return a[0].read_value()
 
     create_variable()
-    self.assertRegexpMatches(a[0].device, 'CPU')
+    self.assertRegex(a[0].device, 'CPU')
 
   @test_util.disable_tfrt('Variable argument is not supported')
   @test_util.run_gpu_only
@@ -647,8 +647,8 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
 
     with ops.device('CPU:0'):
       create_variable()
-    self.assertRegexpMatches(a[0].device, 'CPU')
-    self.assertRegexpMatches(initial_value[0].device, 'CPU')
+    self.assertRegex(a[0].device, 'CPU')
+    self.assertRegex(initial_value[0].device, 'CPU')
 
   def testDecorate(self):
     func = def_function.function(lambda: 1)
@@ -727,7 +727,7 @@ class DefFunctionTest(test.TestCase, parameterized.TestCase):
     func = def_function.function(lambda: 1)
     self.assertEqual(func().numpy(), 1)
     msg = 'Functions cannot be decorated after they have been traced.'
-    with self.assertRaisesRegexp(ValueError, msg):
+    with self.assertRaisesRegex(ValueError, msg):
       func._decorate(lambda f: f)
 
   def testGetConcreteFunctionGraphLifetime(self):
diff --git a/tensorflow/python/eager/def_function_xla_jit_test.py b/tensorflow/python/eager/def_function_xla_jit_test.py
index 78d44a81b0b..d55f84863e9 100644
--- a/tensorflow/python/eager/def_function_xla_jit_test.py
+++ b/tensorflow/python/eager/def_function_xla_jit_test.py
@@ -142,8 +142,8 @@ class DefFunctionTest(test.TestCase):
     func = def_function.function(fn2, experimental_compile=False)
     inputs = constant_op.constant([1, 2, 2, 3, 3])
     if not test.is_built_with_rocm():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   'not compilable'):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  'not compilable'):
         func(inputs)
 
   def testUnsupportedOps(self):
@@ -156,7 +156,7 @@ class DefFunctionTest(test.TestCase):
 
     inputs = constant_op.constant([1, 2, 2, 3, 3])
     self.assertAllClose([1, 2, 3], func(inputs))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, 'not compilable'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, 'not compilable'):
       xla_func(inputs)
 
   def testFunctionGradient(self):
@@ -236,7 +236,7 @@ class DefFunctionTest(test.TestCase):
 
     inputs = constant_op.constant([1, 2, 2, 3, 3])
     c = C()
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, 'not compilable'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, 'not compilable'):
       c.f1(inputs)
 
   def testMustBeConstantPropagation(self):
@@ -285,9 +285,8 @@ class DefFunctionTest(test.TestCase):
     x = constant_op.constant(3.14)
     with backprop.GradientTape() as tape:
       tape.watch(x)
-      with self.assertRaisesRegexp(
-          errors.UnimplementedError,
-          'TensorList crossing the XLA/TF boundary'):
+      with self.assertRaisesRegex(errors.UnimplementedError,
+                                  'TensorList crossing the XLA/TF boundary'):
         y = f(x)
         tape.gradient(y, x)
 
diff --git a/tensorflow/python/eager/forwardprop_test.py b/tensorflow/python/eager/forwardprop_test.py
index b71957ae16b..ad55a5301a9 100644
--- a/tensorflow/python/eager/forwardprop_test.py
+++ b/tensorflow/python/eager/forwardprop_test.py
@@ -282,7 +282,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
   def testJVPFunctionRaisesError(self):
     sum_outputs = (constant_op.constant(6.),)
 
-    with self.assertRaisesRegexp(ValueError, r".*was expected to be of shape*"):
+    with self.assertRaisesRegex(ValueError, r".*was expected to be of shape*"):
       forwardprop._jvp_dispatch(
           op_name="Add",
           attr_tuple=(),
@@ -343,7 +343,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
   @test_util.assert_no_new_pyobjects_executing_eagerly
   def testMultipleWatchesAdd(self):
     x = constant_op.constant(-2.)
-    with self.assertRaisesRegexp(ValueError, "multiple times"):
+    with self.assertRaisesRegex(ValueError, "multiple times"):
       with forwardprop.ForwardAccumulator(
           [x, x], [1., 2.]):
         pass
@@ -365,7 +365,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
       self.assertAllClose(1.5, acc.jvp(x))
       y = 4. * x
       self.assertAllClose(6., acc.jvp(y))
-      with self.assertRaisesRegexp(ValueError, "already recording"):
+      with self.assertRaisesRegex(ValueError, "already recording"):
         with acc:
           pass
     z = 4. * x
@@ -434,8 +434,8 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
     def f(x):
       return math_ops.reduce_prod(math_ops.tanh(x)**2)
 
-    with self.assertRaisesRegexp(NotImplementedError,
-                                 "recompute_grad tried to transpose"):
+    with self.assertRaisesRegex(NotImplementedError,
+                                "recompute_grad tried to transpose"):
       primals = [constant_op.constant([1.])]
       sym_jac_fwd = _jacfwd(f, primals)
 
@@ -450,7 +450,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase):
     c = constant_op.constant(1.)
     d = constant_op.constant(2.)
     with forwardprop.ForwardAccumulator(c, d):
-      with self.assertRaisesRegexp(ValueError, "test_error_string"):
+      with self.assertRaisesRegex(ValueError, "test_error_string"):
         f(c)
 
   @parameterized.named_parameters(
diff --git a/tensorflow/python/eager/function_defun_collection_test.py b/tensorflow/python/eager/function_defun_collection_test.py
index 53478ad121c..954297e58d2 100644
--- a/tensorflow/python/eager/function_defun_collection_test.py
+++ b/tensorflow/python/eager/function_defun_collection_test.py
@@ -55,9 +55,9 @@ class DefunCollectionTest(test.TestCase, parameterized.TestCase):
           return z
 
         self.assertEqual(7, int(self.evaluate(fn())))
-        self.assertEquals(ops.get_collection('x'), [2])
-        self.assertEquals(ops.get_collection('y'), [5])
-        self.assertEquals(ops.get_collection('z'), [])
+        self.assertEqual(ops.get_collection('x'), [2])
+        self.assertEqual(ops.get_collection('y'), [5])
+        self.assertEqual(ops.get_collection('z'), [])
 
   @parameterized.named_parameters(
       dict(testcase_name='Defun', function_decorator=function.defun),
@@ -76,8 +76,7 @@ class DefunCollectionTest(test.TestCase, parameterized.TestCase):
 
         self.evaluate(variables.global_variables_initializer())
         self.assertEqual(1.0, float(self.evaluate(f())))
-        self.assertEquals(
-            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 1)
+        self.assertLen(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES), 1)
 
   def testCollectionVariableValueWrite(self):
     """Write variable value inside defun."""
@@ -92,8 +91,7 @@ class DefunCollectionTest(test.TestCase, parameterized.TestCase):
         _ = f.get_concrete_function()
         self.evaluate(variables.global_variables_initializer())
         self.assertEqual(2.0, float(self.evaluate(f())))
-        self.assertEquals(
-            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 1)
+        self.assertLen(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES), 1)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/python/eager/function_gradients_test.py b/tensorflow/python/eager/function_gradients_test.py
index ffd84fc56af..2830207b30a 100644
--- a/tensorflow/python/eager/function_gradients_test.py
+++ b/tensorflow/python/eager/function_gradients_test.py
@@ -372,8 +372,8 @@ class FunctionGradientsTest(test.TestCase, parameterized.TestCase):
             'v', initializer=constant_op.constant(1.0))
         return x * constant_op.constant(2.0)
 
-      with self.assertRaisesRegexp(ValueError,
-                                   'No trainable variables were accessed'):
+      with self.assertRaisesRegex(ValueError,
+                                  'No trainable variables were accessed'):
         backprop.implicit_val_and_grad(f)()
 
   def testDefunCallBackpropUsingSameObjectForMultipleArguments(self):
diff --git a/tensorflow/python/eager/function_test.py b/tensorflow/python/eager/function_test.py
index b70b1bc5c1f..1bcf51e62c6 100644
--- a/tensorflow/python/eager/function_test.py
+++ b/tensorflow/python/eager/function_test.py
@@ -169,7 +169,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(values, [1, 2, 1, 2])  # And again.
 
   def testCannotAddExitCallbackWhenNotInFunctionScope(self):
-    with self.assertRaisesRegexp(RuntimeError, 'when not building a function.'):
+    with self.assertRaisesRegex(RuntimeError, 'when not building a function.'):
       ops.add_exit_callback_to_default_func_graph(lambda: None)
 
   def testVariable(self):
@@ -186,7 +186,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     r1 = add(v)
     self.assertEqual(2.0, self.evaluate(r1))
     c = constant_op.constant(1.0)
-    with self.assertRaisesRegexp(AttributeError, 'no attribute'):
+    with self.assertRaisesRegex(AttributeError, 'no attribute'):
       add(c)
 
   def testPackedVariable(self):
@@ -264,8 +264,8 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
           experimental_implements='func')(lambda x, y: x + y + z)
       a = array_ops.ones((1.0,))
       b = array_ops.ones((1.0,))
-      with self.assertRaisesRegexp(AssertionError,
-                                   'variables are always captured'):
+      with self.assertRaisesRegex(AssertionError,
+                                  'variables are always captured'):
         v(a, b)
       functions = ops.get_default_graph().as_graph_def().library.function
       self.assertEmpty(functions)
@@ -624,7 +624,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     def f(_):
       return 1.0
 
-    with self.assertRaisesRegexp(ValueError, r'Got type: set'):
+    with self.assertRaisesRegex(ValueError, r'Got type: set'):
       f(set([]))
 
   def testFuncName(self):
@@ -1097,7 +1097,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
     @def_function.function
     def tensor_init():
-      with self.assertRaisesRegexp(ValueError, error_msg):
+      with self.assertRaisesRegex(ValueError, error_msg):
         resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
 
     tensor_init()
@@ -1581,7 +1581,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
           False)  # use_locking
       return None
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         'Cannot place the graph because a reference or resource edge connects '
         'colocation groups with incompatible assigned devices'):
@@ -2052,10 +2052,10 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     self.assertAllEqual([3, 1], func([[0], [1.0], [1]]))
     self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]])))
 
-    with self.assertRaisesRegexp(ValueError, 'incompatible'):
+    with self.assertRaisesRegex(ValueError, 'incompatible'):
       func([0.0, 1.0, 2.0])  # Wrong shape.
 
-    with self.assertRaisesRegexp(ValueError, 'incompatible'):
+    with self.assertRaisesRegex(ValueError, 'incompatible'):
       func([['wrong dtype']])
 
   def testNoKeywordOnlyArgumentsWithInputSignature(self):
@@ -2064,7 +2064,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
     func = eval('lambda x, *, y: x')  # pylint: disable=eval-used
     signature = [tensor_spec.TensorSpec(None, dtypes.int32)]
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Cannot define a TensorFlow function from a Python '
         'function with keyword-only arguments when input_signature is '
         'provided.'):
@@ -2160,13 +2160,14 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
     # Signatures must consist exclusively of `TensorSpec` objects.
     signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]
-    with self.assertRaisesRegexp(TypeError, 'Invalid input_signature.*'):
+    with self.assertRaisesRegex(TypeError, 'Invalid input_signature.*'):
       def_function.function(foo, input_signature=signature)
 
     # Signatures must be either lists or tuples on their outermost levels.
     signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}
-    with self.assertRaisesRegexp(TypeError, 'input_signature must be either a '
-                                 'tuple or a list.*'):
+    with self.assertRaisesRegex(
+        TypeError, 'input_signature must be either a '
+        'tuple or a list.*'):
       function.defun(foo, input_signature=signature)
 
   @test_util.run_in_graph_and_eager_modes
@@ -2179,23 +2180,23 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     defined = def_function.function(foo, input_signature=signature)
 
     # Invalid shapes.
-    with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
+    with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
       defined(array_ops.ones([3]))
 
-    with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'):
+    with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
       defined(array_ops.ones([2, 1]))
 
     # Wrong number of arguments.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r'takes 1 positional arguments \(as specified by the '
         r'input_signature\) but 2 were given'):
       defined(array_ops.ones([2]), array_ops.ones([2]))
-    with self.assertRaisesRegexp(ValueError,
-                                 'Structure of Python function inputs.*'):
+    with self.assertRaisesRegex(ValueError,
+                                'Structure of Python function inputs.*'):
       defined()
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'inputs incompatible with input_signature'):
+    with self.assertRaisesRegex(ValueError,
+                                'inputs incompatible with input_signature'):
       defined.get_concrete_function(
           tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.float32))
 
@@ -2209,12 +2210,12 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     defined = function.defun(foo, input_signature=signature)
     a = array_ops.ones([1])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'Structure of Python function inputs.*'):
+    with self.assertRaisesRegex(ValueError,
+                                'Structure of Python function inputs.*'):
       defined([a, a, a], [a])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'Structure of Python function inputs.*'):
+    with self.assertRaisesRegex(ValueError,
+                                'Structure of Python function inputs.*'):
       defined([a], [a, a, a])
     defined([a, a], [a, a])
 
@@ -2229,12 +2230,12 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
         return -1.0 * a
 
     x = constant_op.constant(1.0)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, 'got keyword argument `training` '
         'that was not included in input_signature'):
       foo(x, training=True)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, 'got keyword argument `training` '
         'that was not included in input_signature'):
       foo(x, training=False)
@@ -2343,17 +2344,17 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
     # Different number of rows
     rt3 = ragged_factory_ops.constant([[1, 2], [3, 4], [5], [6]])
-    with self.assertRaisesRegexp(ValueError, 'incompatible'):
+    with self.assertRaisesRegex(ValueError, 'incompatible'):
       defined(rt3)
 
     # Different dtype
     rt4 = ragged_factory_ops.constant([[1.0, 2.0], [], [3.0]])
-    with self.assertRaisesRegexp(ValueError, 'Structure .* does not match'):
+    with self.assertRaisesRegex(ValueError, 'Structure .* does not match'):
       defined(rt4)
 
     # Different rank
     rt5 = ragged_factory_ops.constant([[[1]], [[2]], [[3]]])
-    with self.assertRaisesRegexp(ValueError, 'does not match'):
+    with self.assertRaisesRegex(ValueError, 'does not match'):
       defined(rt5)
 
   def testInputSignatureWithVariableArgs(self):
@@ -2510,15 +2511,13 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
         # pylint: disable=protected-access
         self.assertLen(graph._functions, 2)
         functions = list(graph._functions.values())
-        self.assertRegexpMatches(
-            functions[0].definition.signature.name, '.*matmul.*')
+        self.assertRegex(functions[0].definition.signature.name, '.*matmul.*')
         attrs = functions[0].definition.attr
         self.assertLen(attrs, 2)
         self.assertEqual(attrs['experimental_1'].s, b'value1')
         self.assertEqual(attrs['experimental_2'].i, 2)
 
-        self.assertRegexpMatches(
-            functions[1].definition.signature.name, '.*add.*')
+        self.assertRegex(functions[1].definition.signature.name, '.*add.*')
         attrs = functions[1].definition.attr
         self.assertLen(attrs, 2)
         self.assertEqual(attrs['experimental_3'].b, True)
@@ -2530,8 +2529,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     def add(x, y):
       return math_ops.add(x, y)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 '.*Unsupported attribute type.*'):
+    with self.assertRaisesRegex(ValueError, '.*Unsupported attribute type.*'):
       with context.graph_mode(), self.cached_session():
         with ops.get_default_graph().as_default():
           t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
@@ -2570,8 +2568,8 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
             '.*inference.*backward.*add.*',
         ]
         for i in range(len(functions)):
-          self.assertRegexpMatches(captured_function_names[i],
-                                   expected_func_name_regex[i])
+          self.assertRegex(captured_function_names[i],
+                           expected_func_name_regex[i])
 
         # Check the forward and backward function has the correct attributes.
         self.assertEqual(
@@ -2649,7 +2647,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
         for expected, found in zip(
             expected_func_name_regex,
             captured_function_names):
-          self.assertRegexpMatches(found, expected)
+          self.assertRegex(found, expected)
 
         composite_t, composite_double = composite(t, t)
         double = add(t, t)
@@ -3011,7 +3009,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
     expected_msg = '.*() should not modify'
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def append(l):
@@ -3019,7 +3017,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       append(get_list())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def extend(l):
@@ -3027,7 +3025,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       extend(get_list())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def insert(l):
@@ -3035,7 +3033,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       insert(get_list())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def pop(l):
@@ -3043,7 +3041,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       pop(get_list())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def reverse(l):
@@ -3051,7 +3049,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       reverse(get_list())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def remove(l):
@@ -3062,7 +3060,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     # `list.clear` is a method that is in Py3 but not Py2
     if sys.version.startswith('3'):
 
-      with self.assertRaisesRegexp(ValueError, expected_msg):
+      with self.assertRaisesRegex(ValueError, expected_msg):
 
         @def_function.function
         def clear(l):
@@ -3071,7 +3069,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
         clear(get_list())
 
     # One last test for keyword arguments
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def kwdappend(**kwargs):
@@ -3087,7 +3085,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
     expected_msg = '.* should not modify'
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def clear(m):
@@ -3095,7 +3093,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       clear(get_dict())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def pop(m):
@@ -3103,7 +3101,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       pop(get_dict())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def popitem(m):
@@ -3111,7 +3109,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       popitem(get_dict())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def update(m):
@@ -3119,7 +3117,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       update(get_dict())
 
-    with self.assertRaisesRegexp(ValueError, expected_msg):
+    with self.assertRaisesRegex(ValueError, expected_msg):
 
       @def_function.function
       def setdefault(m):
@@ -3128,8 +3126,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
       setdefault(get_dict())
 
   def testFunctionModifiesInputNest(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'modify.* should not modify'):
+    with self.assertRaisesRegex(ValueError, 'modify.* should not modify'):
 
       @def_function.function
       def modify(n):
@@ -3143,8 +3140,8 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
       modify(nested_input)
 
-    with self.assertRaisesRegexp(
-        ValueError, 'modify_same_flat.* should not modify'):
+    with self.assertRaisesRegex(ValueError,
+                                'modify_same_flat.* should not modify'):
 
       # The flat list doesn't change whereas the true structure changes
       @def_function.function
@@ -3166,7 +3163,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
         5,
         add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
 
-    with self.assertRaisesRegexp(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):
+    with self.assertRaisesRegex(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):
       with context.function_executor_type('NON_EXISTENT_EXECUTOR'):
         add_five(constant_op.constant(0, dtype=dtypes.int32))
 
@@ -3230,7 +3227,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
 
     with ops.device('GPU:0'):
       x = func()
-      self.assertRegexpMatches(x.device, 'GPU')
+      self.assertRegex(x.device, 'GPU')
 
   @test_util.run_in_graph_and_eager_modes
   def testShapeCaching(self):
@@ -3299,7 +3296,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     def g():
       f_concrete(constant_op.constant([1., 2.]))
 
-    with self.assertRaisesRegexp(ValueError, 'argument_name'):
+    with self.assertRaisesRegex(ValueError, 'argument_name'):
       g()
 
   @test_util.run_in_graph_and_eager_modes
@@ -3706,7 +3703,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
       return x
 
     conc = func.get_concrete_function(*conc_args, **conc_kwargs)
-    with self.assertRaisesRegexp(exception, error):
+    with self.assertRaisesRegex(exception, error):
       self.evaluate(conc(*call_args, **call_kwargs))
 
   # pylint: disable=g-long-lambda
@@ -3809,7 +3806,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     # Remove _function_spec, to disable the structured signature.
     conc._set_function_spec(None)  # pylint: disable=protected-access
 
-    with self.assertRaisesRegexp(exception, error):
+    with self.assertRaisesRegex(exception, error):
       self.evaluate(conc(*call_args, **call_kwargs))
 
   @test_util.run_in_graph_and_eager_modes
@@ -3846,14 +3843,13 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
                   r'    kangaroo: int32 Tensor, shape=\(3,\)\n'
                   r'  Returns:\n'
                   r'    int32 Tensor, shape=\(\)')
-    self.assertRegexpMatches(
-        c1.pretty_printed_signature(verbose=False), c1_summary)
-    self.assertRegexpMatches(
+    self.assertRegex(c1.pretty_printed_signature(verbose=False), c1_summary)
+    self.assertRegex(
         c1.pretty_printed_signature(verbose=True),
         c1_summary + '\n' + c1_details)
-    self.assertRegexpMatches(
+    self.assertRegex(
         repr(c1), r'<ConcreteFunction func\(x, kangaroo, octopus=7\) at .*>')
-    self.assertRegexpMatches(
+    self.assertRegex(
         str(c1), 'ConcreteFunction {}\n{}'.format(c1_summary, c1_details))
 
     c2 = func.get_concrete_function(scalar, ragged, 3)
@@ -3863,8 +3859,8 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
                   r'    kangaroo: RaggedTensorSpec\(.*\)\n'
                   r'  Returns:\n'
                   r'    int32 Tensor, shape=\(\)')
-    self.assertRegexpMatches(c2.pretty_printed_signature(),
-                             c2_summary + '\n' + c2_details)
+    self.assertRegex(c2.pretty_printed_signature(),
+                     c2_summary + '\n' + c2_details)
 
     c3 = func.get_concrete_function({'a': scalar, 'b': [ragged, ragged]})
     c3_summary = r'func\(x, kangaroo=None, octopus=7\)'
@@ -3882,8 +3878,8 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     # Python 3.5 does not guarantee deterministic iteration of dict contents,
     # which can lead to a mismatch in pretty_printed_signature output for "Args"
     if sys.version_info >= (3, 6):
-      self.assertRegexpMatches(c3.pretty_printed_signature(),
-                               c3_summary + '\n' + c3_details)
+      self.assertRegex(c3.pretty_printed_signature(),
+                       c3_summary + '\n' + c3_details)
 
     # pylint: disable=keyword-arg-before-vararg
     @def_function.function
@@ -3948,9 +3944,9 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
     t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
     m1, m2 = func(t, t, transpose_a=True)
     self.assertAllEqual(m1.numpy(), [[10, 14], [14, 20]])
-    self.assertRegexpMatches(m1.backing_device, 'CPU')
+    self.assertRegex(m1.backing_device, 'CPU')
     self.assertAllEqual(m2.numpy(), [[10, 14], [14, 20]])
-    self.assertRegexpMatches(m2.backing_device, 'GPU')
+    self.assertRegex(m2.backing_device, 'GPU')
 
   @test_util.run_gpu_only
   def testEmptyBody(self):
@@ -3965,9 +3961,9 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
 
     m1, m2 = func(a, b)
     self.assertAllEqual(m1.numpy(), 5.0)
-    self.assertRegexpMatches(m1.backing_device, 'GPU')
+    self.assertRegex(m1.backing_device, 'GPU')
     self.assertAllEqual(m2.numpy(), 3.0)
-    self.assertRegexpMatches(m2.backing_device, 'CPU')
+    self.assertRegex(m2.backing_device, 'CPU')
 
   @test_util.run_gpu_only
   def testMultiDeviceInt32(self):
@@ -4001,16 +3997,16 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
 
     m1, m2 = func(int_cpu, resource, int_gpu)
     self.assertAllEqual(m1.numpy(), 22)
-    self.assertRegexpMatches(m1.backing_device, 'CPU')
+    self.assertRegex(m1.backing_device, 'CPU')
     self.assertAllEqual(m2.numpy(), 39)
-    self.assertRegexpMatches(m2.backing_device, 'CPU')
+    self.assertRegex(m2.backing_device, 'CPU')
 
     # flip arguments
     m1, m2 = func(int_gpu, resource, int_cpu)
     self.assertAllEqual(m1.numpy(), 38)
-    self.assertRegexpMatches(m1.backing_device, 'CPU')
+    self.assertRegex(m1.backing_device, 'CPU')
     self.assertAllEqual(m2.numpy(), 23)
-    self.assertRegexpMatches(m2.backing_device, 'CPU')
+    self.assertRegex(m2.backing_device, 'CPU')
 
   @test_util.run_gpu_only
   def testMultiDeviceColocateWith(self):
@@ -4032,9 +4028,9 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
 
       ra, rb = func(a, b)
       self.assertEqual(ra.numpy(), 2.0)
-      self.assertRegexpMatches(ra.backing_device, dev1)
+      self.assertRegex(ra.backing_device, dev1)
       self.assertEqual(rb.numpy(), 30.0)
-      self.assertRegexpMatches(rb.backing_device, dev2)
+      self.assertRegex(rb.backing_device, dev2)
 
   @test_util.run_gpu_only
   def testMultiDeviceResources(self):
@@ -4055,17 +4051,17 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
 
     r1, r2 = func(c1, g1)
     self.assertEqual(r1.numpy(), 10.0)
-    self.assertRegexpMatches(r1.backing_device, 'CPU')
+    self.assertRegex(r1.backing_device, 'CPU')
     self.assertEqual(r2.numpy(), 21.0)
-    self.assertRegexpMatches(r2.backing_device, 'GPU')
+    self.assertRegex(r2.backing_device, 'GPU')
 
     # Call with flipped inputs. Check that we look at the resource's
     # device and reinstantiate the function when inputs' devices change.
     r1, r2 = func(g1, c1)
     self.assertEqual(r1.numpy(), 15.0)
-    self.assertRegexpMatches(r1.backing_device, 'CPU')
+    self.assertRegex(r1.backing_device, 'CPU')
     self.assertEqual(r2.numpy(), 14.0)
-    self.assertRegexpMatches(r2.backing_device, 'GPU')
+    self.assertRegex(r2.backing_device, 'GPU')
 
   @test_util.run_gpu_only
   def testOutputResources(self):
@@ -4084,12 +4080,12 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
 
     r1, res1, r2, res2 = func(c1, g1)
     self.assertEqual(r1.numpy(), 10.0)
-    self.assertRegexpMatches(r1.backing_device, 'CPU')
+    self.assertRegex(r1.backing_device, 'CPU')
     self.assertEqual(r2.numpy(), 21.0)
-    self.assertRegexpMatches(r2.backing_device, 'GPU')
+    self.assertRegex(r2.backing_device, 'GPU')
 
     def check_handle(handle, expected_value):
-      self.assertRegexpMatches(handle.backing_device, 'CPU')
+      self.assertRegex(handle.backing_device, 'CPU')
       tensor = gen_resource_variable_ops.read_variable_op(
           handle, dtypes.float32)
       self.assertEqual(tensor.numpy(), expected_value)
@@ -4105,9 +4101,9 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
     # for ops consuming handles returned from defuns.
     r1, res1, r2, res2 = func(g1, c1)
     self.assertEqual(r1.numpy(), 15.0)
-    self.assertRegexpMatches(r1.backing_device, 'CPU')
+    self.assertRegex(r1.backing_device, 'CPU')
     self.assertEqual(r2.numpy(), 14.0)
-    self.assertRegexpMatches(r2.backing_device, 'GPU')
+    self.assertRegex(r2.backing_device, 'GPU')
     check_handle(res1, 3.0)
     check_handle(res2, 2.0)
 
@@ -4139,7 +4135,7 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
     r1 = outer(g1)
 
     self.assertEqual(r1.numpy(), 6.0)
-    self.assertRegexpMatches(r1.backing_device, 'CPU')
+    self.assertRegex(r1.backing_device, 'CPU')
 
   @test_util.run_gpu_only
   def testReturnResourceFromNestedFunctionCall(self):
@@ -4170,10 +4166,10 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
     r1, res1 = outer(g1)
 
     self.assertEqual(r1.numpy(), 10.0)
-    self.assertRegexpMatches(r1.backing_device, 'CPU')
+    self.assertRegex(r1.backing_device, 'CPU')
 
     def check_handle(handle, expected_value):
-      self.assertRegexpMatches(handle.backing_device, 'CPU')
+      self.assertRegex(handle.backing_device, 'CPU')
       tensor = gen_resource_variable_ops.read_variable_op(
           handle, dtypes.float32)
       self.assertEqual(tensor.numpy(), expected_value)
@@ -4199,9 +4195,9 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
 
     # Make sure tensors are on expected devices.
     for tensor in [cc0, cc1]:
-      self.assertRegexpMatches(tensor.backing_device, 'CPU:0')
+      self.assertRegex(tensor.backing_device, 'CPU:0')
     for tensor in [cg0, cg1]:
-      self.assertRegexpMatches(tensor.backing_device, 'GPU:0')
+      self.assertRegex(tensor.backing_device, 'GPU:0')
 
     @function.defun
     def func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1):
@@ -4218,10 +4214,10 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
       return r1, r2, m2, m1
 
     r1, r2, m2, m1 = func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1)
-    self.assertRegexpMatches(m1.backing_device, 'CPU')
-    self.assertRegexpMatches(r1.backing_device, 'CPU')
-    self.assertRegexpMatches(m2.backing_device, 'GPU')
-    self.assertRegexpMatches(r2.backing_device, 'GPU')
+    self.assertRegex(m1.backing_device, 'CPU')
+    self.assertRegex(r1.backing_device, 'CPU')
+    self.assertRegex(m2.backing_device, 'GPU')
+    self.assertRegex(r2.backing_device, 'GPU')
     self.assertEqual(m1.numpy(), 34.0)
     self.assertEqual(r1.numpy(), 55000.0 + 3.0 * 19.0)
     self.assertEqual(m2.numpy(), 55.0)
@@ -4331,12 +4327,12 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase):
 
     # dtype mismatch
     value = constant_op.constant(1)
-    with self.assertRaisesRegexp(ValueError, 'Value .* to a tensor with dtype'):
+    with self.assertRaisesRegex(ValueError, 'Value .* to a tensor with dtype'):
       lazy_capture(2.0)
 
     # shape mismatch
     value = constant_op.constant([1.0])
-    with self.assertRaisesRegexp(ValueError, 'Value .* shape'):
+    with self.assertRaisesRegex(ValueError, 'Value .* shape'):
       lazy_capture(2.0)
 
   def testDeferredCaptureReturnNestWithCompositeTensor(self):
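
Note on the rename applied in the hunks above: assertRaisesRegex is the current
name for the deprecated assertRaisesRegexp alias in unittest; the alias simply
forwards to the new method and emits a DeprecationWarning when called. The
standalone sketch below is illustration only (not part of this patch), assuming
a Python version older than 3.12 where the deprecated alias still exists; the
test class and error message are invented for the example.

import unittest
import warnings


class AliasEquivalenceDemo(unittest.TestCase):
  # Hypothetical test case, for illustration only.

  def test_new_spelling(self):
    # assertRaisesRegex checks the raised exception's message with re.search().
    with self.assertRaisesRegex(ValueError, 'should not modify'):
      raise ValueError('func should not modify its Python input arguments')

  def test_old_spelling_warns(self):
    # The deprecated alias forwards to assertRaisesRegex but also emits a
    # DeprecationWarning, which is what this migration removes.
    with warnings.catch_warnings(record=True) as caught:
      warnings.simplefilter('always')
      with self.assertRaisesRegexp(ValueError, 'should not modify'):
        raise ValueError('func should not modify its Python input arguments')
    self.assertTrue(
        any(issubclass(w.category, DeprecationWarning) for w in caught))


if __name__ == '__main__':
  unittest.main()
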
diff --git a/tensorflow/python/eager/pywrap_tfe_test.py b/tensorflow/python/eager/pywrap_tfe_test.py
index c292223f629..9bf698fded0 100644
--- a/tensorflow/python/eager/pywrap_tfe_test.py
+++ b/tensorflow/python/eager/pywrap_tfe_test.py
@@ -207,18 +207,17 @@ class Tests(test.TestCase):
     ctx_handle = ctx._handle  # pylint: disable=protected-access
 
     # Not enough base params
-    with self.assertRaisesRegexp(ValueError,
-                                 "at least 5 items in the input tuple"):
+    with self.assertRaisesRegex(ValueError,
+                                "at least 5 items in the input tuple"):
       pywrap_tfe.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, "Identity")
 
     # Not enough inputs
-    with self.assertRaisesRegexp(ValueError,
-                                 "Expected to be at least 6, was 5"):
+    with self.assertRaisesRegex(ValueError, "Expected to be at least 6, was 5"):
       pywrap_tfe.TFE_Py_FastPathExecute(ctx_handle, ctx_handle, "Identity",
                                         None, [])
 
     # Bad type
-    with self.assertRaisesRegexp(TypeError, "expected a string for op_name"):
+    with self.assertRaisesRegex(TypeError, "expected a string for op_name"):
       pywrap_tfe.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, ctx_handle,
                                         None, [], a_2_by_2)
 
@@ -239,11 +238,11 @@ class Tests(test.TestCase):
   @test_util.assert_no_new_tensors
   @test_util.assert_no_garbage_created
   def testInvalidNumOutputs(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         Exception, r"Value for number_attr\(\) -1 < 0 \[Op:Split\]"):
       array_ops.split(value=[1, 2, 3], num_or_size_splits=-1)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         Exception,
         "Value for attr 'num_split' of 0 must be at least minimum 1"):
       array_ops.split(value=[1, 2, 3], num_or_size_splits=0)
@@ -263,8 +262,8 @@ class Tests(test.TestCase):
     with ops.Graph().as_default():
       a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
       m = resource_variable_ops.ResourceVariable(a_2_by_2)
-      with self.assertRaisesRegexp(TypeError,
-                                   "Expected list for 'values' argument"):
+      with self.assertRaisesRegex(TypeError,
+                                  "Expected list for 'values' argument"):
         _ = array_ops.stack(m, axis=1)
 
   def testGraphResourceVariableRaisesFallback(self):
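
The same pattern applies to assertRegexpMatches, whose non-deprecated name is
assertRegex (used above for the backing_device and pretty-printed-signature
checks). Like assertRaisesRegex, it applies re.search, so a bare substring such
as 'GPU' matches anywhere in a fully qualified device string. A small sketch
under the same assumptions; the device string here is a made-up example, not
taken from the patch.

import re
import unittest


class DeviceSubstringDemo(unittest.TestCase):
  # Hypothetical test case, for illustration only.

  def test_substring_pattern(self):
    # Example of the fully qualified device-string form asserted on above;
    # the exact value is invented for this sketch.
    device = '/job:localhost/replica:0/task:0/device:GPU:0'
    # assertRegex(text, pattern) passes when re.search(pattern, text) matches,
    # so a plain substring such as 'GPU' is a valid pattern.
    self.assertRegex(device, 'GPU')
    self.assertIsNotNone(re.search('GPU', device))


if __name__ == '__main__':
  unittest.main()
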
diff --git a/tensorflow/python/eager/remote_cluster_test.py b/tensorflow/python/eager/remote_cluster_test.py
index 864d5e7c0f3..84dbb11361a 100644
--- a/tensorflow/python/eager/remote_cluster_test.py
+++ b/tensorflow/python/eager/remote_cluster_test.py
@@ -623,14 +623,14 @@ class DynamicClusterTest(test.TestCase, parameterized.TestCase):
     self.assertGreater(total, 0)
 
   def testCheckAlive(self):
-    with self.assertRaisesRegexp(ValueError, "Context is not initialized."):
+    with self.assertRaisesRegex(ValueError, "Context is not initialized."):
       context.check_alive("/job:remote_device/task:0")
     context.context().ensure_initialized()
 
     self.assertTrue(context.check_alive("/job:remote_device/replica:0/task:0"))
     self.assertTrue(context.check_alive("/job:remote_device/replica:0/task:1"))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Client for target /job:remote_device/replica:0/task:10 not found."):
       context.check_alive("/job:remote_device/replica:0/task:10")
diff --git a/tensorflow/python/eager/tensor_test.py b/tensorflow/python/eager/tensor_test.py
index 6288adc373c..1d48d59f754 100644
--- a/tensorflow/python/eager/tensor_test.py
+++ b/tensorflow/python/eager/tensor_test.py
@@ -70,11 +70,11 @@ class TFETensorTest(test_util.TensorFlowTestCase):
     ctx = context.context()
     device = ctx.device_name
     # Missing device.
-    with self.assertRaisesRegexp(TypeError, r".*argument 'device' \(pos 2\).*"):
+    with self.assertRaisesRegex(TypeError, r".*argument 'device' \(pos 2\).*"):
       ops.EagerTensor(1)
     # Bad dtype type.
-    with self.assertRaisesRegexp(TypeError,
-                                 "Expecting a DataType value for dtype. Got"):
+    with self.assertRaisesRegex(TypeError,
+                                "Expecting a DataType value for dtype. Got"):
       ops.EagerTensor(1, device=device, dtype="1")
 
     # Following errors happen when trying to copy to GPU.
@@ -83,7 +83,7 @@ class TFETensorTest(test_util.TensorFlowTestCase):
 
     with ops.device("/device:GPU:0"):
       # Bad device.
-      with self.assertRaisesRegexp(TypeError, "Error parsing device argument"):
+      with self.assertRaisesRegex(TypeError, "Error parsing device argument"):
         ops.EagerTensor(1.0, device=1)
 
   def testNumpyValue(self):
@@ -109,7 +109,7 @@ class TFETensorTest(test_util.TensorFlowTestCase):
     self.assertAllEqual(values, t)
     ctx = context.context()
     # Bad dtype value.
-    with self.assertRaisesRegexp(TypeError, "Invalid dtype argument value"):
+    with self.assertRaisesRegex(TypeError, "Invalid dtype argument value"):
       ops.EagerTensor(values, device=ctx.device_name, dtype=12345)
 
   def testNumpyOrderHandling(self):
@@ -140,8 +140,7 @@ class TFETensorTest(test_util.TensorFlowTestCase):
     tensor = constant_op.constant(numpy_tensor)
     with self.assertRaises(TypeError):
       len(numpy_tensor)
-    with self.assertRaisesRegexp(
-        TypeError, r"Scalar tensor has no `len[(][)]`"):
+    with self.assertRaisesRegex(TypeError, r"Scalar tensor has no `len[(][)]`"):
       len(tensor)
 
     numpy_tensor = np.asarray([1.0, 2.0, 3.0])
@@ -274,8 +273,8 @@ class TFETensorTest(test_util.TensorFlowTestCase):
 
   def testIterateOverScalarTensorRaises(self):
     t = _create_tensor(1)
-    with self.assertRaisesRegexp(TypeError,
-                                 "Cannot iterate over a scalar tensor"):
+    with self.assertRaisesRegex(TypeError,
+                                "Cannot iterate over a scalar tensor"):
       iter(t)
 
   @test_util.run_gpu_only
@@ -367,9 +366,8 @@ class TFETensorTest(test_util.TensorFlowTestCase):
     self.assertAllEqual(x, [321, 16])
 
   def testEagerTensorError(self):
-    with self.assertRaisesRegexp(
-        TypeError,
-        "Cannot convert .* to EagerTensor of dtype .*"):
+    with self.assertRaisesRegex(TypeError,
+                                "Cannot convert .* to EagerTensor of dtype .*"):
       _ = ops.convert_to_tensor(1., dtype=dtypes.int32)
 
   def testEagerLargeConstant(self):
@@ -455,12 +453,12 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
   def testTensorListContainsNonTensors(self):
     t1 = _create_tensor([1, 2], dtype=dtypes.int32)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         r"Expected a list of EagerTensors but element 1 has type \"str\""):
       pywrap_tfe.TFE_Py_TensorShapeSlice([t1, "abc"], 0)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         r"Expected a list of EagerTensors but element 0 has type \"int\""):
       pywrap_tfe.TFE_Py_TensorShapeSlice([2, t1], 0)
@@ -468,7 +466,7 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
   def testTensorListNotList(self):
     t1 = _create_tensor([1, 2], dtype=dtypes.int32)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         r"tensors argument must be a list or a tuple. Got.*EagerTensor"):
       pywrap_tfe.TFE_Py_TensorShapeSlice(t1, -2)
@@ -476,9 +474,8 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
   def testNegativeSliceDim(self):
     t1 = _create_tensor([1, 2], dtype=dtypes.int32)
 
-    with self.assertRaisesRegexp(
-        ValueError,
-        r"Slice dimension must be non-negative. Got -2"):
+    with self.assertRaisesRegex(
+        ValueError, r"Slice dimension must be non-negative. Got -2"):
       pywrap_tfe.TFE_Py_TensorShapeSlice([t1], -2)
 
   def testUnicode(self):
@@ -495,31 +492,31 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
     t2 = _create_tensor([1, 2], dtype=dtypes.int32)
     t3 = _create_tensor(2, dtype=dtypes.int32)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         IndexError,
         r"Slice dimension \(2\) must be smaller than rank of all tensors, "
         "but tensor at index 0 has rank 2"):
       pywrap_tfe.TFE_Py_TensorShapeSlice([t1], 2)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         IndexError,
         r"Slice dimension \(1\) must be smaller than rank of all tensors, "
         "but tensor at index 0 has rank 1"):
       pywrap_tfe.TFE_Py_TensorShapeSlice([t2], 1)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         IndexError,
         r"Slice dimension \(1\) must be smaller than rank of all tensors, "
         "but tensor at index 1 has rank 1"):
       pywrap_tfe.TFE_Py_TensorShapeSlice([t1, t2], 1)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         IndexError,
         r"Slice dimension \(0\) must be smaller than rank of all tensors, "
         "but tensor at index 0 has rank 0"):
       pywrap_tfe.TFE_Py_TensorShapeSlice([t3], 0)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         IndexError,
         r"Slice dimension \(0\) must be smaller than rank of all tensors, "
         "but tensor at index 2 has rank 0"):
@@ -541,8 +538,7 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
   def testNonRectangularPackAsConstant(self):
     l = [array_ops.zeros((10, 1)).numpy(), array_ops.zeros(1).numpy()]
 
-    with self.assertRaisesRegexp(
-        ValueError, "non-rectangular Python sequence"):
+    with self.assertRaisesRegex(ValueError, "non-rectangular Python sequence"):
       constant_op.constant(l)
 
   @test_util.assert_no_new_pyobjects_executing_eagerly
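
One detail worth keeping in mind when reviewing the renamed assertions above:
the second argument is still interpreted as a regular expression (matched with
re.search), so metacharacters in literal error text, for example the escaped
parentheses and brackets in the raw strings above, must stay escaped after the
rename. A minimal sketch with an invented error message:

import re

# Invented message, shaped like the TypeError text asserted on above.
message = "Missing required positional argument 'device' (pos 2)"

# Escaped parentheses match the literal characters in the message ...
assert re.search(r"argument 'device' \(pos 2\)", message)
# ... while unescaped parentheses form a capture group and fail to match here.
assert re.search(r"argument 'device' (pos 2)", message) is None
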
diff --git a/tensorflow/python/feature_column/feature_column_test.py b/tensorflow/python/feature_column/feature_column_test.py
index 38800fc2162..a657656b9b6 100644
--- a/tensorflow/python/feature_column/feature_column_test.py
+++ b/tensorflow/python/feature_column/feature_column_test.py
@@ -133,11 +133,11 @@ class LazyColumnTest(test.TestCase):
 
   def test_error_if_feature_is_not_found(self):
     builder = _LazyBuilder(features={'a': [[2], [3.]]})
-    with self.assertRaisesRegexp(ValueError,
-                                 'bbb is not in features dictionary'):
+    with self.assertRaisesRegex(ValueError,
+                                'bbb is not in features dictionary'):
       builder.get('bbb')
-    with self.assertRaisesRegexp(ValueError,
-                                 'bbb is not in features dictionary'):
+    with self.assertRaisesRegex(ValueError,
+                                'bbb is not in features dictionary'):
       builder.get(u'bbb')
 
   def test_not_supported_feature_column(self):
@@ -157,8 +157,8 @@ class LazyColumnTest(test.TestCase):
         pass
 
     builder = _LazyBuilder(features={'a': [[2], [3.]]})
-    with self.assertRaisesRegexp(ValueError,
-                                 'NotAProperColumn is not supported'):
+    with self.assertRaisesRegex(ValueError,
+                                'NotAProperColumn is not supported'):
       builder.get(NotAProperColumn())
 
   def test_key_should_be_string_or_feature_colum(self):
@@ -167,7 +167,7 @@ class LazyColumnTest(test.TestCase):
       pass
 
     builder = _LazyBuilder(features={'a': [[2], [3.]]})
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, '"key" must be either a "str" or "_FeatureColumn".'):
       builder.get(NotAFeatureColumn())
 
@@ -199,7 +199,7 @@ class NumericColumnTest(test.TestCase):
     self.assertIsNone(a.normalizer_fn)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc._numeric_column(key=('aaa',))
 
   def test_shape_saved_as_tuple(self):
@@ -214,14 +214,14 @@ class NumericColumnTest(test.TestCase):
 
   def test_shape_and_default_value_compatibility(self):
     fc._numeric_column('aaa', shape=[2], default_value=[1, 2.])
-    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+    with self.assertRaisesRegex(ValueError, 'The shape of default_value'):
       fc._numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
     fc._numeric_column(
         'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
-    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+    with self.assertRaisesRegex(ValueError, 'The shape of default_value'):
       fc._numeric_column(
           'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
-    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+    with self.assertRaisesRegex(ValueError, 'The shape of default_value'):
       fc._numeric_column(
           'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])
 
@@ -230,30 +230,30 @@ class NumericColumnTest(test.TestCase):
         'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
     fc._numeric_column(
         'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
-    with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
+    with self.assertRaisesRegex(TypeError, 'must be compatible with dtype'):
       fc._numeric_column(
           'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
-    with self.assertRaisesRegexp(TypeError,
-                                 'default_value must be compatible with dtype'):
+    with self.assertRaisesRegex(TypeError,
+                                'default_value must be compatible with dtype'):
       fc._numeric_column('aaa', default_value=['string'])
 
   def test_shape_must_be_positive_integer(self):
-    with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
+    with self.assertRaisesRegex(TypeError, 'shape dimensions must be integer'):
       fc._numeric_column(
           'aaa', shape=[
               1.0,
           ])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'shape dimensions must be greater than 0'):
+    with self.assertRaisesRegex(ValueError,
+                                'shape dimensions must be greater than 0'):
       fc._numeric_column(
           'aaa', shape=[
               0,
           ])
 
   def test_dtype_is_convertible_to_float(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'dtype must be convertible to float'):
+    with self.assertRaisesRegex(ValueError,
+                                'dtype must be convertible to float'):
       fc._numeric_column('aaa', dtype=dtypes.string)
 
   def test_scalar_default_value_fills_the_shape(self):
@@ -306,7 +306,7 @@ class NumericColumnTest(test.TestCase):
       self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval())
 
   def test_normalizer_fn_must_be_callable(self):
-    with self.assertRaisesRegexp(TypeError, 'must be a callable'):
+    with self.assertRaisesRegex(TypeError, 'must be a callable'):
       fc._numeric_column('price', normalizer_fn='NotACallable')
 
   @test_util.run_deprecated_v1
@@ -337,7 +337,7 @@ class NumericColumnTest(test.TestCase):
             sparse_tensor.SparseTensor(
                 indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
     })
-    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
+    with self.assertRaisesRegex(ValueError, 'must be a Tensor'):
       price._transform_feature(builder)
 
   @test_util.run_deprecated_v1
@@ -388,30 +388,26 @@ class BucketizedColumnTest(test.TestCase):
 
   def test_invalid_source_column_type(self):
     a = fc._categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'source_column must be a column generated with numeric_column'):
       fc._bucketized_column(a, boundaries=[0, 1])
 
   def test_invalid_source_column_shape(self):
     a = fc._numeric_column('aaa', shape=[2, 3])
-    with self.assertRaisesRegexp(
-        ValueError, 'source_column must be one-dimensional column'):
+    with self.assertRaisesRegex(ValueError,
+                                'source_column must be one-dimensional column'):
       fc._bucketized_column(a, boundaries=[0, 1])
 
   def test_invalid_boundaries(self):
     a = fc._numeric_column('aaa')
-    with self.assertRaisesRegexp(
-        ValueError, 'boundaries must be a sorted list'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must be a sorted list'):
       fc._bucketized_column(a, boundaries=None)
-    with self.assertRaisesRegexp(
-        ValueError, 'boundaries must be a sorted list'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must be a sorted list'):
       fc._bucketized_column(a, boundaries=1.)
-    with self.assertRaisesRegexp(
-        ValueError, 'boundaries must be a sorted list'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must be a sorted list'):
       fc._bucketized_column(a, boundaries=[1, 0])
-    with self.assertRaisesRegexp(
-        ValueError, 'boundaries must be a sorted list'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must be a sorted list'):
       fc._bucketized_column(a, boundaries=[1, 1])
 
   def test_name(self):
@@ -540,7 +536,7 @@ class BucketizedColumnTest(test.TestCase):
             sparse_tensor.SparseTensor(
                 indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
     })
-    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
+    with self.assertRaisesRegex(ValueError, 'must be a Tensor'):
       bucketized_price._transform_feature(builder)
 
   @test_util.run_deprecated_v1
@@ -681,22 +677,22 @@ class HashedCategoricalColumnTest(test.TestCase):
     self.assertEqual(dtypes.string, a.dtype)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc._categorical_column_with_hash_bucket(('key',), 10)
 
   def test_bucket_size_should_be_given(self):
-    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be set.'):
       fc._categorical_column_with_hash_bucket('aaa', None)
 
   def test_bucket_size_should_be_positive(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'hash_bucket_size must be at least 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'hash_bucket_size must be at least 1'):
       fc._categorical_column_with_hash_bucket('aaa', 0)
 
   def test_dtype_should_be_string_or_integer(self):
     fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
     fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)
 
   @test_util.run_deprecated_v1
@@ -786,7 +782,7 @@ class HashedCategoricalColumnTest(test.TestCase):
     })
     builder.get(string_fc)
     builder.get(int_fc)
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       builder.get(float_fc)
 
   def test_dtype_should_match_with_tensor(self):
@@ -795,7 +791,7 @@ class HashedCategoricalColumnTest(test.TestCase):
     wire_tensor = sparse_tensor.SparseTensor(
         values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
     builder = _LazyBuilder({'wire': wire_tensor})
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       builder.get(hashed_sparse)
 
   @test_util.run_deprecated_v1
@@ -916,37 +912,34 @@ class HashedCategoricalColumnTest(test.TestCase):
 class CrossedColumnTest(test.TestCase):
 
   def test_keys_empty(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'keys must be a list with length > 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'keys must be a list with length > 1'):
       fc._crossed_column([], 10)
 
   def test_keys_length_one(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'keys must be a list with length > 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'keys must be a list with length > 1'):
       fc._crossed_column(['a'], 10)
 
   def test_key_type_unsupported(self):
-    with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
+    with self.assertRaisesRegex(ValueError, 'Unsupported key type'):
       fc._crossed_column(['a', fc._numeric_column('c')], 10)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'categorical_column_with_hash_bucket is not supported'):
       fc._crossed_column(
           ['a', fc._categorical_column_with_hash_bucket('c', 10)], 10)
 
   def test_hash_bucket_size_negative(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'hash_bucket_size must be > 1'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
       fc._crossed_column(['a', 'c'], -1)
 
   def test_hash_bucket_size_zero(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'hash_bucket_size must be > 1'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
       fc._crossed_column(['a', 'c'], 0)
 
   def test_hash_bucket_size_none(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'hash_bucket_size must be > 1'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
       fc._crossed_column(['a', 'c'], None)
 
   def test_name(self):
@@ -1192,7 +1185,7 @@ class CrossedColumnTest(test.TestCase):
     t = _TestColumnWithWeights()
     crossed = fc._crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'crossed_column does not support weight_tensor.*{}'.format(t.name)):
         fc.linear_model({
@@ -1280,7 +1273,7 @@ class CrossedColumnTest(test.TestCase):
     t = _TestColumnWithWeights()
     crossed = fc._crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'crossed_column does not support weight_tensor.*{}'.format(t.name)):
         get_keras_linear_model_predictions({
@@ -1335,12 +1328,12 @@ def get_keras_linear_model_predictions(features,
 class LinearModelTest(test.TestCase):
 
   def test_raises_if_empty_feature_columns(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'feature_columns must not be empty'):
+    with self.assertRaisesRegex(ValueError,
+                                'feature_columns must not be empty'):
       fc.linear_model(features={}, feature_columns=[])
 
   def test_should_be_feature_column(self):
-    with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
+    with self.assertRaisesRegex(ValueError, 'must be a _FeatureColumn'):
       fc.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
 
   def test_should_be_dense_or_categorical_column(self):
@@ -1358,19 +1351,19 @@ class LinearModelTest(test.TestCase):
       def _parse_example_spec(self):
         pass
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
       fc.linear_model(
           features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
 
   def test_does_not_support_dict_columns(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Expected feature_columns to be iterable, found dict.'):
       fc.linear_model(
           features={'a': [[0]]}, feature_columns={'a': fc._numeric_column('a')})
 
   def test_raises_if_duplicate_name(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Duplicate feature column name found for columns'):
       fc.linear_model(
           features={'a': [[0]]},
@@ -1608,7 +1601,7 @@ class LinearModelTest(test.TestCase):
     price = fc._numeric_column('price', shape=2)
     with ops.Graph().as_default():
       features = {'price': [[1.], [5.]]}
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
         fc.linear_model(features, [price])
@@ -1831,7 +1824,7 @@ class LinearModelTest(test.TestCase):
           'price1': [[1.], [5.], [7.]],  # batchsize = 3
           'price2': [[3.], [4.]]  # batchsize = 2
       }
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'Batch size \(first dimension\) of each feature must be same.'):
       fc.linear_model(features, [price1, price2])
@@ -1846,7 +1839,7 @@ class LinearModelTest(test.TestCase):
           'price2': [[3.], [4.]],  # batchsize = 2
           'price3': [[3.], [4.], [5.]]  # batchsize = 3
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         fc.linear_model(features, [price1, price2, price3])
@@ -1861,8 +1854,8 @@ class LinearModelTest(test.TestCase):
       }
       predictions = fc.linear_model(features, [price1, price2])
       with _initialized_session() as sess:
-        with self.assertRaisesRegexp(errors.OpError,
-                                     'must have the same size and shape'):
+        with self.assertRaisesRegex(errors.OpError,
+                                    'must have the same size and shape'):
           sess.run(
               predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
 
@@ -1976,7 +1969,7 @@ class LinearModelTest(test.TestCase):
     self.assertEqual(0, features['price'].shape.ndims)
 
     # Static rank 0 should fail
-    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
       fc.linear_model(features, [price])
 
     # Dynamic rank 0 should fail
@@ -2014,12 +2007,12 @@ class LinearModelTest(test.TestCase):
 class _LinearModelTest(test.TestCase):
 
   def test_raises_if_empty_feature_columns(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'feature_columns must not be empty'):
+    with self.assertRaisesRegex(ValueError,
+                                'feature_columns must not be empty'):
       get_keras_linear_model_predictions(features={}, feature_columns=[])
 
   def test_should_be_feature_column(self):
-    with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
+    with self.assertRaisesRegex(ValueError, 'must be a _FeatureColumn'):
       get_keras_linear_model_predictions(
           features={'a': [[0]]}, feature_columns='NotSupported')
 
@@ -2038,19 +2031,19 @@ class _LinearModelTest(test.TestCase):
       def _parse_example_spec(self):
         pass
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
       get_keras_linear_model_predictions(
           features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
 
   def test_does_not_support_dict_columns(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Expected feature_columns to be iterable, found dict.'):
       fc.linear_model(
           features={'a': [[0]]}, feature_columns={'a': fc._numeric_column('a')})
 
   def test_raises_if_duplicate_name(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Duplicate feature column name found for columns'):
       get_keras_linear_model_predictions(
           features={'a': [[0]]},
@@ -2276,7 +2269,7 @@ class _LinearModelTest(test.TestCase):
     price = fc._numeric_column('price', shape=2)
     with ops.Graph().as_default():
       features = {'price': [[1.], [5.]]}
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
         get_keras_linear_model_predictions(features, [price])
@@ -2466,7 +2459,7 @@ class _LinearModelTest(test.TestCase):
           'price1': [[1.], [5.], [7.]],  # batchsize = 3
           'price2': [[3.], [4.]]  # batchsize = 2
       }
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
       get_keras_linear_model_predictions(features, [price1, price2])
@@ -2481,7 +2474,7 @@ class _LinearModelTest(test.TestCase):
           'price2': [[3.], [4.]],  # batchsize = 2
           'price3': [[3.], [4.], [5.]]  # batchsize = 3
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         get_keras_linear_model_predictions(features, [price1, price2, price3])
@@ -2497,8 +2490,8 @@ class _LinearModelTest(test.TestCase):
       predictions = get_keras_linear_model_predictions(features,
                                                        [price1, price2])
       with _initialized_session() as sess:
-        with self.assertRaisesRegexp(errors.OpError,
-                                     'must have the same size and shape'):
+        with self.assertRaisesRegex(errors.OpError,
+                                    'must have the same size and shape'):
           sess.run(
               predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
 
@@ -2618,7 +2611,7 @@ class _LinearModelTest(test.TestCase):
     self.assertEqual(0, features['price'].shape.ndims)
 
     # Static rank 0 should fail
-    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
       get_keras_linear_model_predictions(features, [price])
 
     # Dynamic rank 0 should fail
@@ -2735,12 +2728,12 @@ class InputLayerTest(test.TestCase):
 class FunctionalInputLayerTest(test.TestCase):
 
   def test_raises_if_empty_feature_columns(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'feature_columns must not be empty'):
+    with self.assertRaisesRegex(ValueError,
+                                'feature_columns must not be empty'):
       fc.input_layer(features={}, feature_columns=[])
 
   def test_should_be_dense_column(self):
-    with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
+    with self.assertRaisesRegex(ValueError, 'must be a _DenseColumn'):
       fc.input_layer(
           features={'a': [[0]]},
           feature_columns=[
@@ -2748,7 +2741,7 @@ class FunctionalInputLayerTest(test.TestCase):
           ])
 
   def test_does_not_support_dict_columns(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Expected feature_columns to be iterable, found dict.'):
       fc.input_layer(
           features={'a': [[0]]}, feature_columns={'a': fc._numeric_column('a')})
@@ -2769,7 +2762,7 @@ class FunctionalInputLayerTest(test.TestCase):
         self.assertAllClose([[0., 1.]], self.evaluate(net))
 
   def test_raises_if_duplicate_name(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Duplicate feature column name found for columns'):
       fc.input_layer(
           features={'a': [[0]]},
@@ -2796,7 +2789,7 @@ class FunctionalInputLayerTest(test.TestCase):
     price = fc._numeric_column('price', shape=2)
     with ops.Graph().as_default():
       features = {'price': [[1.], [5.]]}
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
         fc.input_layer(features, [price])
@@ -2962,7 +2955,7 @@ class FunctionalInputLayerTest(test.TestCase):
               sparse_tensor.SparseTensor(
                   indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
       }
-      with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
+      with self.assertRaisesRegex(Exception, 'must be a _DenseColumn'):
         fc.input_layer(features, [animal])
 
   def test_static_batch_size_mismatch(self):
@@ -2973,7 +2966,7 @@ class FunctionalInputLayerTest(test.TestCase):
           'price1': [[1.], [5.], [7.]],  # batchsize = 3
           'price2': [[3.], [4.]]  # batchsize = 2
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         fc.input_layer(features, [price1, price2])
@@ -2988,7 +2981,7 @@ class FunctionalInputLayerTest(test.TestCase):
           'price2': [[3.], [4.]],  # batchsize = 2
           'price3': [[3.], [4.], [5.]]  # batchsize = 3
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         fc.input_layer(features, [price1, price2, price3])
@@ -3003,8 +2996,8 @@ class FunctionalInputLayerTest(test.TestCase):
       }
       net = fc.input_layer(features, [price1, price2])
       with _initialized_session() as sess:
-        with self.assertRaisesRegexp(errors.OpError,
-                                     'Dimensions of inputs should match'):
+        with self.assertRaisesRegex(errors.OpError,
+                                    'Dimensions of inputs should match'):
           sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
 
   def test_runtime_batch_size_matches(self):
@@ -3255,7 +3248,7 @@ class FunctionalInputLayerTest(test.TestCase):
     self.assertEqual(0, features['price'].shape.ndims)
 
     # Static rank 0 should fail
-    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
       fc.input_layer(features, [price])
 
     # Dynamic rank 0 should fail
@@ -3287,7 +3280,7 @@ class MakeParseExampleSpecTest(test.TestCase):
     key1 = 'key1'
     parse_spec1 = parsing_ops.FixedLenFeature(
         shape=(2,), dtype=dtypes.float32, default_value=0.)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'All feature_columns must be _FeatureColumn instances.*invalid_column'):
       fc.make_parse_example_spec(
@@ -3317,7 +3310,7 @@ class MakeParseExampleSpecTest(test.TestCase):
     parse_spec1 = parsing_ops.FixedLenFeature(
         shape=(2,), dtype=dtypes.float32, default_value=0.)
     parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'feature_columns contain different parse_spec for key key1'):
       fc.make_parse_example_spec(
@@ -3389,7 +3382,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
     }, column._parse_example_spec)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc._categorical_column_with_vocabulary_file(
           key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
 
@@ -3422,12 +3415,12 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
       }, column._parse_example_spec)
 
   def test_vocabulary_file_none(self):
-    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
+    with self.assertRaisesRegex(ValueError, 'Missing vocabulary_file'):
       fc._categorical_column_with_vocabulary_file(
           key='aaa', vocabulary_file=None, vocabulary_size=3)
 
   def test_vocabulary_file_empty_string(self):
-    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
+    with self.assertRaisesRegex(ValueError, 'Missing vocabulary_file'):
       fc._categorical_column_with_vocabulary_file(
           key='aaa', vocabulary_file='', vocabulary_size=3)
 
@@ -3440,17 +3433,17 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         values=('marlo', 'skywalker', 'omar'),
         dense_shape=(2, 2))
     column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
-    with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
+    with self.assertRaisesRegex(errors.OpError, 'file_does_not_exist'):
       with self.cached_session():
         lookup_ops.tables_initializer().run()
 
   def test_invalid_vocabulary_size(self):
-    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
+    with self.assertRaisesRegex(ValueError, 'Invalid vocabulary_size'):
       fc._categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file=self._wire_vocabulary_file_name,
           vocabulary_size=-1)
-    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
+    with self.assertRaisesRegex(ValueError, 'Invalid vocabulary_size'):
       fc._categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file=self._wire_vocabulary_file_name,
@@ -3467,12 +3460,12 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         values=('marlo', 'skywalker', 'omar'),
         dense_shape=(2, 2))
     column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
-    with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
+    with self.assertRaisesRegex(errors.OpError, 'Invalid vocab_size'):
       with self.cached_session():
         lookup_ops.tables_initializer().run()
 
   def test_invalid_num_oov_buckets(self):
-    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
+    with self.assertRaisesRegex(ValueError, 'Invalid num_oov_buckets'):
       fc._categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file='path',
@@ -3480,7 +3473,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
           num_oov_buckets=-1)
 
   def test_invalid_dtype(self):
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       fc._categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file='path',
@@ -3488,8 +3481,8 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
           dtype=dtypes.float64)
 
   def test_invalid_buckets_and_default_value(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'both num_oov_buckets and default_value'):
+    with self.assertRaisesRegex(ValueError,
+                                'both num_oov_buckets and default_value'):
       fc._categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file=self._wire_vocabulary_file_name,
@@ -3507,7 +3500,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=(12, 24, 36),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
 
   def test_invalid_input_dtype_string(self):
@@ -3520,7 +3513,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
 
   @test_util.run_deprecated_v1
@@ -3849,7 +3842,7 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
     }, column._parse_example_spec)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc._categorical_column_with_vocabulary_list(
           key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))
 
@@ -3888,57 +3881,57 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
       }, column._parse_example_spec)
 
   def test_invalid_dtype(self):
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa',
           vocabulary_list=('omar', 'stringer', 'marlo'),
           dtype=dtypes.float32)
 
   def test_invalid_mapping_dtype(self):
-    with self.assertRaisesRegexp(
-        ValueError, r'vocabulary dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError,
+                                r'vocabulary dtype must be string or integer'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12., 24., 36.))
 
   def test_mismatched_int_dtype(self):
-    with self.assertRaisesRegexp(
-        ValueError, r'dtype.*and vocabulary dtype.*do not match'):
+    with self.assertRaisesRegex(ValueError,
+                                r'dtype.*and vocabulary dtype.*do not match'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa',
           vocabulary_list=('omar', 'stringer', 'marlo'),
           dtype=dtypes.int32)
 
   def test_mismatched_string_dtype(self):
-    with self.assertRaisesRegexp(
-        ValueError, r'dtype.*and vocabulary dtype.*do not match'):
+    with self.assertRaisesRegex(ValueError,
+                                r'dtype.*and vocabulary dtype.*do not match'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
 
   def test_none_mapping(self):
-    with self.assertRaisesRegexp(
-        ValueError, r'vocabulary_list.*must be non-empty'):
+    with self.assertRaisesRegex(ValueError,
+                                r'vocabulary_list.*must be non-empty'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=None)
 
   def test_empty_mapping(self):
-    with self.assertRaisesRegexp(
-        ValueError, r'vocabulary_list.*must be non-empty'):
+    with self.assertRaisesRegex(ValueError,
+                                r'vocabulary_list.*must be non-empty'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=tuple([]))
 
   def test_duplicate_mapping(self):
-    with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
+    with self.assertRaisesRegex(ValueError, 'Duplicate keys'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12, 24, 12))
 
   def test_invalid_num_oov_buckets(self):
-    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
+    with self.assertRaisesRegex(ValueError, 'Invalid num_oov_buckets'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=-1)
 
   def test_invalid_buckets_and_default_value(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'both num_oov_buckets and default_value'):
+    with self.assertRaisesRegex(ValueError,
+                                'both num_oov_buckets and default_value'):
       fc._categorical_column_with_vocabulary_list(
           key='aaa',
           vocabulary_list=(12, 24, 36),
@@ -3952,7 +3945,7 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=(12, 24, 36),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
 
   def test_invalid_input_dtype_string(self):
@@ -3962,7 +3955,7 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
 
   @test_util.run_deprecated_v1
@@ -4257,7 +4250,7 @@ class IdentityCategoricalColumnTest(test.TestCase):
     }, column._parse_example_spec)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc._categorical_column_with_identity(key=('aaa',), num_buckets=3)
 
   @test_util.run_deprecated_v1
@@ -4271,20 +4264,20 @@ class IdentityCategoricalColumnTest(test.TestCase):
       }, column._parse_example_spec)
 
   def test_invalid_num_buckets_zero(self):
-    with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
+    with self.assertRaisesRegex(ValueError, 'num_buckets 0 < 1'):
       fc._categorical_column_with_identity(key='aaa', num_buckets=0)
 
   def test_invalid_num_buckets_negative(self):
-    with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
+    with self.assertRaisesRegex(ValueError, 'num_buckets -1 < 1'):
       fc._categorical_column_with_identity(key='aaa', num_buckets=-1)
 
   def test_invalid_default_value_too_small(self):
-    with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
+    with self.assertRaisesRegex(ValueError, 'default_value -1 not in range'):
       fc._categorical_column_with_identity(
           key='aaa', num_buckets=3, default_value=-1)
 
   def test_invalid_default_value_too_big(self):
-    with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
+    with self.assertRaisesRegex(ValueError, 'default_value 3 not in range'):
       fc._categorical_column_with_identity(
           key='aaa', num_buckets=3, default_value=3)
 
@@ -4294,7 +4287,7 @@ class IdentityCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
+    with self.assertRaisesRegex(ValueError, 'Invalid input, not integer'):
       column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
 
   @test_util.run_deprecated_v1
@@ -4416,8 +4409,8 @@ class IdentityCategoricalColumnTest(test.TestCase):
         _LazyBuilder({'aaa': sparse_input}))
 
     with _initialized_session():
-      with self.assertRaisesRegexp(errors.OpError,
-                                   r'indices\[0\] .* 2 .* \[0, 2\)'):
+      with self.assertRaisesRegex(errors.OpError,
+                                  r'indices\[0\] .* 2 .* \[0, 2\)'):
         self.evaluate(embedding_lookup)
 
   @test_util.run_deprecated_v1
@@ -4912,7 +4905,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
   def test_invalid_initializer(self):
     categorical_column = fc._categorical_column_with_identity(
         key='aaa', num_buckets=3)
-    with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
+    with self.assertRaisesRegex(ValueError, 'initializer must be callable'):
       fc._embedding_column(
           categorical_column, dimension=2, initializer='not_fn')
 
@@ -5710,7 +5703,7 @@ class SharedEmbeddingColumnTest(test.TestCase, parameterized.TestCase):
         key='aaa', num_buckets=3)
     categorical_column_b = fc._categorical_column_with_identity(
         key='bbb', num_buckets=3)
-    with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
+    with self.assertRaisesRegex(ValueError, 'initializer must be callable'):
       fc_new.shared_embedding_columns(
           [categorical_column_a, categorical_column_b],
           dimension=2,
@@ -5724,9 +5717,8 @@ class SharedEmbeddingColumnTest(test.TestCase, parameterized.TestCase):
         key='bbb', num_buckets=3)
     categorical_column_c = fc._categorical_column_with_hash_bucket(
         key='ccc', hash_bucket_size=3)
-    with self.assertRaisesRegexp(
-        ValueError,
-        'all categorical_columns must have the same type.*'
+    with self.assertRaisesRegex(
+        ValueError, 'all categorical_columns must have the same type.*'
         '_IdentityCategoricalColumn.*_HashedCategoricalColumn'):
       fc_new.shared_embedding_columns(
           [categorical_column_a, categorical_column_b, categorical_column_c],
@@ -6347,7 +6339,7 @@ class WeightedCategoricalColumnTest(test.TestCase):
       }, column._parse_example_spec)
 
   def test_invalid_dtype_none(self):
-    with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
+    with self.assertRaisesRegex(ValueError, 'is not convertible to float'):
       fc._weighted_categorical_column(
           categorical_column=fc._categorical_column_with_identity(
               key='ids', num_buckets=3),
@@ -6355,7 +6347,7 @@ class WeightedCategoricalColumnTest(test.TestCase):
           dtype=None)
 
   def test_invalid_dtype_string(self):
-    with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
+    with self.assertRaisesRegex(ValueError, 'is not convertible to float'):
       fc._weighted_categorical_column(
           categorical_column=fc._categorical_column_with_identity(
               key='ids', num_buckets=3),
@@ -6371,11 +6363,11 @@ class WeightedCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'Bad dtype'):
+    with self.assertRaisesRegex(ValueError, 'Bad dtype'):
       _transform_features({'ids': strings, 'values': strings}, (column,))
 
   def test_column_name_collision(self):
-    with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'):
+    with self.assertRaisesRegex(ValueError, r'Parse config.*already exists'):
       fc._weighted_categorical_column(
           categorical_column=fc._categorical_column_with_identity(
               key='aaa', num_buckets=3),
@@ -6390,8 +6382,8 @@ class WeightedCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(
-        ValueError, 'values is not in features dictionary'):
+    with self.assertRaisesRegex(ValueError,
+                                'values is not in features dictionary'):
       _transform_features({'ids': inputs}, (column,))
 
   @test_util.run_deprecated_v1
@@ -6555,8 +6547,8 @@ class WeightedCategoricalColumnTest(test.TestCase):
             key='ids', num_buckets=3),
         weight_feature_key='values')
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError,
-                                   r'Dimensions.*are not compatible'):
+      with self.assertRaisesRegex(ValueError,
+                                  r'Dimensions.*are not compatible'):
         get_keras_linear_model_predictions({
             'ids':
                 sparse_tensor.SparseTensorValue(
@@ -6592,7 +6584,7 @@ class WeightedCategoricalColumnTest(test.TestCase):
       config.graph_options.rewrite_options.constant_folding = (
           rewriter_config_pb2.RewriterConfig.OFF)
       with _initialized_session(config):
-        with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
+        with self.assertRaisesRegex(errors.OpError, 'Incompatible shapes'):
           self.evaluate(predictions)
 
   def test_keras_linear_model_mismatched_dense_shape(self):
@@ -6656,8 +6648,8 @@ class WeightedCategoricalColumnTest(test.TestCase):
             key='ids', num_buckets=3),
         weight_feature_key='values')
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
-          ValueError, r'Dimensions.*are not compatible'):
+      with self.assertRaisesRegex(ValueError,
+                                  r'Dimensions.*are not compatible'):
         fc.linear_model({
             'ids': sparse_tensor.SparseTensorValue(
                 indices=((0, 0), (1, 0), (1, 1)),
@@ -6691,7 +6683,7 @@ class WeightedCategoricalColumnTest(test.TestCase):
       config.graph_options.rewrite_options.constant_folding = (
           rewriter_config_pb2.RewriterConfig.OFF)
       with _initialized_session(config):
-        with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
+        with self.assertRaisesRegex(errors.OpError, 'Incompatible shapes'):
           self.evaluate(predictions)
 
   def test_linear_model_mismatched_dense_shape(self):
diff --git a/tensorflow/python/feature_column/feature_column_v2_test.py b/tensorflow/python/feature_column/feature_column_v2_test.py
index dda1af8a00e..47cda174064 100644
--- a/tensorflow/python/feature_column/feature_column_v2_test.py
+++ b/tensorflow/python/feature_column/feature_column_v2_test.py
@@ -196,11 +196,11 @@ class LazyColumnTest(test.TestCase):
   def test_error_if_feature_is_not_found(self):
     transformation_cache = fc.FeatureTransformationCache(
         features={'a': [[2], [3.]]})
-    with self.assertRaisesRegexp(ValueError,
-                                 'bbb is not in features dictionary'):
+    with self.assertRaisesRegex(ValueError,
+                                'bbb is not in features dictionary'):
       transformation_cache.get('bbb', None)
-    with self.assertRaisesRegexp(ValueError,
-                                 'bbb is not in features dictionary'):
+    with self.assertRaisesRegex(ValueError,
+                                'bbb is not in features dictionary'):
       transformation_cache.get(u'bbb', None)
 
   def test_not_supported_feature_column(self):
@@ -225,8 +225,8 @@ class LazyColumnTest(test.TestCase):
 
     transformation_cache = fc.FeatureTransformationCache(
         features={'a': [[2], [3.]]})
-    with self.assertRaisesRegexp(ValueError,
-                                 'NotAProperColumn is not supported'):
+    with self.assertRaisesRegex(ValueError,
+                                'NotAProperColumn is not supported'):
       transformation_cache.get(NotAProperColumn(), None)
 
   def test_key_should_be_string_or_feature_colum(self):
@@ -236,7 +236,7 @@ class LazyColumnTest(test.TestCase):
 
     transformation_cache = fc.FeatureTransformationCache(
         features={'a': [[2], [3.]]})
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, '"key" must be either a "str" or "FeatureColumn".'):
       transformation_cache.get(NotAFeatureColumn(), None)
 
@@ -270,7 +270,7 @@ class NumericColumnTest(test.TestCase):
     self.assertTrue(a._is_v2_column)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc.numeric_column(key=('aaa',))
 
   def test_shape_saved_as_tuple(self):
@@ -286,15 +286,15 @@ class NumericColumnTest(test.TestCase):
   def test_shape_and_default_value_compatibility(self):
     a = fc.numeric_column('aaa', shape=[2], default_value=[1, 2.])
     self.assertEqual((1, 2.), a.default_value)
-    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+    with self.assertRaisesRegex(ValueError, 'The shape of default_value'):
       fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
       a = fc.numeric_column(
           'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
       self.assertEqual(((2, 3), (1, 2), (2, 3.)), a.default_value)
-    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+    with self.assertRaisesRegex(ValueError, 'The shape of default_value'):
       fc.numeric_column(
           'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
-    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
+    with self.assertRaisesRegex(ValueError, 'The shape of default_value'):
       fc.numeric_column(
           'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])
 
@@ -303,30 +303,30 @@ class NumericColumnTest(test.TestCase):
         'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
     fc.numeric_column(
         'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
-    with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
+    with self.assertRaisesRegex(TypeError, 'must be compatible with dtype'):
       fc.numeric_column(
           'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
-    with self.assertRaisesRegexp(TypeError,
-                                 'default_value must be compatible with dtype'):
+    with self.assertRaisesRegex(TypeError,
+                                'default_value must be compatible with dtype'):
       fc.numeric_column('aaa', default_value=['string'])
 
   def test_shape_must_be_positive_integer(self):
-    with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
+    with self.assertRaisesRegex(TypeError, 'shape dimensions must be integer'):
       fc.numeric_column(
           'aaa', shape=[
               1.0,
           ])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'shape dimensions must be greater than 0'):
+    with self.assertRaisesRegex(ValueError,
+                                'shape dimensions must be greater than 0'):
       fc.numeric_column(
           'aaa', shape=[
               0,
           ])
 
   def test_dtype_is_convertible_to_float(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'dtype must be convertible to float'):
+    with self.assertRaisesRegex(ValueError,
+                                'dtype must be convertible to float'):
       fc.numeric_column('aaa', dtype=dtypes.string)
 
   def test_scalar_default_value_fills_the_shape(self):
@@ -381,7 +381,7 @@ class NumericColumnTest(test.TestCase):
                         self.evaluate(features['price']))
 
   def test_normalizer_fn_must_be_callable(self):
-    with self.assertRaisesRegexp(TypeError, 'must be a callable'):
+    with self.assertRaisesRegex(TypeError, 'must be a callable'):
       fc.numeric_column('price', normalizer_fn='NotACallable')
 
   def test_normalizer_fn_transform_feature(self):
@@ -416,7 +416,7 @@ class NumericColumnTest(test.TestCase):
             sparse_tensor.SparseTensor(
                 indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
     })
-    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
+    with self.assertRaisesRegex(ValueError, 'must be a Tensor'):
       price.transform_feature(transformation_cache, None)
 
   def test_deep_copy(self):
@@ -478,30 +478,26 @@ class BucketizedColumnTest(test.TestCase):
 
   def test_invalid_source_column_type(self):
     a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'source_column must be a column generated with numeric_column'):
       fc.bucketized_column(a, boundaries=[0, 1])
 
   def test_invalid_source_column_shape(self):
     a = fc.numeric_column('aaa', shape=[2, 3])
-    with self.assertRaisesRegexp(
-        ValueError, 'source_column must be one-dimensional column'):
+    with self.assertRaisesRegex(ValueError,
+                                'source_column must be one-dimensional column'):
       fc.bucketized_column(a, boundaries=[0, 1])
 
   def test_invalid_boundaries(self):
     a = fc.numeric_column('aaa')
-    with self.assertRaisesRegexp(ValueError,
-                                 'boundaries must not be empty'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must not be empty'):
       fc.bucketized_column(a, boundaries=None)
-    with self.assertRaisesRegexp(ValueError,
-                                 'boundaries must be a sorted list'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must be a sorted list'):
       fc.bucketized_column(a, boundaries=1.)
-    with self.assertRaisesRegexp(ValueError,
-                                 'boundaries must be a sorted list'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must be a sorted list'):
       fc.bucketized_column(a, boundaries=[1, 0])
-    with self.assertRaisesRegexp(ValueError,
-                                 'boundaries must be a sorted list'):
+    with self.assertRaisesRegex(ValueError, 'boundaries must be a sorted list'):
       fc.bucketized_column(a, boundaries=[1, 1])
 
   def test_name(self):
@@ -652,7 +648,7 @@ class BucketizedColumnTest(test.TestCase):
             sparse_tensor.SparseTensor(
                 indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
     })
-    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
+    with self.assertRaisesRegex(ValueError, 'must be a Tensor'):
       bucketized_price.transform_feature(transformation_cache, None)
 
   def test_deep_copy(self):
@@ -792,16 +788,16 @@ class HashedCategoricalColumnTest(test.TestCase):
     self.assertTrue(a._is_v2_column)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc.categorical_column_with_hash_bucket(('key',), 10)
 
   def test_bucket_size_should_be_given(self):
-    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be set.'):
       fc.categorical_column_with_hash_bucket('aaa', None)
 
   def test_bucket_size_should_be_positive(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'hash_bucket_size must be at least 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'hash_bucket_size must be at least 1'):
       fc.categorical_column_with_hash_bucket('aaa', 0)
 
   def test_dtype_should_be_string_or_integer(self):
@@ -810,7 +806,7 @@ class HashedCategoricalColumnTest(test.TestCase):
     self.assertEqual(dtypes.string, a.dtype)
     self.assertEqual(dtypes.int32, b.dtype)
 
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)
 
   def test_deep_copy(self):
@@ -896,7 +892,7 @@ class HashedCategoricalColumnTest(test.TestCase):
     })
     transformation_cache.get(string_fc, None)
     transformation_cache.get(int_fc, None)
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       transformation_cache.get(float_fc, None)
 
   def test_dtype_should_match_with_tensor(self):
@@ -905,7 +901,7 @@ class HashedCategoricalColumnTest(test.TestCase):
     wire_tensor = sparse_tensor.SparseTensor(
         values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
     transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor})
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       transformation_cache.get(hashed_sparse, None)
 
   def test_ints_should_be_hashed(self):
@@ -1005,34 +1001,34 @@ class HashedCategoricalColumnTest(test.TestCase):
 class CrossedColumnTest(test.TestCase):
 
   def test_keys_empty(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'keys must be a list with length > 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'keys must be a list with length > 1'):
       fc.crossed_column([], 10)
 
   def test_keys_length_one(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'keys must be a list with length > 1'):
+    with self.assertRaisesRegex(ValueError,
+                                'keys must be a list with length > 1'):
       fc.crossed_column(['a'], 10)
 
   def test_key_type_unsupported(self):
-    with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
+    with self.assertRaisesRegex(ValueError, 'Unsupported key type'):
       fc.crossed_column(['a', fc.numeric_column('c')], 10)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'categorical_column_with_hash_bucket is not supported'):
       fc.crossed_column(
           ['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)
 
   def test_hash_bucket_size_negative(self):
-    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
       fc.crossed_column(['a', 'c'], -1)
 
   def test_hash_bucket_size_zero(self):
-    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
       fc.crossed_column(['a', 'c'], 0)
 
   def test_hash_bucket_size_none(self):
-    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
+    with self.assertRaisesRegex(ValueError, 'hash_bucket_size must be > 1'):
       fc.crossed_column(['a', 'c'], None)
 
   def test_name(self):
@@ -1314,7 +1310,7 @@ class CrossedColumnTest(test.TestCase):
     t = _TestColumnWithWeights()
     crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'crossed_column does not support weight_tensor.*{}'.format(t.name)):
         fc_old.linear_model({
@@ -1411,12 +1407,12 @@ class CrossedColumnTest(test.TestCase):
 class OldLinearModelTest(test.TestCase):
 
   def test_raises_if_empty_feature_columns(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'feature_columns must not be empty'):
+    with self.assertRaisesRegex(ValueError,
+                                'feature_columns must not be empty'):
       fc_old.linear_model(features={}, feature_columns=[])
 
   def test_should_be_feature_column(self):
-    with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
+    with self.assertRaisesRegex(ValueError, 'must be a _FeatureColumn'):
       fc_old.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
 
   def test_should_be_dense_or_categorical_column(self):
@@ -1446,19 +1442,19 @@ class OldLinearModelTest(test.TestCase):
       def _parse_example_spec(self):
         pass
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
       fc_old.linear_model(
           features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
 
   def test_does_not_support_dict_columns(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Expected feature_columns to be iterable, found dict.'):
       fc_old.linear_model(
           features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
 
   def test_raises_if_duplicate_name(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Duplicate feature column name found for columns'):
       fc_old.linear_model(
           features={'a': [[0]]},
@@ -1725,7 +1721,7 @@ class OldLinearModelTest(test.TestCase):
     price = fc.numeric_column('price', shape=2)
     with ops.Graph().as_default():
       features = {'price': [[1.], [5.]]}
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
         fc_old.linear_model(features, [price])
@@ -1948,7 +1944,7 @@ class OldLinearModelTest(test.TestCase):
           'price1': [[1.], [5.], [7.]],  # batchsize = 3
           'price2': [[3.], [4.]]  # batchsize = 2
       }
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
       fc_old.linear_model(features, [price1, price2])
@@ -1963,7 +1959,7 @@ class OldLinearModelTest(test.TestCase):
           'price2': [[3.], [4.]],  # batchsize = 2
           'price3': [[3.], [4.], [5.]]  # batchsize = 3
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         fc_old.linear_model(features, [price1, price2, price3])
@@ -1978,8 +1974,8 @@ class OldLinearModelTest(test.TestCase):
       }
       predictions = fc_old.linear_model(features, [price1, price2])
       with _initialized_session() as sess:
-        with self.assertRaisesRegexp(errors.OpError,
-                                     'must have the same size and shape'):
+        with self.assertRaisesRegex(errors.OpError,
+                                    'must have the same size and shape'):
           sess.run(
               predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
 
@@ -2099,7 +2095,7 @@ class OldLinearModelTest(test.TestCase):
     self.assertEqual(0, features['price'].shape.ndims)
 
     # Static rank 0 should fail
-    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
       fc_old.linear_model(features, [price])
 
     # This test needs to construct graph placeholders
@@ -2288,8 +2284,8 @@ class OldLinearModelTest(test.TestCase):
                   values=(1, 2, 1),
                   dense_shape=(2, 2)),
       }
-      with self.assertRaisesRegexp(ValueError,
-                                   'SharedEmbeddingColumns are not supported'):
+      with self.assertRaisesRegex(ValueError,
+                                  'SharedEmbeddingColumns are not supported'):
         fc_old.linear_model(features, all_cols)
 
 
@@ -2397,12 +2393,12 @@ class InputLayerTest(test.TestCase):
 class FunctionalInputLayerTest(test.TestCase):
 
   def test_raises_if_empty_feature_columns(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'feature_columns must not be empty'):
+    with self.assertRaisesRegex(ValueError,
+                                'feature_columns must not be empty'):
       fc_old.input_layer(features={}, feature_columns=[])
 
   def test_should_be_dense_column(self):
-    with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
+    with self.assertRaisesRegex(ValueError, 'must be a _DenseColumn'):
       fc_old.input_layer(
           features={'a': [[0]]},
           feature_columns=[
@@ -2410,7 +2406,7 @@ class FunctionalInputLayerTest(test.TestCase):
           ])
 
   def test_does_not_support_dict_columns(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Expected feature_columns to be iterable, found dict.'):
       fc_old.input_layer(
           features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
@@ -2437,7 +2433,7 @@ class FunctionalInputLayerTest(test.TestCase):
       self.assertAllClose([[0., 1.]], self.evaluate(net))
 
   def test_raises_if_duplicate_name(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Duplicate feature column name found for columns'):
       fc_old.input_layer(
           features={'a': [[0]]},
@@ -2470,7 +2466,7 @@ class FunctionalInputLayerTest(test.TestCase):
     price = fc.numeric_column('price', shape=2)
     with ops.Graph().as_default():
       features = {'price': [[1.], [5.]]}
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
         fc_old.input_layer(features, [price])
@@ -2642,7 +2638,7 @@ class FunctionalInputLayerTest(test.TestCase):
               sparse_tensor.SparseTensor(
                   indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
       }
-      with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
+      with self.assertRaisesRegex(Exception, 'must be a _DenseColumn'):
         fc_old.input_layer(features, [animal])
 
   def test_static_batch_size_mismatch(self):
@@ -2653,7 +2649,7 @@ class FunctionalInputLayerTest(test.TestCase):
           'price1': [[1.], [5.], [7.]],  # batchsize = 3
           'price2': [[3.], [4.]]  # batchsize = 2
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         fc_old.input_layer(features, [price1, price2])
@@ -2668,7 +2664,7 @@ class FunctionalInputLayerTest(test.TestCase):
           'price2': [[3.], [4.]],  # batchsize = 2
           'price3': [[3.], [4.], [5.]]  # batchsize = 3
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         fc_old.input_layer(features, [price1, price2, price3])
@@ -2683,8 +2679,8 @@ class FunctionalInputLayerTest(test.TestCase):
       }
       net = fc_old.input_layer(features, [price1, price2])
       with _initialized_session() as sess:
-        with self.assertRaisesRegexp(errors.OpError,
-                                     'Dimensions of inputs should match'):
+        with self.assertRaisesRegex(errors.OpError,
+                                    'Dimensions of inputs should match'):
           sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
 
   def test_runtime_batch_size_matches(self):
@@ -2855,7 +2851,7 @@ class FunctionalInputLayerTest(test.TestCase):
     self.assertEqual(0, features['price'].shape.ndims)
 
     # Static rank 0 should fail
-    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
       fc_old.input_layer(features, [price])
 
     # This test needs to construct graph placeholders
@@ -2908,7 +2904,7 @@ class MakeParseExampleSpecTest(test.TestCase):
     key1 = 'key1'
     parse_spec1 = parsing_ops.FixedLenFeature(
         shape=(2,), dtype=dtypes.float32, default_value=0.)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'All feature_columns must be FeatureColumn instances.*invalid_column'):
       fc.make_parse_example_spec_v2((self._TestFeatureColumn({
@@ -2942,7 +2938,7 @@ class MakeParseExampleSpecTest(test.TestCase):
     parse_spec1 = parsing_ops.FixedLenFeature(
         shape=(2,), dtype=dtypes.float32, default_value=0.)
     parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'feature_columns contain different parse_spec for key key1'):
       fc.make_parse_example_spec_v2((self._TestFeatureColumn({
@@ -3038,7 +3034,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
     self.assertTrue(column._is_v2_column)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc.categorical_column_with_vocabulary_file(
           key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
 
@@ -3069,12 +3065,12 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
       }, column.parse_example_spec)
 
   def test_vocabulary_file_none(self):
-    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
+    with self.assertRaisesRegex(ValueError, 'Missing vocabulary_file'):
       fc.categorical_column_with_vocabulary_file(
           key='aaa', vocabulary_file=None, vocabulary_size=3)
 
   def test_vocabulary_file_empty_string(self):
-    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
+    with self.assertRaisesRegex(ValueError, 'Missing vocabulary_file'):
       fc.categorical_column_with_vocabulary_file(
           key='aaa', vocabulary_file='', vocabulary_size=3)
 
@@ -3085,7 +3081,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('marlo', 'skywalker', 'omar'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
+    with self.assertRaisesRegex(errors.OpError, 'file_does_not_exist'):
       column.get_sparse_tensors(
           fc.FeatureTransformationCache({
               'aaa': inputs
@@ -3093,12 +3089,12 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
       self.evaluate(lookup_ops.tables_initializer())
 
   def test_invalid_vocabulary_size(self):
-    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
+    with self.assertRaisesRegex(ValueError, 'Invalid vocabulary_size'):
       fc.categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file=self._wire_vocabulary_file_name,
           vocabulary_size=-1)
-    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
+    with self.assertRaisesRegex(ValueError, 'Invalid vocabulary_size'):
       fc.categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file=self._wire_vocabulary_file_name,
@@ -3113,7 +3109,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('marlo', 'skywalker', 'omar'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
+    with self.assertRaisesRegex(errors.OpError, 'Invalid vocab_size'):
       column.get_sparse_tensors(
           fc.FeatureTransformationCache({
               'aaa': inputs
@@ -3121,7 +3117,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
       self.evaluate(lookup_ops.tables_initializer())
 
   def test_invalid_num_oov_buckets(self):
-    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
+    with self.assertRaisesRegex(ValueError, 'Invalid num_oov_buckets'):
       fc.categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file='path',
@@ -3129,7 +3125,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
           num_oov_buckets=-1)
 
   def test_invalid_dtype(self):
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       fc.categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file='path',
@@ -3137,8 +3133,8 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
           dtype=dtypes.float64)
 
   def test_invalid_buckets_and_default_value(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'both num_oov_buckets and default_value'):
+    with self.assertRaisesRegex(ValueError,
+                                'both num_oov_buckets and default_value'):
       fc.categorical_column_with_vocabulary_file(
           key='aaa',
           vocabulary_file=self._wire_vocabulary_file_name,
@@ -3156,7 +3152,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=(12, 24, 36),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column.get_sparse_tensors(
           fc.FeatureTransformationCache({
               'aaa': inputs
@@ -3172,7 +3168,7 @@ class VocabularyFileCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column.get_sparse_tensors(
           fc.FeatureTransformationCache({
               'aaa': inputs
@@ -3523,7 +3519,7 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
     self.assertTrue(column._is_v2_column)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc.categorical_column_with_vocabulary_list(
           key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))
 
@@ -3559,57 +3555,57 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
       }, column.parse_example_spec)
 
   def test_invalid_dtype(self):
-    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa',
           vocabulary_list=('omar', 'stringer', 'marlo'),
           dtype=dtypes.float32)
 
   def test_invalid_mapping_dtype(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'vocabulary dtype must be string or integer'):
+    with self.assertRaisesRegex(ValueError,
+                                r'vocabulary dtype must be string or integer'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12., 24., 36.))
 
   def test_mismatched_int_dtype(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'dtype.*and vocabulary dtype.*do not match'):
+    with self.assertRaisesRegex(ValueError,
+                                r'dtype.*and vocabulary dtype.*do not match'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa',
           vocabulary_list=('omar', 'stringer', 'marlo'),
           dtype=dtypes.int32)
 
   def test_mismatched_string_dtype(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'dtype.*and vocabulary dtype.*do not match'):
+    with self.assertRaisesRegex(ValueError,
+                                r'dtype.*and vocabulary dtype.*do not match'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
 
   def test_none_mapping(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'vocabulary_list.*must be non-empty'):
+    with self.assertRaisesRegex(ValueError,
+                                r'vocabulary_list.*must be non-empty'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=None)
 
   def test_empty_mapping(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'vocabulary_list.*must be non-empty'):
+    with self.assertRaisesRegex(ValueError,
+                                r'vocabulary_list.*must be non-empty'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=tuple([]))
 
   def test_duplicate_mapping(self):
-    with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
+    with self.assertRaisesRegex(ValueError, 'Duplicate keys'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12, 24, 12))
 
   def test_invalid_num_oov_buckets(self):
-    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
+    with self.assertRaisesRegex(ValueError, 'Invalid num_oov_buckets'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=-1)
 
   def test_invalid_buckets_and_default_value(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'both num_oov_buckets and default_value'):
+    with self.assertRaisesRegex(ValueError,
+                                'both num_oov_buckets and default_value'):
       fc.categorical_column_with_vocabulary_list(
           key='aaa',
           vocabulary_list=(12, 24, 36),
@@ -3623,7 +3619,7 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=(12, 24, 36),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column.get_sparse_tensors(
           fc.FeatureTransformationCache({
               'aaa': inputs
@@ -3636,7 +3632,7 @@ class VocabularyListCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
+    with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
       column.get_sparse_tensors(
           fc.FeatureTransformationCache({
               'aaa': inputs
@@ -3942,7 +3938,7 @@ class IdentityCategoricalColumnTest(test.TestCase):
     self.assertTrue(column._is_v2_column)
 
   def test_key_should_be_string(self):
-    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
+    with self.assertRaisesRegex(ValueError, 'key must be a string.'):
       fc.categorical_column_with_identity(key=('aaa',), num_buckets=3)
 
   def test_deep_copy(self):
@@ -3955,20 +3951,20 @@ class IdentityCategoricalColumnTest(test.TestCase):
       }, column.parse_example_spec)
 
   def test_invalid_num_buckets_zero(self):
-    with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
+    with self.assertRaisesRegex(ValueError, 'num_buckets 0 < 1'):
       fc.categorical_column_with_identity(key='aaa', num_buckets=0)
 
   def test_invalid_num_buckets_negative(self):
-    with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
+    with self.assertRaisesRegex(ValueError, 'num_buckets -1 < 1'):
       fc.categorical_column_with_identity(key='aaa', num_buckets=-1)
 
   def test_invalid_default_value_too_small(self):
-    with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
+    with self.assertRaisesRegex(ValueError, 'default_value -1 not in range'):
       fc.categorical_column_with_identity(
           key='aaa', num_buckets=3, default_value=-1)
 
   def test_invalid_default_value_too_big(self):
-    with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
+    with self.assertRaisesRegex(ValueError, 'default_value 3 not in range'):
       fc.categorical_column_with_identity(
           key='aaa', num_buckets=3, default_value=3)
 
@@ -3978,7 +3974,7 @@ class IdentityCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
+    with self.assertRaisesRegex(ValueError, 'Invalid input, not integer'):
       column.get_sparse_tensors(
           fc.FeatureTransformationCache({
               'aaa': inputs
@@ -4134,8 +4130,8 @@ class IdentityCategoricalColumnTest(test.TestCase):
     state_manager = _TestStateManager()
     embedding_column.create_state(state_manager)
 
-    with self.assertRaisesRegexp(errors.OpError,
-                                 r'indices\[0\] = 2 is not in \[0, 2\)'):
+    with self.assertRaisesRegex(errors.OpError,
+                                r'indices\[0\] = 2 is not in \[0, 2\)'):
       # Provide sparse input and get dense result.
       embedding_lookup = embedding_column.get_dense_tensor(
           fc.FeatureTransformationCache({'aaa': sparse_input}), state_manager)
@@ -4337,7 +4333,7 @@ class IndicatorColumnTest(test.TestCase):
     self.assertFalse(indicator_b._is_v2_column)
 
   def test_not_categorical_input(self):
-    with self.assertRaisesRegexp(ValueError, 'Unsupported input type.'):
+    with self.assertRaisesRegex(ValueError, 'Unsupported input type.'):
       fc.indicator_column('aaa')
 
   def test_1D_shape_succeeds(self):
@@ -4732,7 +4728,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
   def test_invalid_initializer(self):
     categorical_column = fc.categorical_column_with_identity(
         key='aaa', num_buckets=3)
-    with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
+    with self.assertRaisesRegex(ValueError, 'initializer must be callable'):
       fc.embedding_column(categorical_column, dimension=2, initializer='not_fn')
 
   def test_parse_example(self):
@@ -5573,7 +5569,7 @@ class SharedEmbeddingColumnTest(test.TestCase, parameterized.TestCase):
           key='aaa', num_buckets=3)
       categorical_column_b = fc.categorical_column_with_identity(
           key='bbb', num_buckets=3)
-      with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
+      with self.assertRaisesRegex(ValueError, 'initializer must be callable'):
         fc.shared_embedding_columns_v2(
             [categorical_column_a, categorical_column_b],
             dimension=2,
@@ -5588,7 +5584,7 @@ class SharedEmbeddingColumnTest(test.TestCase, parameterized.TestCase):
           key='bbb', num_buckets=3)
       categorical_column_c = fc.categorical_column_with_hash_bucket(
           key='ccc', hash_bucket_size=3)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'all categorical_columns must have the same type.*'
           'IdentityCategoricalColumn.*HashedCategoricalColumn'):
         fc.shared_embedding_columns_v2(
@@ -6039,7 +6035,7 @@ class WeightedCategoricalColumnTest(test.TestCase):
       }, column.parse_example_spec)
 
   def test_invalid_dtype_none(self):
-    with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
+    with self.assertRaisesRegex(ValueError, 'is not convertible to float'):
       fc.weighted_categorical_column(
           categorical_column=fc.categorical_column_with_identity(
               key='ids', num_buckets=3),
@@ -6047,7 +6043,7 @@ class WeightedCategoricalColumnTest(test.TestCase):
           dtype=None)
 
   def test_invalid_dtype_string(self):
-    with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
+    with self.assertRaisesRegex(ValueError, 'is not convertible to float'):
       fc.weighted_categorical_column(
           categorical_column=fc.categorical_column_with_identity(
               key='ids', num_buckets=3),
@@ -6063,14 +6059,14 @@ class WeightedCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError, 'Bad dtype'):
+    with self.assertRaisesRegex(ValueError, 'Bad dtype'):
       fc._transform_features_v2({
           'ids': strings,
           'values': strings
       }, (column,), None)
 
   def test_column_name_collision(self):
-    with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'):
+    with self.assertRaisesRegex(ValueError, r'Parse config.*already exists'):
       fc.weighted_categorical_column(
           categorical_column=fc.categorical_column_with_identity(
               key='aaa', num_buckets=3),
@@ -6085,8 +6081,8 @@ class WeightedCategoricalColumnTest(test.TestCase):
         indices=((0, 0), (1, 0), (1, 1)),
         values=('omar', 'stringer', 'marlo'),
         dense_shape=(2, 2))
-    with self.assertRaisesRegexp(ValueError,
-                                 'values is not in features dictionary'):
+    with self.assertRaisesRegex(ValueError,
+                                'values is not in features dictionary'):
       fc._transform_features_v2({'ids': inputs}, (column,), None)
 
   def test_parse_example(self):
@@ -6252,8 +6248,8 @@ class WeightedCategoricalColumnTest(test.TestCase):
             key='ids', num_buckets=3),
         weight_feature_key='values')
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError,
-                                   r'Dimensions.*are not compatible'):
+      with self.assertRaisesRegex(ValueError,
+                                  r'Dimensions.*are not compatible'):
         fc_old.linear_model({
             'ids':
                 sparse_tensor.SparseTensorValue(
@@ -6288,7 +6284,7 @@ class WeightedCategoricalColumnTest(test.TestCase):
       config.graph_options.rewrite_options.constant_folding = (
           rewriter_config_pb2.RewriterConfig.OFF)
       with _initialized_session(config):
-        with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
+        with self.assertRaisesRegex(errors.OpError, 'Incompatible shapes'):
           self.evaluate(predictions)
 
   def test_old_linear_model_mismatched_dense_shape(self):
diff --git a/tensorflow/python/feature_column/sequence_feature_column_test.py b/tensorflow/python/feature_column/sequence_feature_column_test.py
index e0cd73d17e4..da6d1dee4ba 100644
--- a/tensorflow/python/feature_column/sequence_feature_column_test.py
+++ b/tensorflow/python/feature_column/sequence_feature_column_test.py
@@ -76,7 +76,7 @@ class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
     context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
     seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
     context_input = math_ops.cast(context_input, dtype=dtypes.float32)
-    with self.assertRaisesRegexp(ValueError, 'sequence_input must have rank 3'):
+    with self.assertRaisesRegex(ValueError, 'sequence_input must have rank 3'):
       sfc.concatenate_context_input(context_input, seq_input)
 
   @parameterized.named_parameters(
@@ -90,23 +90,23 @@ class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
     seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
     seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
     context_input = math_ops.cast(context_input, dtype=dtypes.float32)
-    with self.assertRaisesRegexp(ValueError, 'context_input must have rank 2'):
+    with self.assertRaisesRegex(ValueError, 'context_input must have rank 2'):
       sfc.concatenate_context_input(context_input, seq_input)
 
   def test_integer_seq_input_throws_error(self):
     seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
     context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
     context_input = math_ops.cast(context_input, dtype=dtypes.float32)
-    with self.assertRaisesRegexp(
-        TypeError, 'sequence_input must have dtype float32'):
+    with self.assertRaisesRegex(TypeError,
+                                'sequence_input must have dtype float32'):
       sfc.concatenate_context_input(context_input, seq_input)
 
   def test_integer_context_input_throws_error(self):
     seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
     context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
     seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
-    with self.assertRaisesRegexp(
-        TypeError, 'context_input must have dtype float32'):
+    with self.assertRaisesRegex(TypeError,
+                                'context_input must have dtype float32'):
       sfc.concatenate_context_input(context_input, seq_input)
 
 
@@ -811,20 +811,20 @@ class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
     self.assertEqual((1, 2), a.shape)
 
   def test_shape_must_be_positive_integer(self):
-    with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
+    with self.assertRaisesRegex(TypeError, 'shape dimensions must be integer'):
       sfc.sequence_numeric_column('aaa', shape=[1.0])
 
-    with self.assertRaisesRegexp(
-        ValueError, 'shape dimensions must be greater than 0'):
+    with self.assertRaisesRegex(ValueError,
+                                'shape dimensions must be greater than 0'):
       sfc.sequence_numeric_column('aaa', shape=[0])
 
   def test_dtype_is_convertible_to_float(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'dtype must be convertible to float'):
+    with self.assertRaisesRegex(ValueError,
+                                'dtype must be convertible to float'):
       sfc.sequence_numeric_column('aaa', dtype=dtypes.string)
 
   def test_normalizer_fn_must_be_callable(self):
-    with self.assertRaisesRegexp(TypeError, 'must be a callable'):
+    with self.assertRaisesRegex(TypeError, 'must be a callable'):
       sfc.sequence_numeric_column('aaa', normalizer_fn='NotACallable')
 
   @parameterized.named_parameters(
diff --git a/tensorflow/python/feature_column/serialization_test.py b/tensorflow/python/feature_column/serialization_test.py
index 69b954022af..a170afff1c2 100644
--- a/tensorflow/python/feature_column/serialization_test.py
+++ b/tensorflow/python/feature_column/serialization_test.py
@@ -32,11 +32,11 @@ class FeatureColumnSerializationTest(test.TestCase):
     class NotAFeatureColumn(object):
       pass
 
-    with self.assertRaisesRegexp(ValueError, 'is not a FeatureColumn'):
+    with self.assertRaisesRegex(ValueError, 'is not a FeatureColumn'):
       serialization.serialize_feature_column(NotAFeatureColumn())
 
   def test_deserialize_invalid_config(self):
-    with self.assertRaisesRegexp(ValueError, 'Improper config format: {}'):
+    with self.assertRaisesRegex(ValueError, 'Improper config format: {}'):
       serialization.deserialize_feature_column({})
 
   def test_deserialize_config_missing_key(self):
@@ -52,12 +52,12 @@ class FeatureColumnSerializationTest(test.TestCase):
         'class_name': 'NumericColumn'
     }
 
-    with self.assertRaisesRegexp(
-        ValueError, 'Invalid config:.*expected keys.*dtype'):
+    with self.assertRaisesRegex(ValueError,
+                                'Invalid config:.*expected keys.*dtype'):
       serialization.deserialize_feature_column(config_missing_key)
 
   def test_deserialize_invalid_class(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Unknown feature_column_v2: NotExistingFeatureColumnClass'):
       serialization.deserialize_feature_column({
           'class_name': 'NotExistingFeatureColumnClass',
diff --git a/tensorflow/python/framework/auto_control_deps_test.py b/tensorflow/python/framework/auto_control_deps_test.py
index d0e08e676d5..61c14ce74fe 100644
--- a/tensorflow/python/framework/auto_control_deps_test.py
+++ b/tensorflow/python/framework/auto_control_deps_test.py
@@ -761,8 +761,8 @@ class AutomaticControlDependenciesTest(test.TestCase):
     v = resource_variable_ops.ResourceVariable(1.0)
     grad = backprop.implicit_grad(lambda v: v**2)(v)
 
-    with self.assertRaisesRegexp(TypeError,
-                                 ".*must return zero or more Tensors.*"):
+    with self.assertRaisesRegex(TypeError,
+                                ".*must return zero or more Tensors.*"):
       # TODO(akshayka): We might want to allow defun-ing Python functions
       # that return operations (and just execute the op instead of running it).
       optimizer.apply_gradients(grad)
diff --git a/tensorflow/python/framework/config_test.py b/tensorflow/python/framework/config_test.py
index 65845535ea7..345e7f0d9be 100644
--- a/tensorflow/python/framework/config_test.py
+++ b/tensorflow/python/framework/config_test.py
@@ -83,8 +83,8 @@ class ConfigTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(config.get_device_policy(), 'silent_for_int32')
     self.assertEqual(context.DEVICE_PLACEMENT_SILENT_FOR_INT32,
                      context.context().device_policy)
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'Tensors on conflicting devices'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'Tensors on conflicting devices'):
       copy_tensor(dtypes.float32)
     copy_tensor()
 
@@ -98,8 +98,8 @@ class ConfigTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(config.get_device_policy(), 'explicit')
     self.assertEqual(context.DEVICE_PLACEMENT_EXPLICIT,
                      context.context().device_policy)
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'Tensors on conflicting devices'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'Tensors on conflicting devices'):
       copy_tensor()
 
     config.set_device_policy(None)
@@ -409,7 +409,7 @@ class DeviceTest(test.TestCase):
         self.evaluate(d)
 
     # Modifying the CPU configuration is not supported
-    with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
+    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
       config.set_logical_device_configuration(cpus[0], [
           context.LogicalDeviceConfiguration(),
           context.LogicalDeviceConfiguration(),
@@ -445,20 +445,20 @@ class DeviceTest(test.TestCase):
     self.assertEqual(len(config.get_visible_devices('GPU')), 0)
     self.assertEqual(len(config.list_logical_devices('XLA_GPU')), 0)
 
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'Could not satisfy'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'Could not satisfy'):
       with ops.device('/device:GPU:0'):
         a = array_ops.identity(1.0)
         self.evaluate(a)
 
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'Could not satisfy'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'Could not satisfy'):
       with ops.device('/device:XLA_GPU:0'):
         a = array_ops.identity(1.0)
         self.evaluate(a)
 
     # Modifying the visible devices is not supported
-    with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
+    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
       config.set_visible_devices(gpus)
 
     # Setting the same visible devices is fine
@@ -477,7 +477,7 @@ class DeviceTest(test.TestCase):
         a = constant_op.constant(1.0)
         self.evaluate(a)
 
-    with self.assertRaisesRegexp(RuntimeError, 'unknown device'):
+    with self.assertRaisesRegex(RuntimeError, 'unknown device'):
       with ops.device('/device:GPU:' + str(len(gpus))):
         a = constant_op.constant(1.0)
         self.evaluate(a)
@@ -515,12 +515,12 @@ class DeviceTest(test.TestCase):
   @reset_eager
   def testDeviceDetailsErrors(self):
     logical_devices = config.list_logical_devices()
-    with self.assertRaisesRegexp(ValueError,
-                                 'must be a tf.config.PhysicalDevice'):
+    with self.assertRaisesRegex(ValueError,
+                                'must be a tf.config.PhysicalDevice'):
       config.get_device_details(logical_devices[0])
 
     phys_dev = context.PhysicalDevice('/physical_device:CPU:100', 'CPU')
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'The PhysicalDevice must be one obtained from '
         'calling `tf.config.list_physical_devices`'):
       config.get_device_details(phys_dev)
@@ -546,20 +546,20 @@ class DeviceTest(test.TestCase):
         a = array_ops.identity(1.0)
         self.evaluate(a)
 
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 'Could not satisfy'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                'Could not satisfy'):
       with ops.device('/device:GPU:' + str(len(logical_gpus))):
         a = array_ops.identity(1.0)
         self.evaluate(a)
 
     # Modifying the GPU configuration is not supported
-    with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
+    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
       config.set_logical_device_configuration(gpus[-1], [
           context.LogicalDeviceConfiguration(memory_limit=20),
           context.LogicalDeviceConfiguration(memory_limit=20)
       ])
 
-    with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
+    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
       config.set_logical_device_configuration(gpus[-1], [
           context.LogicalDeviceConfiguration(memory_limit=10),
           context.LogicalDeviceConfiguration(memory_limit=10),
@@ -589,7 +589,7 @@ class DeviceTest(test.TestCase):
     self.assertTrue(len(logical_gpus), len(gpus))
 
     # Modifying the GPU configuration is not supported
-    with self.assertRaisesRegexp(RuntimeError, 'cannot be modified'):
+    with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
       for gpu in gpus:
         config.set_memory_growth(gpu, False)
 
@@ -606,7 +606,7 @@ class DeviceTest(test.TestCase):
     if len(gpus) > 1:
       # Assert if other GPUs were not configured
       config.set_memory_growth(gpus[0], True)
-      with self.assertRaisesRegexp(ValueError, 'cannot differ'):
+      with self.assertRaisesRegex(ValueError, 'cannot differ'):
         c = context.context().config
 
       # If we limit visibility to GPU 0, growth is fine
@@ -621,7 +621,7 @@ class DeviceTest(test.TestCase):
 
       # Growth now fails because all the GPUs are visible and not the same
       config.set_visible_devices(gpus, 'GPU')
-      with self.assertRaisesRegexp(ValueError, 'cannot differ'):
+      with self.assertRaisesRegex(ValueError, 'cannot differ'):
         c = context.context().config
 
     for gpu in gpus:
@@ -630,7 +630,7 @@ class DeviceTest(test.TestCase):
     c = context.context().config
     self.assertTrue(c.gpu_options.allow_growth)
 
-    with self.assertRaisesRegexp(ValueError, 'memory limit'):
+    with self.assertRaisesRegex(ValueError, 'memory limit'):
       config.set_logical_device_configuration(gpus[-1], [
           context.LogicalDeviceConfiguration(),
           context.LogicalDeviceConfiguration()
@@ -645,7 +645,7 @@ class DeviceTest(test.TestCase):
     c = context.context().config
     self.assertFalse(c.gpu_options.allow_growth)
 
-    with self.assertRaisesRegexp(ValueError, 'virtual devices'):
+    with self.assertRaisesRegex(ValueError, 'virtual devices'):
       config.set_memory_growth(gpus[-1], False)
 
   @test_util.run_gpu_only
@@ -719,7 +719,7 @@ class DeviceTest(test.TestCase):
     # Handle invalid visible device list
     context.context()._config = config_pb2.ConfigProto(
         gpu_options=config_pb2.GPUOptions(visible_device_list=str(gpu_count)))
-    with self.assertRaisesRegexp(ValueError, 'Invalid visible device index'):
+    with self.assertRaisesRegex(ValueError, 'Invalid visible device index'):
       gpus = config.list_physical_devices('GPU')
       new_config = context.context().config
     context.context()._physical_devices = None
diff --git a/tensorflow/python/framework/device_spec_test.py b/tensorflow/python/framework/device_spec_test.py
index 850b9a561ae..7ccb96fbb50 100644
--- a/tensorflow/python/framework/device_spec_test.py
+++ b/tensorflow/python/framework/device_spec_test.py
@@ -149,7 +149,7 @@ class DeviceSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     d.parse_from_string("/job:muu/device:GPU:2")
     self.assertEqual("/job:muu/device:GPU:2", d.to_string())
 
-    with self.assertRaisesRegexp(ValueError, "Cannot specify multiple"):
+    with self.assertRaisesRegex(ValueError, "Cannot specify multiple"):
       d.parse_from_string("/job:muu/device:GPU:2/cpu:0")
 
   @parameterized.named_parameters(*TEST_V1_AND_V2)
@@ -173,7 +173,7 @@ class DeviceSpecTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     self.assertAllEqual(["muu", "GPU", 2],
                         [d.job, d.device_type, d.device_index])
 
-    with self.assertRaisesRegexp(ValueError, "Cannot specify multiple"):
+    with self.assertRaisesRegex(ValueError, "Cannot specify multiple"):
       d.parse_from_string("/job:muu/device:GPU:2/cpu:0")
 
   def test_merge_legacy(self):
diff --git a/tensorflow/python/framework/device_test.py b/tensorflow/python/framework/device_test.py
index 2b34c1ec7fd..b8d57e6a072 100644
--- a/tensorflow/python/framework/device_test.py
+++ b/tensorflow/python/framework/device_test.py
@@ -80,16 +80,16 @@ class DeviceTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testCheckValid(self):
     device.check_valid("/job:foo/replica:0")
 
-    with self.assertRaisesRegexp(ValueError, "invalid literal for int"):
+    with self.assertRaisesRegex(ValueError, "invalid literal for int"):
       device.check_valid("/job:j/replica:foo")
 
-    with self.assertRaisesRegexp(ValueError, "invalid literal for int"):
+    with self.assertRaisesRegex(ValueError, "invalid literal for int"):
       device.check_valid("/job:j/task:bar")
 
-    with self.assertRaisesRegexp(ValueError, "Unknown attribute: 'bar'"):
+    with self.assertRaisesRegex(ValueError, "Unknown attribute: 'bar'"):
       device.check_valid("/bar:muu/baz:2")
 
-    with self.assertRaisesRegexp(ValueError, "Cannot specify multiple device"):
+    with self.assertRaisesRegex(ValueError, "Cannot specify multiple device"):
       device.check_valid("/cpu:0/device:GPU:2")
 
 
diff --git a/tensorflow/python/framework/dtypes_test.py b/tensorflow/python/framework/dtypes_test.py
index 1b7e02b6179..0673ee41f0e 100644
--- a/tensorflow/python/framework/dtypes_test.py
+++ b/tensorflow/python/framework/dtypes_test.py
@@ -261,42 +261,42 @@ class TypesTest(test_util.TensorFlowTestCase):
 
       # check some values that are known
       if numpy_dtype == np.bool_:
-        self.assertEquals(dtype.min, 0)
-        self.assertEquals(dtype.max, 1)
+        self.assertEqual(dtype.min, 0)
+        self.assertEqual(dtype.max, 1)
       if numpy_dtype == np.int8:
-        self.assertEquals(dtype.min, -128)
-        self.assertEquals(dtype.max, 127)
+        self.assertEqual(dtype.min, -128)
+        self.assertEqual(dtype.max, 127)
       if numpy_dtype == np.int16:
-        self.assertEquals(dtype.min, -32768)
-        self.assertEquals(dtype.max, 32767)
+        self.assertEqual(dtype.min, -32768)
+        self.assertEqual(dtype.max, 32767)
       if numpy_dtype == np.int32:
-        self.assertEquals(dtype.min, -2147483648)
-        self.assertEquals(dtype.max, 2147483647)
+        self.assertEqual(dtype.min, -2147483648)
+        self.assertEqual(dtype.max, 2147483647)
       if numpy_dtype == np.int64:
-        self.assertEquals(dtype.min, -9223372036854775808)
-        self.assertEquals(dtype.max, 9223372036854775807)
+        self.assertEqual(dtype.min, -9223372036854775808)
+        self.assertEqual(dtype.max, 9223372036854775807)
       if numpy_dtype == np.uint8:
-        self.assertEquals(dtype.min, 0)
-        self.assertEquals(dtype.max, 255)
+        self.assertEqual(dtype.min, 0)
+        self.assertEqual(dtype.max, 255)
       if numpy_dtype == np.uint16:
         if dtype == dtypes.uint16:
-          self.assertEquals(dtype.min, 0)
-          self.assertEquals(dtype.max, 65535)
+          self.assertEqual(dtype.min, 0)
+          self.assertEqual(dtype.max, 65535)
         elif dtype == dtypes.bfloat16:
-          self.assertEquals(dtype.min, 0)
-          self.assertEquals(dtype.max, 4294967295)
+          self.assertEqual(dtype.min, 0)
+          self.assertEqual(dtype.max, 4294967295)
       if numpy_dtype == np.uint32:
-        self.assertEquals(dtype.min, 0)
-        self.assertEquals(dtype.max, 4294967295)
+        self.assertEqual(dtype.min, 0)
+        self.assertEqual(dtype.max, 4294967295)
       if numpy_dtype == np.uint64:
-        self.assertEquals(dtype.min, 0)
-        self.assertEquals(dtype.max, 18446744073709551615)
+        self.assertEqual(dtype.min, 0)
+        self.assertEqual(dtype.max, 18446744073709551615)
       if numpy_dtype in (np.float16, np.float32, np.float64):
-        self.assertEquals(dtype.min, np.finfo(numpy_dtype).min)
-        self.assertEquals(dtype.max, np.finfo(numpy_dtype).max)
+        self.assertEqual(dtype.min, np.finfo(numpy_dtype).min)
+        self.assertEqual(dtype.max, np.finfo(numpy_dtype).max)
       if numpy_dtype == dtypes.bfloat16.as_numpy_dtype:
-        self.assertEquals(dtype.min, float.fromhex("-0x1.FEp127"))
-        self.assertEquals(dtype.max, float.fromhex("0x1.FEp127"))
+        self.assertEqual(dtype.min, float.fromhex("-0x1.FEp127"))
+        self.assertEqual(dtype.max, float.fromhex("0x1.FEp127"))
 
   def testRepr(self):
     self.skipTest("b/142725777")
@@ -304,11 +304,11 @@ class TypesTest(test_util.TensorFlowTestCase):
       if enum > 100:
         continue
       dtype = dtypes.DType(enum)
-      self.assertEquals(repr(dtype), "tf." + name)
+      self.assertEqual(repr(dtype), "tf." + name)
       import tensorflow as tf
       dtype2 = eval(repr(dtype))
-      self.assertEquals(type(dtype2), dtypes.DType)
-      self.assertEquals(dtype, dtype2)
+      self.assertEqual(type(dtype2), dtypes.DType)
+      self.assertEqual(dtype, dtype2)
 
   def testEqWithNonTFTypes(self):
     self.assertNotEqual(dtypes.int32, int)
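
(Not part of the patch.) A minimal, self-contained sketch of the assert spellings this change standardizes on: assertEqual, assertRaisesRegex, and assertRegex are the supported unittest names, while assertEquals, assertRaisesRegexp, and assertRegexpMatches are deprecated aliases for them.

import unittest


class ModernAssertNamesTest(unittest.TestCase):
  """Illustrative only; not part of this patch."""

  def test_assert_equal(self):
    # Replaces the deprecated assertEquals alias.
    self.assertEqual(2 + 2, 4)

  def test_assert_raises_regex(self):
    # Replaces the deprecated assertRaisesRegexp alias; the pattern is
    # matched against str(exception) with re.search.
    with self.assertRaisesRegex(ValueError, "invalid literal"):
      int("not a number")

  def test_assert_regex(self):
    # Replaces the deprecated assertRegexpMatches alias.
    self.assertRegex("dtypes_test.py:42", r"[a-z_]+\.py:[0-9]+")


if __name__ == "__main__":
  unittest.main()
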
diff --git a/tensorflow/python/framework/error_interpolation_test.py b/tensorflow/python/framework/error_interpolation_test.py
index 4e6027373cb..8e2e9b983c8 100644
--- a/tensorflow/python/framework/error_interpolation_test.py
+++ b/tensorflow/python/framework/error_interpolation_test.py
@@ -240,7 +240,7 @@ class InterpolateFilenamesAndLineNumbersTest(test.TestCase):
     two_tags_no_seps = "{{node One}}{{node Three}}"
     interpolated_string = error_interpolation.interpolate(
         two_tags_no_seps, self.graph)
-    self.assertRegexpMatches(
+    self.assertRegex(
         interpolated_string, r"error_interpolation_test\.py:[0-9]+."
         r"*error_interpolation_test\.py:[0-9]+")
 
@@ -250,13 +250,13 @@ class InterpolateFilenamesAndLineNumbersTest(test.TestCase):
         two_tags_with_seps, self.graph)
     expected_regex = (r"^;;;.*error_interpolation_test\.py:[0-9]+\) "
                       r",,,.*error_interpolation_test\.py:[0-9]+\) ;;;$")
-    self.assertRegexpMatches(interpolated_string, expected_regex)
+    self.assertRegex(interpolated_string, expected_regex)
 
   def testNewLine(self):
     newline = "\n\n{{node One}}"
     interpolated_string = error_interpolation.interpolate(newline, self.graph)
-    self.assertRegexpMatches(interpolated_string,
-                             r"error_interpolation_test\.py:[0-9]+.*")
+    self.assertRegex(interpolated_string,
+                     r"error_interpolation_test\.py:[0-9]+.*")
 
 
 @test_util.run_deprecated_v1
@@ -279,7 +279,7 @@ class InputNodesTest(test.TestCase):
         two_tags_with_seps, self.graph)
     expected_regex = (r"^;;;.*error_interpolation_test\.py:[0-9]+\) "
                       r",,,.*error_interpolation_test\.py:[0-9]+\) ;;;$")
-    self.assertRegexpMatches(interpolated_string, expected_regex)
+    self.assertRegex(interpolated_string, expected_regex)
 
   def testBasicInputs(self):
     tag = ";;;{{node Three}};;;"
@@ -287,7 +287,7 @@ class InputNodesTest(test.TestCase):
     expected_regex = re.compile(
         r"^;;;.*error_interpolation_test\.py:[0-9]+\) "
         r";;;.*Input.*error_interpolation_test\.py:[0-9]+\)", re.DOTALL)
-    self.assertRegexpMatches(interpolated_string, expected_regex)
+    self.assertRegex(interpolated_string, expected_regex)
 
 
 @test_util.run_deprecated_v1
@@ -332,7 +332,7 @@ class InterpolateDeviceSummaryTest(test.TestCase):
     self.assertEqual(2, num_devices)
     name_re = r"_fancy_device_function<.*error_interpolation_test.py, [0-9]+>"
     expected_re = r"with tf.device\(.*%s\)" % name_re
-    self.assertRegexpMatches(result, expected_re)
+    self.assertRegex(result, expected_re)
 
 
 @test_util.run_deprecated_v1
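
(Not part of the patch.) On Python versions where the old aliases still exist (they have since been removed in newer releases), calling them emits a DeprecationWarning, which is what this migration avoids. A hedged sketch of that behavior, assuming the standard-library unittest:

import unittest


class DeprecatedAliasWarnsTest(unittest.TestCase):
  """Illustrative only; assumes the deprecated aliases are still present."""

  def test_old_spelling_warns(self):
    with self.assertWarns(DeprecationWarning):
      # Deprecated spelling; the supported name is assertRegex.
      self.assertRegexpMatches("error_interpolation_test.py:1",
                               r"\.py:[0-9]+")
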
diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 58a1d379304..9160670a302 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -144,7 +144,7 @@ class FunctionTest(test.TestCase):
       return a
 
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           (r"output names must be either empty or equal in size to outputs. "
            "output names size = 2 outputs size = 1")):
@@ -305,7 +305,7 @@ class FunctionTest(test.TestCase):
 
     x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
     with session.Session(graph=g) as sess:
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "SymGrad expects to return 1.*but get 2.*instead"):
         _ = sess.run(dinp, {inp: x})
@@ -438,8 +438,8 @@ class FunctionTest(test.TestCase):
     g = ops.Graph()
     with g.as_default(), self.cached_session():
       self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0)
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "assertion failed.*-3"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "assertion failed.*-3"):
         self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0)
 
   @test_util.run_deprecated_v1
@@ -453,8 +453,8 @@ class FunctionTest(test.TestCase):
 
     with self.cached_session():
       self.assertEqual(1.0, MyFn(1.0).eval())
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "assertion"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "assertion"):
         _ = MyFn(100.0).eval()
 
   @test_util.run_deprecated_v1
@@ -514,14 +514,14 @@ class FunctionTest(test.TestCase):
       self.assertEqual(4, sess.run(cond, {pred: True, x: 3}))
 
       # The assertion should still fire if the False branch is taken.
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "assertion"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "assertion"):
         sess.run(cond, {pred: False, x: 3})
 
       # Similarly for loops.
       self.assertEqual(3, sess.run(loop, {pred: False, x: 3}))
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "assertion"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "assertion"):
         sess.run(loop, {pred: True, x: 3})
 
   @test_util.run_deprecated_v1
@@ -566,7 +566,7 @@ class FunctionTest(test.TestCase):
 
   def testDefineErrors(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError, "can not return None"):
+      with self.assertRaisesRegex(ValueError, "can not return None"):
 
         @function.Defun()
         def TwoNone():
@@ -574,28 +574,28 @@ class FunctionTest(test.TestCase):
 
         _ = TwoNone.definition
 
-      with self.assertRaisesRegexp(ValueError, "are not supported"):
+      with self.assertRaisesRegex(ValueError, "are not supported"):
 
         @function.Defun()
         def DefaultArg(unused_a=12):
           return constant_op.constant([1])
 
         _ = DefaultArg.definition
-      with self.assertRaisesRegexp(ValueError, "are not supported"):
+      with self.assertRaisesRegex(ValueError, "are not supported"):
 
         @function.Defun()
         def KwArgs(**unused_kwargs):
           return constant_op.constant([1])
 
         _ = KwArgs.definition
-      with self.assertRaisesRegexp(ValueError, "specified input types"):
+      with self.assertRaisesRegex(ValueError, "specified input types"):
 
         @function.Defun(dtypes.float32)
         def PlusMinusV2(a, b):
           return a + b, b - a
 
         _ = PlusMinusV2.definition
-      with self.assertRaisesRegexp(ValueError, "specified input types"):
+      with self.assertRaisesRegex(ValueError, "specified input types"):
 
         @function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)
         def PlusMinusV3(a, b):
@@ -623,25 +623,25 @@ class FunctionTest(test.TestCase):
       # pylint: disable=too-many-function-args
       # pylint: disable=unexpected-keyword-arg
       # pylint: disable=no-value-for-parameter
-      with self.assertRaisesRegexp(ValueError, "arguments: 0"):
+      with self.assertRaisesRegex(ValueError, "arguments: 0"):
         _ = Const(1)
-      with self.assertRaisesRegexp(ValueError, "arguments: 0"):
+      with self.assertRaisesRegex(ValueError, "arguments: 0"):
         _ = Const(1, 2)
 
-      with self.assertRaisesRegexp(ValueError, "arguments: 1"):
+      with self.assertRaisesRegex(ValueError, "arguments: 1"):
         _ = PlusOne()
       _ = PlusOne(1)
-      with self.assertRaisesRegexp(ValueError, "arguments: 1"):
+      with self.assertRaisesRegex(ValueError, "arguments: 1"):
         _ = PlusOne(1, 2)
 
-      with self.assertRaisesRegexp(ValueError, "arguments: 2"):
+      with self.assertRaisesRegex(ValueError, "arguments: 2"):
         _ = PlusMinus()
-      with self.assertRaisesRegexp(ValueError, "arguments: 2"):
+      with self.assertRaisesRegex(ValueError, "arguments: 2"):
         _ = PlusMinus(1)
       _ = PlusMinus(1, 2)
 
       _ = PlusOne(1, name="p1")
-      with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
+      with self.assertRaisesRegex(ValueError, "Unknown keyword arguments"):
         _ = PlusOne(1, device="/device:GPU:0")
 
   def testFunctionDecorator(self):
@@ -788,7 +788,7 @@ class FunctionTest(test.TestCase):
           y = logging_ops.Print(y, [y], "inner")
         return y
 
-      with self.assertRaisesRegexp(ValueError, "not an element of this graph."):
+      with self.assertRaisesRegex(ValueError, "not an element of this graph."):
         # NOTE: We still do not support capturing control deps.
         _ = Foo(x)
 
@@ -1051,7 +1051,7 @@ class FunctionTest(test.TestCase):
       return t + constant_op.constant(3, dtype=dtypes.int32)
 
     # First time we try to capture a stateful RandomUniform op.
-    with self.assertRaisesRegexp(ValueError, "Cannot capture a stateful node"):
+    with self.assertRaisesRegex(ValueError, "Cannot capture a stateful node"):
       res = StatefulFn()
 
     # This time we whitelist this op, so that it's recreated.
@@ -1313,7 +1313,7 @@ class FunctionsFromProtos(test.TestCase):
     library.gradient.extend([gradient])
     library.function.extend([F1.definition])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "FunctionDefLibrary missing 'G1_[0-9a-zA-Z]{8,11}' FunctionDef"):
       function.from_library(library)
@@ -1323,7 +1323,7 @@ class FunctionsFromProtos(test.TestCase):
     library.gradient.extend([gradient])
     library.function.extend([G1.definition])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "FunctionDefLibrary missing 'F1_[0-9a-zA-Z]{8,11}' FunctionDef"):
       function.from_library(library)
@@ -1353,7 +1353,7 @@ class FunctionsFromProtos(test.TestCase):
 
     library.gradient.extend([gradient1, gradient2])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "FunctionDefLibrary contains cyclic gradient functions!"):
       function.from_library(library)
 
diff --git a/tensorflow/python/framework/graph_util_test.py b/tensorflow/python/framework/graph_util_test.py
index d39b8d0a906..4957ee7d97e 100644
--- a/tensorflow/python/framework/graph_util_test.py
+++ b/tensorflow/python/framework/graph_util_test.py
@@ -189,7 +189,7 @@ class DeviceFunctionsTest(test.TestCase):
     graph_def = graph_pb2.GraphDef()
     n1 = graph_def.node.add()
     n1.name = "n1"
-    with self.assertRaisesRegexp(TypeError, "must be a list"):
+    with self.assertRaisesRegex(TypeError, "must be a list"):
       graph_util.extract_sub_graph(graph_def, "n1")
 
   def create_node_def(self, op, name, inputs):
diff --git a/tensorflow/python/framework/importer_test.py b/tensorflow/python/framework/importer_test.py
index ae30c15e844..9d64311b4b1 100644
--- a/tensorflow/python/framework/importer_test.py
+++ b/tensorflow/python/framework/importer_test.py
@@ -465,7 +465,7 @@ class ImportGraphDefTest(test.TestCase):
     error_msg = ("Input 0 of node import/B was passed int32 from import/A:0 "
                  "incompatible with expected float.")
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError, error_msg):
+      with self.assertRaisesRegex(ValueError, error_msg):
         importer.import_graph_def(
             self._MakeGraphDef("""
             node { name: 'A' op: 'IntOutput' }
@@ -494,7 +494,7 @@ class ImportGraphDefTest(test.TestCase):
   def testInvalidSignatureTooManyInputsInGraphDef(self):
     with ops.Graph().as_default():
       # TODO(skyewm): improve error message
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "NodeDef expected inputs '' do not match 1 inputs specified"):
         importer.import_graph_def(
@@ -506,7 +506,7 @@ class ImportGraphDefTest(test.TestCase):
   def testInvalidSignatureNotEnoughInputsInGraphDef(self):
     with ops.Graph().as_default():
       # TODO(skyewm): improve error message
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "NodeDef expected inputs 'int32, float' do not match 1 inputs "
           "specified"):
@@ -518,8 +518,8 @@ class ImportGraphDefTest(test.TestCase):
 
   def testMissingInputOpInGraphDef(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError,
-                                   "Node 'B': Unknown input node 'A:0'"):
+      with self.assertRaisesRegex(ValueError,
+                                  "Node 'B': Unknown input node 'A:0'"):
         importer.import_graph_def(
             self._MakeGraphDef("""
             node { name: 'B' op: 'FloatInput' input: 'A:0' }
@@ -538,7 +538,7 @@ class ImportGraphDefTest(test.TestCase):
 
   def testMissingInputTensorInGraphDef(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "Node 'B': Connecting to invalid output 1 of source node A "
           "which has 1 outputs"):
@@ -550,8 +550,8 @@ class ImportGraphDefTest(test.TestCase):
 
   def testMissingControlInputInGraphDef(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError,
-                                   r"Node 'B': Unknown input node '\^A'"):
+      with self.assertRaisesRegex(ValueError,
+                                  r"Node 'B': Unknown input node '\^A'"):
         importer.import_graph_def(
             self._MakeGraphDef("""
             node { name: 'B' op: 'None' input: '^A' }
@@ -559,8 +559,8 @@ class ImportGraphDefTest(test.TestCase):
 
   def testInvalidTensorNameOutputIndexInGraphDef(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError,
-                                   "Node 'B': Unknown input node 'A:B'"):
+      with self.assertRaisesRegex(ValueError,
+                                  "Node 'B': Unknown input node 'A:B'"):
         importer.import_graph_def(
             self._MakeGraphDef("""
             node { name: 'B' op: 'None' input: 'A:B' }
@@ -568,8 +568,8 @@ class ImportGraphDefTest(test.TestCase):
 
   def testInvalidTensorNameInGraphDef(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError,
-                                   "Node 'B': Unknown input node 'A:B:0'"):
+      with self.assertRaisesRegex(ValueError,
+                                  "Node 'B': Unknown input node 'A:B:0'"):
         importer.import_graph_def(
             self._MakeGraphDef("""
             node { name: 'B' op: 'None' input: 'A:B:0' }
@@ -577,7 +577,7 @@ class ImportGraphDefTest(test.TestCase):
 
   def testMissingReturnOperation(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Requested return node 'B' not found in graph def"):
         importer.import_graph_def(
             self._MakeGraphDef("""
@@ -587,7 +587,7 @@ class ImportGraphDefTest(test.TestCase):
 
   def testMissingReturnTensor(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r"Invalid return output 1 of node 'A', which has 1 output\(s\)"):
         importer.import_graph_def(
@@ -596,7 +596,7 @@ class ImportGraphDefTest(test.TestCase):
             """),
             return_elements=["A:1"])
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Requested return tensor 'B:0' not found in graph def"):
         importer.import_graph_def(
             self._MakeGraphDef("""
@@ -604,8 +604,8 @@ class ImportGraphDefTest(test.TestCase):
             """),
             return_elements=["B:0"])
 
-      with self.assertRaisesRegexp(ValueError,
-                                   "Cannot convert 'A:B:0' to a tensor name."):
+      with self.assertRaisesRegex(ValueError,
+                                  "Cannot convert 'A:B:0' to a tensor name."):
         importer.import_graph_def(
             self._MakeGraphDef("""
             node { name: 'A' op: 'IntOutput' }
@@ -614,7 +614,7 @@ class ImportGraphDefTest(test.TestCase):
 
   def testMissingInputMap(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r"Attempted to map inputs that were not found in graph_def: \[B:0\]"):
         importer.import_graph_def(
@@ -633,7 +633,7 @@ class ImportGraphDefTest(test.TestCase):
           input_map={"A:0": constant_op.constant(5.0)})
 
       # Mapping a non-existent output of an existing node should fail.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r"Attempted to map inputs that were not found in graph_def: \[A:2\]"):
         importer.import_graph_def(
@@ -644,7 +644,7 @@ class ImportGraphDefTest(test.TestCase):
 
   def testInputMapTypeMismatch(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Input 0 of node import/B was passed float from Const:0 "
           "incompatible with expected int32."):
         importer.import_graph_def(
@@ -870,7 +870,7 @@ class ImportGraphDefTest(test.TestCase):
           } }""")
 
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Node 'B' expects to be colocated with unknown node 'A'"):
         importer.import_graph_def(
             original_graph_def, return_elements=["B"], name="imported_graph")
@@ -919,17 +919,17 @@ class ImportGraphDefTest(test.TestCase):
 
   def testInvalidInputForReturnOperations(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
-          TypeError, "return_elements must be a list of strings."):
+      with self.assertRaisesRegex(TypeError,
+                                  "return_elements must be a list of strings."):
         importer.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
 
-      with self.assertRaisesRegexp(ValueError,
-                                   "Cannot convert 'a:b:c' to a tensor name."):
+      with self.assertRaisesRegex(ValueError,
+                                  "Cannot convert 'a:b:c' to a tensor name."):
         importer.import_graph_def(
             self._MakeGraphDef(""), return_elements=["a:b:c"])
 
   def testDuplicateOperationNames(self):
-    with self.assertRaisesRegexp(ValueError, "Node 'A' is not unique"):
+    with self.assertRaisesRegex(ValueError, "Node 'A' is not unique"):
       importer.import_graph_def(
           self._MakeGraphDef("""
           node { name: 'A' op: 'IntOutput' }
@@ -1077,7 +1077,7 @@ class ImportGraphDefTest(test.TestCase):
 
   def testVersionLow(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r"GraphDef producer version -1 below min producer %d supported "
           r"by TensorFlow \S+\.  Please regenerate your graph.$" %
@@ -1086,7 +1086,7 @@ class ImportGraphDefTest(test.TestCase):
 
   def testVersionHigh(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r"GraphDef min consumer version %d above current version %d "
           r"for TensorFlow \S+\.  Please upgrade TensorFlow\.$" %
@@ -1137,7 +1137,7 @@ class ImportGraphDefTest(test.TestCase):
           """),
           return_elements=["A"],
           producer_op_list=producer_op_list)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Operation 'import/A' has no attr named 'default_int'."):
         a[0].get_attr("default_int")
 
diff --git a/tensorflow/python/framework/memory_checker_test.py b/tensorflow/python/framework/memory_checker_test.py
index 62af2814395..bed6aaca587 100644
--- a/tensorflow/python/framework/memory_checker_test.py
+++ b/tensorflow/python/framework/memory_checker_test.py
@@ -125,7 +125,7 @@ class MemoryCheckerTest(test.TestCase):
       x = constant_op.constant(1)  # pylint: disable=unused-variable
       memory_checker.record_snapshot()
 
-    with self.assertRaisesRegexp(AssertionError, 'New Python objects'):
+    with self.assertRaisesRegex(AssertionError, 'New Python objects'):
       memory_checker.assert_no_new_python_objects()
 
   def testNewPythonObjectBelowThreshold(self):
diff --git a/tensorflow/python/framework/meta_graph_test.py b/tensorflow/python/framework/meta_graph_test.py
index eff613b4204..ae44fbce0f0 100644
--- a/tensorflow/python/framework/meta_graph_test.py
+++ b/tensorflow/python/framework/meta_graph_test.py
@@ -407,13 +407,13 @@ class ScopedMetaGraphTest(test.TestCase):
       new_image = constant_op.constant(
           1.2, dtypes.float32, shape=[100, 28], name="images")
 
-    with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
+    with self.assertRaisesRegex(ValueError, "Graph contains unbound inputs"):
       meta_graph.import_scoped_meta_graph(
           os.path.join(test_dir, exported_filenames[0]),
           graph=graph,
           import_scope="new_hidden1")
 
-    with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
+    with self.assertRaisesRegex(ValueError, "Graph contains unbound inputs"):
       meta_graph.import_scoped_meta_graph(
           os.path.join(test_dir, exported_filenames[0]),
           graph=graph,
@@ -829,7 +829,7 @@ class ScopedMetaGraphTest(test.TestCase):
 
     graph2 = ops.Graph()
     with graph2.as_default():
-      with self.assertRaisesRegexp(ValueError, "Graph contains unbound inputs"):
+      with self.assertRaisesRegex(ValueError, "Graph contains unbound inputs"):
         meta_graph.import_scoped_meta_graph(
             orig_meta_graph, import_scope="new_hidden1")
 
@@ -952,7 +952,7 @@ class MetaGraphWithVariableScopeTest(test.TestCase):
               "python/framework/testdata/metrics_export_meta_graph.pb"))
       self.assertEqual(len(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)),
                        2)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           AttributeError, "'Tensor' object has no attribute 'initializer'"):
         initializer = variables.local_variables_initializer()
 
diff --git a/tensorflow/python/framework/ops_test.py b/tensorflow/python/framework/ops_test.py
index 7626bd780bb..09a192dea52 100644
--- a/tensorflow/python/framework/ops_test.py
+++ b/tensorflow/python/framework/ops_test.py
@@ -80,12 +80,12 @@ class ResourceTest(test_util.TensorFlowTestCase):
           handle=handle,
           create_op=test_ops.resource_create_op(handle),
           is_initialized_op=test_ops.resource_initialized_op(handle))
-      self.assertEquals(
+      self.assertEqual(
           len(
               resources.report_uninitialized_resources(
                   resources.shared_resources()).eval()), 1)
       resources.initialize_resources(resources.shared_resources()).run()
-      self.assertEquals(
+      self.assertEqual(
           len(
               resources.report_uninitialized_resources(
                   resources.shared_resources()).eval()), 0)
@@ -108,7 +108,7 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
     op = ops.Operation(
         ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
     t = op.outputs[0]
-    with self.assertRaisesRegexp(TypeError, "Cannot iterate"):
+    with self.assertRaisesRegex(TypeError, "Cannot iterate"):
       iter(t)
 
   def testIterableGraph(self):
@@ -118,14 +118,12 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
     op = ops.Operation(
         ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
     t = op.outputs[0]
-    with self.assertRaisesRegexp(TypeError, "iterating.*not allowed in Graph"):
+    with self.assertRaisesRegex(TypeError, "iterating.*not allowed in Graph"):
       next(iter(t))
-    with self.assertRaisesRegexp(
-        TypeError, "iterating.*AutoGraph did convert"):
+    with self.assertRaisesRegex(TypeError, "iterating.*AutoGraph did convert"):
       with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
         next(iter(t))
-    with self.assertRaisesRegexp(
-        TypeError, "iterating.*AutoGraph is disabled"):
+    with self.assertRaisesRegex(TypeError, "iterating.*AutoGraph is disabled"):
       with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
         next(iter(t))
 
@@ -133,15 +131,15 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
     op = ops.Operation(
         ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.bool])
     t = op.outputs[0]
-    with self.assertRaisesRegexp(
-        TypeError, "using.*as a.*bool.*not allowed in Graph"):
+    with self.assertRaisesRegex(TypeError,
+                                "using.*as a.*bool.*not allowed in Graph"):
       bool(t)
-    with self.assertRaisesRegexp(
-        TypeError, "using.*as a.*bool.*AutoGraph did convert"):
+    with self.assertRaisesRegex(TypeError,
+                                "using.*as a.*bool.*AutoGraph did convert"):
       with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
         bool(t)
-    with self.assertRaisesRegexp(
-        TypeError, "using.*as a.*bool.*AutoGraph is disabled"):
+    with self.assertRaisesRegex(TypeError,
+                                "using.*as a.*bool.*AutoGraph is disabled"):
       with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
         bool(t)
 
@@ -181,7 +179,7 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
     with self.cached_session():
       a = array_ops.ones([1, 2, 3])
       b = array_ops.ones([4, 5, 6])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, r"Dimensions must be equal, but are 2 and 5 for .*add"
           r".*Add(V2)?.* with input shapes: \[1,2,3\], \[4,5,6\]."):
         _ = a + b
@@ -190,11 +188,11 @@ class TensorAndShapeTest(test_util.TensorFlowTestCase):
     with ops.Graph().as_default():
       x = array_ops.ones((3, 4), name="test_ones")
 
-    with self.assertRaisesRegexp(NotImplementedError,
-                                 r"Cannot convert a symbolic.+test_ones"):
+    with self.assertRaisesRegex(NotImplementedError,
+                                r"Cannot convert a symbolic.+test_ones"):
       np.array(x)
 
-    with self.assertRaisesRegexp(TypeError, "not well defined.+test_ones"):
+    with self.assertRaisesRegex(TypeError, "not well defined.+test_ones"):
       len(x)
 
     # EagerTensors should still behave as numpy arrays.
@@ -591,7 +589,7 @@ class OperationTest(test_util.TensorFlowTestCase):
         ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
         [dtypes.float32_ref, dtypes.float32])
     self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
-    self.assertEquals([], list(op1.inputs))
+    self.assertEqual([], list(op1.inputs))
     ref_t, nonref_t = op1.values()
     # NOTE(mrry): Must specify input_types to preserve ref-typed input.
     op2 = ops.Operation(
@@ -601,7 +599,7 @@ class OperationTest(test_util.TensorFlowTestCase):
     self.assertProtoEquals(
         "op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
         op2.node_def)
-    self.assertEquals([ref_t, nonref_t], list(op2.inputs))
+    self.assertEqual([ref_t, nonref_t], list(op2.inputs))
     op3 = ops.Operation(
         ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
     self.assertProtoEquals(
@@ -715,8 +713,8 @@ class OperationTest(test_util.TensorFlowTestCase):
   def testNoConvert(self):
     # Operation cannot be converted to Tensor.
     op = control_flow_ops.no_op()
-    with self.assertRaisesRegexp(TypeError,
-                                 r"Can't convert Operation '.*' to Tensor"):
+    with self.assertRaisesRegex(TypeError,
+                                r"Can't convert Operation '.*' to Tensor"):
       ops.convert_to_tensor(op)
 
   def testStr(self):
@@ -751,7 +749,7 @@ class OperationTest(test_util.TensorFlowTestCase):
                      [tensor_util.make_tensor_proto(1, dtypes.int32)])
 
     type_val = op.get_attr("type_val")
-    # First check that type_val is a DType, because the assertEquals will work
+    # First check that type_val is a DType, because the assertEqual will work
     # no matter what since DType overrides __eq__
     self.assertIsInstance(type_val, dtypes.DType)
     self.assertEqual(type_val, dtypes.int32)
@@ -769,7 +767,7 @@ class OperationTest(test_util.TensorFlowTestCase):
                      attr_value_pb2.NameAttrList(name="MyFunc"))
 
     # Try fetching missing attr
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
       op.get_attr("FakeAttr")
 
@@ -832,7 +830,7 @@ class OperationTest(test_util.TensorFlowTestCase):
       y.op._add_control_input(x.op)  # pylint: disable=protected-access
       x.op._add_control_input(y.op)  # pylint: disable=protected-access
     with self.session(graph=graph) as sess:
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "Graph is invalid, contains a cycle with 2 nodes"):
         self.evaluate(x)
@@ -845,25 +843,25 @@ class OperationTest(test_util.TensorFlowTestCase):
       z = x + y
 
     z.op._update_input(0, y)  # pylint: disable=protected-access
-    self.assertEquals(list(z.op.inputs), [y, y])
-    self.assertEquals(x.consumers(), [])
-    self.assertEquals(y.consumers(), [z.op, z.op])
+    self.assertEqual(list(z.op.inputs), [y, y])
+    self.assertEqual(x.consumers(), [])
+    self.assertEqual(y.consumers(), [z.op, z.op])
     with session.Session(graph=g) as sess:
-      self.assertEquals(self.evaluate(z), 4)
+      self.assertEqual(self.evaluate(z), 4)
 
     z.op._update_input(0, x)  # pylint: disable=protected-access
-    self.assertEquals(list(z.op.inputs), [x, y])
-    self.assertEquals(x.consumers(), [z.op])
-    self.assertEquals(y.consumers(), [z.op])
+    self.assertEqual(list(z.op.inputs), [x, y])
+    self.assertEqual(x.consumers(), [z.op])
+    self.assertEqual(y.consumers(), [z.op])
     with session.Session(graph=g) as sess:
-      self.assertEquals(self.evaluate(z), 3)
+      self.assertEqual(self.evaluate(z), 3)
 
     z.op._update_input(1, y)  # pylint: disable=protected-access
-    self.assertEquals(list(z.op.inputs), [x, y])
-    self.assertEquals(x.consumers(), [z.op])
-    self.assertEquals(y.consumers(), [z.op])
+    self.assertEqual(list(z.op.inputs), [x, y])
+    self.assertEqual(x.consumers(), [z.op])
+    self.assertEqual(y.consumers(), [z.op])
     with session.Session(graph=g) as sess:
-      self.assertEquals(self.evaluate(z), 3)
+      self.assertEqual(self.evaluate(z), 3)
 
   def testUpdateInputGraphError(self):
     g_0 = ops.Graph()
@@ -873,7 +871,7 @@ class OperationTest(test_util.TensorFlowTestCase):
     with g_1.as_default():
       y = constant_op.constant(2)
       z = y * 2
-      with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
+      with self.assertRaisesRegex(ValueError, "must be from the same graph"):
         z.op._update_input(0, x)  # pylint: disable=protected-access
 
   def testUpdateInputTypeError(self):
@@ -885,7 +883,7 @@ class OperationTest(test_util.TensorFlowTestCase):
       z = y + w
       z.op._update_input(0, x)  # pylint: disable=protected-access
     with session.Session(graph=g) as sess:
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "Input 0 of node add was passed string from Const_1:0 incompatible "
           "with expected int32"):
@@ -898,7 +896,7 @@ class OperationTest(test_util.TensorFlowTestCase):
       x = constant_op.constant(0, shape=[3, 1])
       y = constant_op.constant(1, shape=[2, 2])
       z = w + x
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
       z.op._update_input(0, y)  # pylint: disable=protected-access
@@ -907,11 +905,10 @@ class OperationTest(test_util.TensorFlowTestCase):
     g = ops.Graph()
     with g.as_default():
       x = constant_op.constant(1)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.OutOfRangeError,
         r"Cannot update edge. Input index \[1\] is greater than the number of "
-        r"total inputs \[0\]."
-    ):
+        r"total inputs \[0\]."):
       x.op._update_input(1, x)  # pylint: disable=protected-access
 
   @test_util.enable_control_flow_v2
@@ -955,7 +952,7 @@ class OperationTest(test_util.TensorFlowTestCase):
     self.assertEqual(len(x.op.op_def.input_arg), 0)
     self.assertEqual(len(x.op.op_def.output_arg), 1)
 
-    self.assertRegexpMatches(z.op.op_def.name, "Add(V2)?")
+    self.assertRegex(z.op.op_def.name, "Add(V2)?")
     self.assertEqual(len(z.op.op_def.input_arg), 2)
     self.assertEqual(len(z.op.op_def.output_arg), 1)
 
@@ -966,7 +963,7 @@ class OperationTest(test_util.TensorFlowTestCase):
       x = constant_op.constant(1)
     with g_1.as_default():
       y = constant_op.constant(2)
-      with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
+      with self.assertRaisesRegex(ValueError, "must be from the same graph"):
         y * x  # pylint: disable=pointless-statement
 
   def testInputsAreImmutable(self):
@@ -974,8 +971,8 @@ class OperationTest(test_util.TensorFlowTestCase):
     with g.as_default():
       x = test_ops.int_output()
       op = test_ops.int_input_int_output(x, name="myop").op
-    with self.assertRaisesRegexp(
-        AttributeError, "'tuple' object has no attribute 'append'"):
+    with self.assertRaisesRegex(AttributeError,
+                                "'tuple' object has no attribute 'append'"):
       op.inputs.append(None)
 
 
@@ -1462,8 +1459,8 @@ class DeviceTest(test_util.TensorFlowTestCase):
     with context.eager_mode():
       with ops.device("/device:CPU:0"):
         t = constant_op.constant(1.0)
-        self.assertRegexpMatches(t.device, "/device:CPU:0")
-        self.assertRegexpMatches(t.backing_device, "/device:CPU:0")
+        self.assertRegex(t.device, "/device:CPU:0")
+        self.assertRegex(t.backing_device, "/device:CPU:0")
 
   def testDevicePartialString(self):
     g = ops.Graph()
@@ -1904,8 +1901,8 @@ class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
 
     suffixes = ["", "_1", "_2"]
     for t, s in zip(threads, suffixes):
-      self.assertEquals("foo" + s + "/FloatOutput", t.result[0].name)
-      self.assertEquals("foo" + s + "/FloatOutput_1", t.result[1].name)
+      self.assertEqual("foo" + s + "/FloatOutput", t.result[0].name)
+      self.assertEqual("foo" + s + "/FloatOutput_1", t.result[1].name)
 
 
 class ObjectWithName(object):
@@ -2095,7 +2092,7 @@ class RegistrationTest(test_util.TensorFlowTestCase):
       x = test_ops.float_output()
       with g.gradient_override_map({"CopyOp": "unknown_override"}):
         y = test_ops.copy_op(x)
-      with self.assertRaisesRegexp(LookupError, "unknown_override"):
+      with self.assertRaisesRegex(LookupError, "unknown_override"):
         ops.get_gradient_function(y.op)
 
 
@@ -2616,8 +2613,8 @@ class InitScopeTest(test_util.TensorFlowTestCase):
         # First ensure that graphs that are not building functions are
         # not escaped.
         function_with_variables("foo")
-        with self.assertRaisesRegexp(ValueError,
-                                     r"Variable foo already exists.*"):
+        with self.assertRaisesRegex(ValueError,
+                                    r"Variable foo already exists.*"):
           # This will fail because reuse is not set to True.
           function_with_variables("foo")
 
@@ -2710,12 +2707,11 @@ class InitScopeTest(test_util.TensorFlowTestCase):
     with context.eager_mode():
       c = constant_op.constant(1.0)
       with ops.Graph().as_default():
-        with self.assertRaisesRegexp(
-            RuntimeError, "Attempting to capture an EagerTensor"):
+        with self.assertRaisesRegex(RuntimeError,
+                                    "Attempting to capture an EagerTensor"):
           math_ops.add(c, c)
         c2 = constant_op.constant(2.0)
-      with self.assertRaisesRegexp(
-          TypeError, "Graph tensors"):
+      with self.assertRaisesRegex(TypeError, "Graph tensors"):
         math_ops.add(c2, c2)
 
   def testPreservesNameScopeInEagerExecution(self):
@@ -3083,7 +3079,7 @@ class DeviceStackTest(test_util.TensorFlowTestCase):
     self.assertEqual(1, len(three_list))
     func_description = three_list[0].obj
     expected_regex = r"device_func<.*ops_test.py, [0-9]+"
-    self.assertRegexpMatches(func_description, expected_regex)
+    self.assertRegex(func_description, expected_regex)
 
   @test_util.run_deprecated_v1
   def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
@@ -3097,8 +3093,7 @@ class DeviceStackTest(test_util.TensorFlowTestCase):
     two_metadata = const_two.op._device_assignments[0]
 
     # Verify both types of device assignment return the right stack info.
-    self.assertRegexpMatches("ops_test.py",
-                             os.path.basename(one_metadata.filename))
+    self.assertRegex("ops_test.py", os.path.basename(one_metadata.filename))
     self.assertEqual(one_metadata.filename, two_metadata.filename)
     self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
 
@@ -3266,7 +3261,7 @@ class DeprecatedTest(test_util.TensorFlowTestCase):
 
   def testGraphConstructionFail(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(NotImplementedError, self._error()):
+      with self.assertRaisesRegex(NotImplementedError, self._error()):
         test_ops.old()
 
 
@@ -3317,19 +3312,19 @@ class NameScopeTest(test_util.TensorFlowTestCase):
           with ops.name_scope("_"):
             pass
 
-    self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
+    self.assertRaisesRegex(ValueError, "'_' is not a valid scope name", f)
 
 
 class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
 
   @test_util.run_v1_only("b/120545219")
   def testBadArgumentsToEnableEagerExecution(self):
-    with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
+    with self.assertRaisesRegex(TypeError, "config must be a tf.ConfigProto"):
       ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
-    with self.assertRaisesRegexp(ValueError, "device_policy must be one of"):
+    with self.assertRaisesRegex(ValueError, "device_policy must be one of"):
       c = config_pb2.ConfigProto()
       ops.enable_eager_execution(c, c)
-    with self.assertRaisesRegexp(ValueError, "execution_mode must be one of"):
+    with self.assertRaisesRegex(ValueError, "execution_mode must be one of"):
       c = config_pb2.ConfigProto()
       ops.enable_eager_execution(c, execution_mode=c)
 
diff --git a/tensorflow/python/framework/registry_test.py b/tensorflow/python/framework/registry_test.py
index 5adf12fdacf..52bdc4ca7a7 100644
--- a/tensorflow/python/framework/registry_test.py
+++ b/tensorflow/python/framework/registry_test.py
@@ -50,7 +50,7 @@ class RegistryTest(test.TestCase, parameterized.TestCase):
   def testDuplicate(self):
     myreg = registry.Registry('testbar')
     myreg.register(bar, 'Bar')
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         KeyError, r'Registering two testbar with name \'Bar\'! '
         r'\(Previous registration was in [^ ]+ .*.py:[0-9]+\)'):
       myreg.register(bar, 'Bar')
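
(Not part of the patch.) assertRaisesRegex supports both call styles that appear in the migrated tests: as a context manager, or invoked directly with a callable and its arguments. A small sketch:

import unittest


class RaisesRegexCallStylesTest(unittest.TestCase):
  """Illustrative only; not part of this patch."""

  def _reject(self, name):
    raise ValueError("'%s' is not a valid scope name" % name)

  def test_context_manager_style(self):
    with self.assertRaisesRegex(ValueError, "not a valid scope name"):
      self._reject("_")

  def test_callable_style(self):
    # Same check, passing the callable and its arguments directly.
    self.assertRaisesRegex(ValueError, "not a valid scope name",
                           self._reject, "_")
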
diff --git a/tensorflow/python/framework/subscribe_test.py b/tensorflow/python/framework/subscribe_test.py
index a74e96f9d9d..3ccb7fd0bba 100644
--- a/tensorflow/python/framework/subscribe_test.py
+++ b/tensorflow/python/framework/subscribe_test.py
@@ -118,7 +118,7 @@ class SubscribeTest(test_util.TensorFlowTestCase):
     self._ExpectSubscribedIdentities(subscribed)
 
     # Expect an exception to be raised for unsupported types.
-    with self.assertRaisesRegexp(TypeError, 'has invalid type'):
+    with self.assertRaisesRegex(TypeError, 'has invalid type'):
       subscribe.subscribe(c.name,
                           lambda t: script_ops.py_func(sub, [t], [t.dtype]))
 
diff --git a/tensorflow/python/framework/tensor_shape_div_test.py b/tensorflow/python/framework/tensor_shape_div_test.py
index 5160c75e527..e892d7dffbd 100644
--- a/tensorflow/python/framework/tensor_shape_div_test.py
+++ b/tensorflow/python/framework/tensor_shape_div_test.py
@@ -42,7 +42,7 @@ class DimensionDivTest(test_util.TensorFlowTestCase):
       two = tensor_shape.Dimension(2)
       message = (r"unsupported operand type\(s\) for /: "
                  r"'int' and 'Dimension', please use // instead")
-      with self.assertRaisesRegexp(TypeError, message):
+      with self.assertRaisesRegex(TypeError, message):
         _ = 6 / two
 
 
diff --git a/tensorflow/python/framework/tensor_shape_test.py b/tensorflow/python/framework/tensor_shape_test.py
index e1bc6d5e8aa..fec9664a5ca 100644
--- a/tensorflow/python/framework/tensor_shape_test.py
+++ b/tensorflow/python/framework/tensor_shape_test.py
@@ -217,15 +217,15 @@ class DimensionTest(test_util.TensorFlowTestCase):
     two = tensor_shape.Dimension(2)
     message = (r"unsupported operand type\(s\) for /: "
                r"'Dimension' and 'Dimension', please use // instead")
-    with self.assertRaisesRegexp(TypeError, message):
+    with self.assertRaisesRegex(TypeError, message):
       _ = six / two
     message = (r"unsupported operand type\(s\) for /: "
                r"'Dimension' and 'int', please use // instead")
-    with self.assertRaisesRegexp(TypeError, message):
+    with self.assertRaisesRegex(TypeError, message):
       _ = six / 2
     message = (r"unsupported operand type\(s\) for /: "
                r"'int' and 'Dimension', please use // instead")
-    with self.assertRaisesRegexp(TypeError, message):
+    with self.assertRaisesRegex(TypeError, message):
       _ = 6 / two
 
 
@@ -390,7 +390,7 @@ class ShapeTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testTruedivFails(self):
     unknown = tensor_shape.Dimension(None)
     self.assertEqual((unknown // unknown).value, None)
-    with self.assertRaisesRegexp(TypeError, r"unsupported operand type"):
+    with self.assertRaisesRegex(TypeError, r"unsupported operand type"):
       unknown / unknown  # pylint: disable=pointless-statement
 
   def testConvertFromProto(self):
@@ -481,8 +481,8 @@ class ShapeTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       _ = unk1 != unk0
 
   def testAsList(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 "not defined on an unknown TensorShape"):
+    with self.assertRaisesRegex(ValueError,
+                                "not defined on an unknown TensorShape"):
       tensor_shape.unknown_shape().as_list()
     self.assertAllEqual([None, None], tensor_shape.unknown_shape(2).as_list())
     self.assertAllEqual([2, None, 4], tensor_shape.TensorShape(
diff --git a/tensorflow/python/framework/tensor_spec_test.py b/tensorflow/python/framework/tensor_spec_test.py
index 85c4cd8bf81..f67aa4c9013 100644
--- a/tensorflow/python/framework/tensor_spec_test.py
+++ b/tensorflow/python/framework/tensor_spec_test.py
@@ -172,11 +172,11 @@ class TensorSpecTest(test_util.TensorFlowTestCase):
 class BoundedTensorSpecTest(test_util.TensorFlowTestCase):
 
   def testInvalidMinimum(self):
-    with self.assertRaisesRegexp(ValueError, "not compatible"):
+    with self.assertRaisesRegex(ValueError, "not compatible"):
       tensor_spec.BoundedTensorSpec((3, 5), dtypes.uint8, (0, 0, 0), (1, 1))
 
   def testInvalidMaximum(self):
-    with self.assertRaisesRegexp(ValueError, "not compatible"):
+    with self.assertRaisesRegex(ValueError, "not compatible"):
       tensor_spec.BoundedTensorSpec((3, 5), dtypes.uint8, 0, (1, 1, 1))
 
   def testMinimumMaximumAttributes(self):
@@ -190,9 +190,9 @@ class BoundedTensorSpecTest(test_util.TensorFlowTestCase):
   def testNotWriteableNP(self):
     spec = tensor_spec.BoundedTensorSpec(
         (1, 2, 3), dtypes.float32, 0, (5, 5, 5))
-    with self.assertRaisesRegexp(ValueError, "read-only"):
+    with self.assertRaisesRegex(ValueError, "read-only"):
       spec.minimum[0] = -1
-    with self.assertRaisesRegexp(ValueError, "read-only"):
+    with self.assertRaisesRegex(ValueError, "read-only"):
       spec.maximum[0] = 100
 
   def testReuseSpec(self):
diff --git a/tensorflow/python/framework/tensor_util_test.py b/tensorflow/python/framework/tensor_util_test.py
index ad0aec1623d..6d7643cc805 100644
--- a/tensorflow/python/framework/tensor_util_test.py
+++ b/tensorflow/python/framework/tensor_util_test.py
@@ -736,7 +736,7 @@ class TensorUtilTest(test.TestCase):
 
     # Validate the helpful error message when trying to convert an
     # unconvertible list as strings.
-    with self.assertRaisesRegexp(TypeError, "Failed to convert object"):
+    with self.assertRaisesRegex(TypeError, "Failed to convert object"):
       tensor_util.make_tensor_proto([tensor_shape.Dimension(1)])
 
   def testTensorShapeVerification(self):
diff --git a/tensorflow/python/framework/test_combinations_test.py b/tensorflow/python/framework/test_combinations_test.py
index 5586d4bd733..f49cd368d50 100644
--- a/tensorflow/python/framework/test_combinations_test.py
+++ b/tensorflow/python/framework/test_combinations_test.py
@@ -125,7 +125,7 @@ class TestingCombinationsTest(test.TestCase):
   def test_overlapping_keys(self):
     c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"])
     c2 = combinations.combine(mode=["eager"], loss=["callable"])
-    with self.assertRaisesRegexp(ValueError, ".*Keys.+overlap.+"):
+    with self.assertRaisesRegex(ValueError, ".*Keys.+overlap.+"):
       _ = combinations.times(c1, c2)
 
 
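For reference, assertRaisesRegex and assertRegex are the non-deprecated unittest spellings used throughout these hunks; the Regexp-suffixed names have been deprecated aliases of the same methods since Python 3.2, so the rename does not change test behavior. A minimal, self-contained sketch of the modern spellings (the exception message and version string below are illustrative, not taken from the TensorFlow tests):

    import unittest

    class ModernAssertSpellingsTest(unittest.TestCase):

        def test_assert_raises_regex(self):
            # assertRaisesRegex replaces the deprecated assertRaisesRegexp.
            with self.assertRaisesRegex(ValueError, 'invalid literal'):
                int('not a number')

        def test_assert_regex(self):
            # assertRegex replaces the deprecated assertRegexpMatches.
            self.assertRegex('tf-2.3.0', r'^\w+-\d+\.\d+\.\d+$')

    if __name__ == '__main__':
        unittest.main()
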
diff --git a/tensorflow/python/framework/test_util_test.py b/tensorflow/python/framework/test_util_test.py
index 2bd75c3919e..f2176cd0b3b 100644
--- a/tensorflow/python/framework/test_util_test.py
+++ b/tensorflow/python/framework/test_util_test.py
@@ -104,8 +104,8 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     # assert_equal_graph_def doesn't care about order
     test_util.assert_equal_graph_def(def_57, def_75)
     # Compare two unequal graphs
-    with self.assertRaisesRegexp(AssertionError,
-                                 r"^Found unexpected node '{{node seven}}"):
+    with self.assertRaisesRegex(AssertionError,
+                                r"^Found unexpected node '{{node seven}}"):
       test_util.assert_equal_graph_def(def_57, def_empty)
 
   def testIsGoogleCudaEnabled(self):
@@ -162,8 +162,7 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Check if the assertion failure message contains the content of
     # the inner proto.
-    with self.assertRaisesRegexp(AssertionError,
-                                 r'meta_graph_version: "inner"'):
+    with self.assertRaisesRegex(AssertionError, r'meta_graph_version: "inner"'):
       self.assertProtoEquals("", meta_graph_def_outer)
 
   @test_util.run_in_graph_and_eager_modes
@@ -270,19 +269,19 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def testAllCloseScalars(self):
     self.assertAllClose(7, 7 + 1e-8)
-    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
+    with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
       self.assertAllClose(7, 7 + 1e-5)
 
   @test_util.run_in_graph_and_eager_modes
   def testAllCloseList(self):
-    with self.assertRaisesRegexp(AssertionError, r"not close dif"):
+    with self.assertRaisesRegex(AssertionError, r"not close dif"):
       self.assertAllClose([0], [1])
 
   @test_util.run_in_graph_and_eager_modes
   def testAllCloseDictToNonDict(self):
-    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
+    with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
       self.assertAllClose(1, {"a": 1})
-    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
+    with self.assertRaisesRegex(ValueError, r"Can't compare dict to non-dict"):
       self.assertAllClose({"a": 1}, 1)
 
   @test_util.run_in_graph_and_eager_modes
@@ -313,17 +312,17 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     for k in expected:
       actual = dict(expected)
       del actual[k]
-      with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
+      with self.assertRaisesRegex(AssertionError, r"mismatched keys"):
         self.assertAllClose(expected, actual)
 
     # With each item changed.
-    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
+    with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
       self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
-    with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
+    with self.assertRaisesRegex(AssertionError, r"Shape mismatch"):
       self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
     c_copy = np.array(c)
     c_copy[1, 1, 1] += 1e-5
-    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
+    with self.assertRaisesRegex(AssertionError, r"Not equal to tolerance"):
       self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
 
   @test_util.run_in_graph_and_eager_modes
@@ -349,8 +348,8 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Test mismatched values
     b["y"][1][0]["nested"]["n"] = 4.2
-    with self.assertRaisesRegexp(AssertionError,
-                                 r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
+    with self.assertRaisesRegex(AssertionError,
+                                r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
       self.assertAllClose(a, b)
 
   @test_util.run_in_graph_and_eager_modes
@@ -465,7 +464,7 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     self.assertAllEqual([120] * 3, k)
     self.assertAllEqual([20] * 3, j)
 
-    with self.assertRaisesRegexp(AssertionError, r"not equal lhs"):
+    with self.assertRaisesRegex(AssertionError, r"not equal lhs"):
       self.assertAllEqual([0] * 3, k)
 
   @test_util.run_in_graph_and_eager_modes
@@ -479,7 +478,7 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     self.assertNotAllEqual([120] * 3, k)
     self.assertNotAllEqual([20] * 3, j)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         AssertionError, r"two values are equal at all elements.*extra message"):
       self.assertNotAllEqual([120], k, msg="extra message")
 
@@ -705,7 +704,8 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def test_run_in_eager_and_graph_modes_test_class(self):
     msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
-    with self.assertRaisesRegexp(ValueError, msg):
+    with self.assertRaisesRegex(ValueError, msg):
+
       @test_util.run_in_graph_and_eager_modes()
       class Foo(object):
         pass
@@ -817,7 +817,7 @@ class SkipTestTest(test_util.TensorFlowTestCase):
                                    ["foo bar", "test message"]):
         raise ValueError("test message")
     try:
-      with self.assertRaisesRegexp(ValueError, "foo bar"):
+      with self.assertRaisesRegex(ValueError, "foo bar"):
         with test_util.skip_if_error(self, ValueError, "test message"):
           raise ValueError("foo bar")
     except unittest.SkipTest:
@@ -854,7 +854,7 @@ class SkipTestTest(test_util.TensorFlowTestCase):
 
   def test_skip_if_error_should_raise_message_mismatch(self):
     try:
-      with self.assertRaisesRegexp(ValueError, "foo bar"):
+      with self.assertRaisesRegex(ValueError, "foo bar"):
         with test_util.skip_if_error(self, ValueError, "test message"):
           raise ValueError("foo bar")
     except unittest.SkipTest:
@@ -862,7 +862,7 @@ class SkipTestTest(test_util.TensorFlowTestCase):
 
   def test_skip_if_error_should_raise_no_message(self):
     try:
-      with self.assertRaisesRegexp(ValueError, ""):
+      with self.assertRaisesRegex(ValueError, ""):
         with test_util.skip_if_error(self, ValueError, "test message"):
           raise ValueError()
     except unittest.SkipTest:
@@ -924,7 +924,7 @@ class GarbageCollectionTest(test_util.TensorFlowTestCase):
       def test_has_no_leak(self):
         constant_op.constant([3.], name="no-leak")
 
-    with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
+    with self.assertRaisesRegex(AssertionError, "Tensors not deallocated"):
       LeakedTensorTest().test_has_leak()
 
     LeakedTensorTest().test_has_no_leak()
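
The renames in this patch are purely textual. As a hypothetical illustration only (not the tooling that produced this change), a helper along these lines could apply the same whole-word substitutions across a tree of test files:

    import pathlib
    import re

    # Deprecated unittest aliases and their modern replacements.
    RENAMES = {
        'assertRaisesRegexp': 'assertRaisesRegex',
        'assertRegexpMatches': 'assertRegex',
    }

    def migrate(path):
        text = path.read_text()
        for old, new in RENAMES.items():
            # Word boundaries keep longer identifiers from being clipped.
            text = re.sub(r'\b%s\b' % old, new, text)
        path.write_text(text)

    if __name__ == '__main__':
        for test_file in pathlib.Path('tensorflow').rglob('*_test.py'):
            migrate(test_file)
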
diff --git a/tensorflow/python/framework/versions_test.py b/tensorflow/python/framework/versions_test.py
index 12c8ea0be25..417efe2f688 100644
--- a/tensorflow/python/framework/versions_test.py
+++ b/tensorflow/python/framework/versions_test.py
@@ -28,10 +28,8 @@ class VersionTest(test.TestCase):
     self.assertEqual(type(versions.__version__), str)
     self.assertEqual(type(versions.VERSION), str)
     # This pattern will need to grow as we include alpha, builds, etc.
-    self.assertRegexpMatches(versions.__version__,
-                             r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
-    self.assertRegexpMatches(versions.VERSION,
-                             r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
+    self.assertRegex(versions.__version__, r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
+    self.assertRegex(versions.VERSION, r'^\d+\.\d+\.(\d+(\-\w+)?|head)$')
 
   def testGraphDefVersion(self):
     version = versions.GRAPH_DEF_VERSION
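
The version pattern kept above accepts plain releases, suffixed pre-releases, and 'head' builds. Since assertRegex reduces to re.search, the pattern can be sanity-checked standalone; the sample version strings here are made up for illustration:

    import re

    VERSION_PATTERN = r'^\d+\.\d+\.(\d+(\-\w+)?|head)$'

    # assertRegex(text, pattern) passes iff re.search(pattern, text) matches.
    for sample in ('2.3.0', '2.4.0-rc0', '2.4.head'):
        assert re.search(VERSION_PATTERN, sample), sample
    assert not re.search(VERSION_PATTERN, '2.3')
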
diff --git a/tensorflow/python/keras/backend_test.py b/tensorflow/python/keras/backend_test.py
index f36db9605dc..48bbedbd4fc 100644
--- a/tensorflow/python/keras/backend_test.py
+++ b/tensorflow/python/keras/backend_test.py
@@ -2134,8 +2134,8 @@ class ControlOpsTests(test.TestCase):
     def false_func():
       return y
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'Rank of `condition` should be less than'):
+    with self.assertRaisesRegex(ValueError,
+                                'Rank of `condition` should be less than'):
       backend.switch(backend.equal(x, x), false_func, true_func)
 
 
diff --git a/tensorflow/python/keras/callbacks_test.py b/tensorflow/python/keras/callbacks_test.py
index d180e85a1d9..fdaf2e24227 100644
--- a/tensorflow/python/keras/callbacks_test.py
+++ b/tensorflow/python/keras/callbacks_test.py
@@ -274,7 +274,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
 
     with self.captureWritesToStream(sys.stdout) as printed:
       model.fit(dataset, epochs=2, steps_per_epoch=10)
-      self.assertRegexpMatches(printed.contents(), expected_log)
+      self.assertRegex(printed.contents(), expected_log)
 
   @keras_parameterized.run_all_keras_modes
   def test_callback_warning(self):
@@ -320,7 +320,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
 
     with self.captureWritesToStream(sys.stdout) as printed:
       model.fit(dataset, epochs=2, steps_per_epoch=10)
-      self.assertRegexpMatches(printed.contents(), expected_log)
+      self.assertRegex(printed.contents(), expected_log)
 
   @keras_parameterized.run_with_all_model_types
   @keras_parameterized.run_all_keras_modes
@@ -335,7 +335,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
 
     with self.captureWritesToStream(sys.stdout) as printed:
       model.fit(training_dataset, epochs=2, validation_data=val_dataset)
-      self.assertRegexpMatches(printed.contents(), expected_log)
+      self.assertRegex(printed.contents(), expected_log)
 
   @keras_parameterized.run_with_all_model_types
   @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@@ -350,7 +350,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
 
     with self.captureWritesToStream(sys.stdout) as printed:
       model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
-      self.assertRegexpMatches(printed.contents(), expected_log)
+      self.assertRegex(printed.contents(), expected_log)
 
   @keras_parameterized.run_with_all_model_types
   @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@@ -381,7 +381,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
     with self.captureWritesToStream(sys.stdout) as printed:
       model.fit(
           x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
-      self.assertRegexpMatches(printed.contents(), expected_log)
+      self.assertRegex(printed.contents(), expected_log)
 
   @keras_parameterized.run_with_all_model_types
   @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@@ -647,7 +647,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
     os.remove(filepath.format(epoch=9))
 
     # Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
-    with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
+    with self.assertRaisesRegex(ValueError, 'Unrecognized save_freq'):
       keras.callbacks.ModelCheckpoint(
           filepath,
           monitor=monitor,
@@ -669,7 +669,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
         save_freq=3)
 
     # Case 9: `ModelCheckpoint` with valid and invalid `options` argument.
-    with self.assertRaisesRegexp(TypeError, 'tf.train.CheckpointOptions'):
+    with self.assertRaisesRegex(TypeError, 'tf.train.CheckpointOptions'):
       keras.callbacks.ModelCheckpoint(
           filepath,
           monitor=monitor,
@@ -677,7 +677,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
           save_weights_only=True,
           mode=mode,
           options=save_options_lib.SaveOptions())
-    with self.assertRaisesRegexp(TypeError, 'tf.saved_model.SaveOptions'):
+    with self.assertRaisesRegex(TypeError, 'tf.saved_model.SaveOptions'):
       keras.callbacks.ModelCheckpoint(
           filepath,
           monitor=monitor,
@@ -881,8 +881,9 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
 
     callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
 
-    with self.assertRaisesRegexp(IOError, 'Please specify a non-directory '
-                                          'filepath for ModelCheckpoint.'):
+    with self.assertRaisesRegex(
+        IOError, 'Please specify a non-directory '
+        'filepath for ModelCheckpoint.'):
       model.fit(train_ds, epochs=1, callbacks=[callback])
 
   def test_ModelCheckpoint_with_bad_path_placeholders(self):
@@ -893,8 +894,8 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
     filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
     callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
 
-    with self.assertRaisesRegexp(KeyError, 'Failed to format this callback '
-                                           'filepath.*'):
+    with self.assertRaisesRegex(KeyError, 'Failed to format this callback '
+                                'filepath.*'):
       model.fit(train_ds, epochs=1, callbacks=[callback])
 
   def test_ModelCheckpoint_nonblocking(self):
@@ -971,7 +972,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
       cb_list.on_test_batch_end(0, logs)
       cb_list.on_test_end(logs)
 
-      with self.assertRaisesRegexp(RuntimeError, 'NumPy conversion'):
+      with self.assertRaisesRegex(RuntimeError, 'NumPy conversion'):
         # on_epoch_end should still block.
         cb_list.on_epoch_end(0, logs)
       cb_list.on_train_end(logs)
@@ -1271,7 +1272,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
   def test_ReduceLROnPlateau_backwards_compatibility(self):
     with test.mock.patch.object(logging, 'warning') as mock_log:
       reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_log.call_args), '`epsilon` argument is deprecated')
     self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
     self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
@@ -2090,7 +2091,7 @@ class TestTensorBoardV2(keras_parameterized.TestCase):
     return result
 
   def test_TensorBoard_invalid_argument(self):
-    with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
+    with self.assertRaisesRegex(ValueError, 'Unrecognized arguments'):
       keras.callbacks.TensorBoard(wwrite_images=True)
 
   def test_TensorBoard_non_blocking(self):
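
The ReduceLROnPlateau hunk above checks the string form of a mock's call_args. A small standalone sketch of that pattern with the modern assertRegex name; the logger and warning text are illustrative, not the Keras internals:

    import unittest
    from unittest import mock

    class WarningMessageTest(unittest.TestCase):

        def test_deprecation_warning_is_logged(self):
            logger = mock.Mock()
            logger.warning('`epsilon` argument is deprecated, use `min_delta`.')
            # call_args stringifies to call('`epsilon` argument is ...'), so a
            # plain substring-style regex is enough here.
            self.assertRegex(str(logger.warning.call_args),
                             '`epsilon` argument is deprecated')

    if __name__ == '__main__':
        unittest.main()
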
diff --git a/tensorflow/python/keras/distribute/distribute_strategy_test.py b/tensorflow/python/keras/distribute/distribute_strategy_test.py
index eac1e2feb8b..e8131ad88fb 100644
--- a/tensorflow/python/keras/distribute/distribute_strategy_test.py
+++ b/tensorflow/python/keras/distribute/distribute_strategy_test.py
@@ -395,7 +395,7 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
       self.assertEqual(steps, 2)
 
       # All samples can not be consumed in specified number of steps
-      with self.assertRaisesRegexp(ValueError, 'not divisible by steps'):
+      with self.assertRaisesRegex(ValueError, 'not divisible by steps'):
         distributed_training_utils.get_input_params(
             distribution, 63, steps=2, batch_size=None)
 
@@ -409,7 +409,7 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
         self.assertEqual(steps, 3)
       else:
         # Computed global batch size can not be sharded across replicas
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, 'could not be sharded evenly '
             'across the sync replicas'):
           distributed_training_utils.get_input_params(
@@ -448,7 +448,7 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
       self.assertEqual(steps, 5)
 
       # Number of samples is less than global batch size * steps
-      with self.assertRaisesRegexp(ValueError, 'less than samples required'):
+      with self.assertRaisesRegex(ValueError, 'less than samples required'):
         distributed_training_utils.get_input_params(
             distribution, 64, steps=10, batch_size=13)
 
@@ -1166,8 +1166,8 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
       self.assertAllClose(
           predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)
 
-      with self.assertRaisesRegexp(ValueError,
-                                   'Number of steps could not be inferred'):
+      with self.assertRaisesRegex(ValueError,
+                                  'Number of steps could not be inferred'):
         model.fit(dataset, epochs=1)
 
   @combinations.generate(all_strategy_combinations())
@@ -1238,7 +1238,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
       dataset = dataset.repeat(100)
       dataset = dataset.batch(10)
 
-      with self.assertRaisesRegexp(ValueError, 'incompatible with the layer'):
+      with self.assertRaisesRegex(ValueError, 'incompatible with the layer'):
         model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
 
   @combinations.generate(
@@ -1692,8 +1692,8 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
 
       # Check for `steps_per_epoch`.
       if distribution.num_replicas_in_sync > 1:
-        with self.assertRaisesRegexp(ValueError,
-                                     'distributed dataset, you must specify'):
+        with self.assertRaisesRegex(ValueError,
+                                    'distributed dataset, you must specify'):
           model.fit(ds, epochs=2)
 
   @combinations.generate(
@@ -1746,8 +1746,8 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
 
       # Check for `steps_per_epoch`.
       if distribution.num_replicas_in_sync > 1:
-        with self.assertRaisesRegexp(ValueError,
-                                     'distributed dataset, you must specify'):
+        with self.assertRaisesRegex(ValueError,
+                                    'distributed dataset, you must specify'):
           model.fit(ds, epochs=2)
 
   @combinations.generate(
@@ -1815,7 +1815,7 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
     ds = ds.filter(lambda *args, **kwargs: True)  # Makes the size UNKNOWN.
     bc = BatchCountingCB()
 
-    with self.assertRaisesRegexp(ValueError, 'steps_per_execution'):
+    with self.assertRaisesRegex(ValueError, 'steps_per_execution'):
       model.fit(ds, epochs=2, callbacks=[bc])
 
     train_ds = ds.repeat(2)
@@ -1823,7 +1823,7 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
     self.assertEqual(bc.train_begin_batches, [0, 20, 40, 0, 20, 40])
     self.assertEqual(bc.train_end_batches, [19, 39, 49, 19, 39, 49])
 
-    with self.assertRaisesRegexp(ValueError, 'steps_per_execution'):
+    with self.assertRaisesRegex(ValueError, 'steps_per_execution'):
       model.evaluate(ds, callbacks=[bc])
 
     test_ds = ds.repeat(2)
@@ -2266,8 +2266,8 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
                           (parameter_server_strategy.ParameterServerStrategyV1,
                            parameter_server_strategy.ParameterServerStrategy))
 
-    with self.assertRaisesRegexp(NotImplementedError,
-                                 'ParameterServerStrategy*'):
+    with self.assertRaisesRegex(NotImplementedError,
+                                'ParameterServerStrategy*'):
       with distribution.scope():
         model = simple_sequential_model()
         optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
@@ -2494,8 +2494,7 @@ class TestModelCapturesStrategy(test.TestCase, parameterized.TestCase):
     # This should raise an error because the metric is constructed
     # outside of the scope, and not by compile
     if distribution_strategy_context.has_strategy():
-      with self.assertRaisesRegexp(
-          ValueError, 'All metrics must be created in'):
+      with self.assertRaisesRegex(ValueError, 'All metrics must be created in'):
         model.compile(
             optimizer=keras.optimizers.adam_v2.Adam(1e-4),
             loss=keras.losses.MeanSquaredError(),
diff --git a/tensorflow/python/keras/distribute/distributed_training_utils_test.py b/tensorflow/python/keras/distribute/distributed_training_utils_test.py
index 39b4c366cbd..c47d694c5b5 100644
--- a/tensorflow/python/keras/distribute/distributed_training_utils_test.py
+++ b/tensorflow/python/keras/distribute/distributed_training_utils_test.py
@@ -48,8 +48,8 @@ class DistributedTrainingUtilsTest(test.TestCase):
     ]
 
     for callback in unsupported_predefined_callbacks:
-      with self.assertRaisesRegexp(
-          ValueError, 'You must specify a Keras Optimizer V2'):
+      with self.assertRaisesRegex(ValueError,
+                                  'You must specify a Keras Optimizer V2'):
         distributed_training_utils.validate_callbacks([callback],
                                                       v1_adam.AdamOptimizer())
 
diff --git a/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py b/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py
index d605f9a9228..6ec7cc2bac5 100644
--- a/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py
+++ b/tensorflow/python/keras/distribute/keras_dnn_correctness_test.py
@@ -262,13 +262,13 @@ class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
     if (context.executing_eagerly()) or is_default_strategy(distribution):
       self.run_correctness_test(distribution, use_numpy, use_validation_data)
     elif K.is_tpu_strategy(distribution) and not context.executing_eagerly():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'Expected `model` argument to be a functional `Model` instance, '
           'but got a subclass model instead.'):
         self.run_correctness_test(distribution, use_numpy, use_validation_data)
     else:
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'We currently do not support distribution strategy with a '
           '`Sequential` model that is created without `input_shape`/'
@@ -281,13 +281,13 @@ class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
         is_default_strategy(distribution)):
       self.run_dynamic_lr_test(distribution)
     elif K.is_tpu_strategy(distribution):
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'Expected `model` argument to be a functional `Model` instance, '
           'but got a subclass model instead.'):
         self.run_dynamic_lr_test(distribution)
     else:
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'We currently do not support distribution strategy with a '
           '`Sequential` model that is created without `input_shape`/'
@@ -299,7 +299,7 @@ class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
   def test_dnn_correctness_with_partial_last_batch_eval(self, distribution,
                                                         use_numpy,
                                                         use_validation_data):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'Expected `model` argument to be a functional `Model` instance, '
         'but got a subclass model instead.'):
diff --git a/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py b/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py
index 70fe41505bf..7b1bc7665b8 100644
--- a/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py
+++ b/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py
@@ -96,9 +96,8 @@ class DistributionStrategyStatefulLstmModelCorrectnessTest(
           keras_correctness_test_base.test_combinations_with_tpu_strategies()))
   def test_incorrectly_use_multiple_cores_for_stateful_lstm_model(
       self, distribution, use_numpy, use_validation_data):
-    with self.assertRaisesRegexp(
-        ValueError,
-        'RNNs with stateful=True not yet supported with '
+    with self.assertRaisesRegex(
+        ValueError, 'RNNs with stateful=True not yet supported with '
         'tf.distribute.Strategy.'):
       self.run_correctness_test(
           distribution,
diff --git a/tensorflow/python/keras/distribute/keras_utils_test.py b/tensorflow/python/keras/distribute/keras_utils_test.py
index 0f65bbbf917..1f6132a0228 100644
--- a/tensorflow/python/keras/distribute/keras_utils_test.py
+++ b/tensorflow/python/keras/distribute/keras_utils_test.py
@@ -196,7 +196,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
       # Removed device and input tensor shape details from the error message
       # since the order of the device and the corresponding input tensor shape
       # is not deterministic over different runs.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'Input tensor shapes do not match for '
           'distributed tensor inputs '
           'DistributedValues:.+'):
@@ -220,7 +220,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
       # Removed device and input tensor dtype details from the error message
       # since the order of the device and the corresponding input tensor dtype
       # is not deterministic over different runs.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'Input tensor dtypes do not match for '
           'distributed tensor inputs '
           'DistributedValues:.+'):
@@ -301,7 +301,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
       model = _SimpleMLP(3)
 
       if not context.executing_eagerly():
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             'We currently do not support distribution strategy with a '
             '`Sequential` model that is created without `input_shape`/'
@@ -330,7 +330,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
         model.compile(
             'sgd')
       else:
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             'We currently do not support distribution strategy with a '
             '`Sequential` model that is created without '
@@ -345,7 +345,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
     with distribution.scope():
       loss_object = losses.MeanSquaredError()
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'Please use `tf.keras.losses.Reduction.SUM` or '
           '`tf.keras.losses.Reduction.NONE`'):
         y = np.asarray([1, 0])
@@ -501,7 +501,7 @@ class TestDistributionStrategyValidation(test.TestCase, parameterized.TestCase):
           keras_test_lib.all_strategy_combinations_minus_default()))
   def test_layer_outside_scope(self, distribution):
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'was not created in the distribution strategy'):
         x = keras.layers.Input(shape=(3,), name='input')
         y = keras.layers.Dense(4, name='dense')(x)
@@ -519,7 +519,7 @@ class TestDistributionStrategyValidation(test.TestCase, parameterized.TestCase):
       keras_test_lib.all_strategy_combinations_minus_default())
   def test_model_outside_scope(self, distribution):
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'was not created in the distribution strategy'):
         x = keras.layers.Input(shape=(3,), name='input')
         y = keras.layers.Dense(4, name='dense')(x)
@@ -542,9 +542,9 @@ class TestDistributionStrategyWithStaticShapes(test.TestCase,
           mode=['graph', 'eager']))
   def test_input_batch_size_not_divisible_by_num_replicas(self, distribution):
     with distribution.scope():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, r'The `batch_size` argument \(5\) must be divisible by '
-                      r'the number of replicas \(2\)'):
+          r'the number of replicas \(2\)'):
         keras.layers.Input(shape=(3,), batch_size=5, name='input')
 
   @combinations.generate(
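
As in the batch_size hunk above, the second argument to assertRaisesRegex is still a regular expression, so literal parentheses, dots, and brackets in an expected message remain escaped after the rename. A self-contained sketch with an invented error message:

    import unittest

    class EscapedPatternTest(unittest.TestCase):

        def test_literal_parentheses_need_escaping(self):
            with self.assertRaisesRegex(
                ValueError, r'batch_size \(5\) must be divisible by'):
                raise ValueError('The batch_size (5) must be divisible by 2.')

    if __name__ == '__main__':
        unittest.main()
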
diff --git a/tensorflow/python/keras/engine/base_layer_test.py b/tensorflow/python/keras/engine/base_layer_test.py
index 559e927d603..efb4442c0f1 100644
--- a/tensorflow/python/keras/engine/base_layer_test.py
+++ b/tensorflow/python/keras/engine/base_layer_test.py
@@ -126,8 +126,8 @@ class BaseLayerTest(keras_parameterized.TestCase):
                                                   input_shape=(3,))
       self.assertEqual(model.dynamic, True)
       # But then you cannot run the model since you're in a graph scope.
-      with self.assertRaisesRegexp(
-          ValueError, 'You must enable eager execution'):
+      with self.assertRaisesRegex(ValueError,
+                                  'You must enable eager execution'):
         model.compile(rmsprop.RMSprop(0.001), loss='mse')
 
   def test_manual_compute_output_shape(self):
@@ -244,7 +244,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_invalid_forward_pass(self):
     inputs = input_layer.Input((3,))
-    with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
+    with self.assertRaisesRegex(ValueError, 'You did something wrong!'):
       _ = InvalidLayer()(inputs)
 
   def test_no_legacy_model(self):
@@ -259,25 +259,25 @@ class BaseLayerTest(keras_parameterized.TestCase):
     expected_regex = (r'The following are legacy tf\.layers\.Layers:\n  '
                       '{}\n  {}'.format(legacy_dense_0, legacy_dense_1))
 
-    with self.assertRaisesRegexp(TypeError, expected_regex):
+    with self.assertRaisesRegex(TypeError, expected_regex):
       _ = training_lib.Model(inputs=[inputs], outputs=[layer])
 
     model = training_lib.Model(inputs=[inputs], outputs=[inputs])
-    with self.assertRaisesRegexp(TypeError, expected_regex):
+    with self.assertRaisesRegex(TypeError, expected_regex):
       model._insert_layers([legacy_dense_0, legacy_dense_1])
 
   def test_no_legacy_sequential(self):
     layer = [layers.Dense(1), legacy_core.Dense(1, name='legacy_dense_0')]
 
     expected_regex = r'legacy tf\.layers\.Layers:\n  {}'.format(layer[1])
-    with self.assertRaisesRegexp(TypeError, expected_regex):
+    with self.assertRaisesRegex(TypeError, expected_regex):
       _ = sequential.Sequential(layer)
 
-    with self.assertRaisesRegexp(TypeError, expected_regex):
+    with self.assertRaisesRegex(TypeError, expected_regex):
       _ = sequential.Sequential([input_layer.Input(shape=(4,))] + layer)
 
     model = sequential.Sequential()
-    with self.assertRaisesRegexp(TypeError, expected_regex):
+    with self.assertRaisesRegex(TypeError, expected_regex):
       for l in layer:
         model.add(l)
 
@@ -499,11 +499,11 @@ class BaseLayerTest(keras_parameterized.TestCase):
     self.assertEqual(len(weights), 2)
     self.assertAllClose(weights[0], kernel)
     self.assertAllClose(weights[1], bias)
-    with self.assertRaisesRegexp(
-        ValueError, 'but the layer was expecting 2 weights'):
+    with self.assertRaisesRegex(ValueError,
+                                'but the layer was expecting 2 weights'):
       layer.set_weights([1, 2, 3])
-    with self.assertRaisesRegexp(
-        ValueError, 'not compatible with provided weight shape'):
+    with self.assertRaisesRegex(ValueError,
+                                'not compatible with provided weight shape'):
       layer.set_weights([kernel.T, bias])
 
   def test_get_config_error(self):
@@ -516,7 +516,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
 
     # `__init__` includes kwargs but `get_config` is not overridden, so
     # an error should be thrown:
-    with self.assertRaisesRegexp(NotImplementedError, 'Layer MyLayer has'):
+    with self.assertRaisesRegex(NotImplementedError, 'Layer MyLayer has'):
       MyLayer('custom').get_config()
 
     class MyLayerNew(base_layer.Layer):
@@ -550,11 +550,11 @@ class BaseLayerTest(keras_parameterized.TestCase):
     self.assertEqual(dense.count_params(), 16 * 4 + 16)
 
     dense = layers.Dense(16)
-    with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
+    with self.assertRaisesRegex(ValueError, 'call `count_params`'):
       dense.count_params()
 
     model = sequential.Sequential(layers.Dense(16))
-    with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
+    with self.assertRaisesRegex(ValueError, 'call `count_params`'):
       model.count_params()
 
     dense = layers.Dense(16, input_dim=4)
@@ -569,7 +569,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
         pass
 
     layer = CustomLayerNotCallingSuper()
-    with self.assertRaisesRegexp(RuntimeError, 'You must call `super()'):
+    with self.assertRaisesRegex(RuntimeError, 'You must call `super()'):
       layer(np.random.random((10, 2)))
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -594,7 +594,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
     out = self.evaluate(layer(x=x, y=y))
     self.assertAllClose(out, 2 * np.ones((10, 1)))
 
-    with self.assertRaisesRegexp(ValueError, 'must always be passed'):
+    with self.assertRaisesRegex(ValueError, 'must always be passed'):
       layer(y=y)
 
     class TFFunctionLayer(base_layer.Layer):
@@ -775,14 +775,14 @@ class SymbolicSupportTest(keras_parameterized.TestCase):
       x1 = array_ops.ones((3, 3))
     x2 = array_ops.ones((3, 3))
     self.assertIsInstance(x2, ops.EagerTensor)
-    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
+    with self.assertRaisesRegex(TypeError, 'Graph tensors'):
       math_ops.matmul(x1, x2)
 
   def test_mixing_numpy_arrays_and_graph_tensors(self):
     with ops.Graph().as_default():
       x1 = array_ops.ones((3, 3))
     x2 = np.ones((3, 3), dtype='float32')
-    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
+    with self.assertRaisesRegex(TypeError, 'Graph tensors'):
       math_ops.matmul(x1, x2)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -1336,8 +1336,8 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
     if testing_utils.should_run_eagerly():
       model.fit(x, y, epochs=2, batch_size=5)
     else:
-      with self.assertRaisesRegexp(errors_impl.InaccessibleTensorError,
-                                   'ActivityRegularizer'):
+      with self.assertRaisesRegex(errors_impl.InaccessibleTensorError,
+                                  'ActivityRegularizer'):
         model.fit(x, y, epochs=2, batch_size=5)
 
   def test_conditional_activity_regularizer_with_wrappers_in_call(self):
@@ -1368,8 +1368,8 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
     if testing_utils.should_run_eagerly():
       model.fit(x, y, epochs=2, batch_size=5)
     else:
-      with self.assertRaisesRegexp(errors_impl.InaccessibleTensorError,
-                                   'ActivityRegularizer'):
+      with self.assertRaisesRegex(errors_impl.InaccessibleTensorError,
+                                  'ActivityRegularizer'):
         model.fit(x, y, epochs=2, batch_size=5)
 
 
@@ -1525,7 +1525,7 @@ class DTypeTest(keras_parameterized.TestCase):
     layer = IdentityLayer()
     with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
       layer(self._const('float64'))
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_warn.call_args),
           ".*from dtype float64 to the layer's dtype of float32.*"
           "The layer has dtype float32 because.*")
@@ -1539,7 +1539,7 @@ class DTypeTest(keras_parameterized.TestCase):
     layer = IdentityLayer()
     with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
       layer(self._const('float64'))
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_warn.call_args),
           ".*from dtype float64 to the layer's dtype of float32.*"
           "The layer has dtype float32 because.*")
diff --git a/tensorflow/python/keras/engine/base_layer_utils_test.py b/tensorflow/python/keras/engine/base_layer_utils_test.py
index c039befda7f..72a4977f003 100644
--- a/tensorflow/python/keras/engine/base_layer_utils_test.py
+++ b/tensorflow/python/keras/engine/base_layer_utils_test.py
@@ -90,13 +90,13 @@ class OpLayerTest(keras_parameterized.TestCase):
     self.assertAllClose(expected, output)
 
   def test_ragged_op_layer(self):
-    with self.assertRaisesRegexp(ValueError, 'Keras automatic op wrapping'):
+    with self.assertRaisesRegex(ValueError, 'Keras automatic op wrapping'):
       int_values = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
       float_values = math_ops.cast(int_values, dtypes.float32)
       _ = keras.Model(int_values, float_values)
 
   def test_sparse_op_layer(self):
-    with self.assertRaisesRegexp(ValueError, 'Keras automatic op wrapping'):
+    with self.assertRaisesRegex(ValueError, 'Keras automatic op wrapping'):
       int_values = keras.Input(shape=(None,), dtype=dtypes.int32, sparse=True)
       float_values = math_ops.cast(int_values, dtypes.float32)
       _ = keras.Model(int_values, float_values)
diff --git a/tensorflow/python/keras/engine/data_adapter_test.py b/tensorflow/python/keras/engine/data_adapter_test.py
index be9c6d79193..fad193009cf 100644
--- a/tensorflow/python/keras/engine/data_adapter_test.py
+++ b/tensorflow/python/keras/engine/data_adapter_test.py
@@ -647,12 +647,12 @@ class DatasetAdapterTest(DataAdapterTestBase):
     self.assertIsNone(adapter.partial_batch_size())
 
   def test_invalid_targets_argument(self):
-    with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
+    with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'):
       self.adapter_cls(self.dataset_input, y=self.dataset_input)
 
   def test_invalid_sample_weights_argument(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'`sample_weight` argument is not supported'):
+    with self.assertRaisesRegex(ValueError,
+                                r'`sample_weight` argument is not supported'):
       self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input)
 
 
@@ -703,12 +703,12 @@ class GeneratorDataAdapterTest(DataAdapterTestBase):
     self.assertIsNone(adapter.partial_batch_size())
 
   def test_invalid_targets_argument(self):
-    with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
+    with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'):
       self.adapter_cls(self.generator_input, y=self.generator_input)
 
   def test_invalid_sample_weights_argument(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'`sample_weight` argument is not supported'):
+    with self.assertRaisesRegex(ValueError,
+                                r'`sample_weight` argument is not supported'):
       self.adapter_cls(
           self.generator_input, sample_weights=self.generator_input)
 
@@ -770,12 +770,12 @@ class KerasSequenceAdapterTest(DataAdapterTestBase):
     self.assertIsNone(adapter.partial_batch_size())
 
   def test_invalid_targets_argument(self):
-    with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
+    with self.assertRaisesRegex(ValueError, r'`y` argument is not supported'):
       self.adapter_cls(self.sequence_input, y=self.sequence_input)
 
   def test_invalid_sample_weights_argument(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'`sample_weight` argument is not supported'):
+    with self.assertRaisesRegex(ValueError,
+                                r'`sample_weight` argument is not supported'):
       self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input)
 
 
@@ -958,7 +958,7 @@ class DataHandlerTest(keras_parameterized.TestCase):
                                       ([2],)], [([0],), ([1],), ([2],)]])
 
   def test_class_weight_user_errors(self):
-    with self.assertRaisesRegexp(ValueError, 'to be a dict with keys'):
+    with self.assertRaisesRegex(ValueError, 'to be a dict with keys'):
       data_adapter.DataHandler(
           x=[[0], [1], [2]],
           y=[[2], [1], [0]],
@@ -970,7 +970,7 @@ class DataHandlerTest(keras_parameterized.TestCase):
               3: 1.5  # Skips class `2`.
           })
 
-    with self.assertRaisesRegexp(ValueError, 'with a single output'):
+    with self.assertRaisesRegex(ValueError, 'with a single output'):
       data_adapter.DataHandler(
           x=np.ones((10, 1)),
           y=[np.ones((10, 1)), np.zeros((10, 1))],
@@ -1031,13 +1031,12 @@ class TestValidationSplit(keras_parameterized.TestCase):
     self.assertEqual(val_sw.numpy().tolist(), [16])
 
   def test_validation_split_user_error(self):
-    with self.assertRaisesRegexp(ValueError, 'is only supported for Tensors'):
+    with self.assertRaisesRegex(ValueError, 'is only supported for Tensors'):
       data_adapter.train_validation_split(
           lambda: np.ones((10, 1)), validation_split=0.2)
 
   def test_validation_split_examples_too_few(self):
-    with self.assertRaisesRegexp(
-        ValueError, 'not sufficient to split it'):
+    with self.assertRaisesRegex(ValueError, 'not sufficient to split it'):
       data_adapter.train_validation_split(
           np.ones((1, 10)), validation_split=0.2)
 
diff --git a/tensorflow/python/keras/engine/functional_test.py b/tensorflow/python/keras/engine/functional_test.py
index b8768a5e311..db7b3d696ab 100644
--- a/tensorflow/python/keras/engine/functional_test.py
+++ b/tensorflow/python/keras/engine/functional_test.py
@@ -131,26 +131,26 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
     self.assertEqual(network.get_layer(index=1), dense_a)
 
     # test invalid get_layer by index
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Was asked to retrieve layer at index ' + str(3) +
         ' but model only has ' + str(len(network.layers)) + ' layers.'):
       network.get_layer(index=3)
 
     # test that only one between name and index is requested
-    with self.assertRaisesRegexp(ValueError,
-                                 'Provide only a layer name or a layer index'):
+    with self.assertRaisesRegex(ValueError,
+                                'Provide only a layer name or a layer index'):
       network.get_layer(index=1, name='dense_b')
 
     # test that a name or an index must be provided
-    with self.assertRaisesRegexp(ValueError,
-                                 'Provide either a layer name or layer index.'):
+    with self.assertRaisesRegex(ValueError,
+                                'Provide either a layer name or layer index.'):
       network.get_layer()
 
     # test various get_layer by name
     self.assertEqual(network.get_layer(name='dense_a'), dense_a)
 
     # test invalid get_layer by name
-    with self.assertRaisesRegexp(ValueError, 'No such layer: dense_c.'):
+    with self.assertRaisesRegex(ValueError, 'No such layer: dense_c.'):
       network.get_layer(name='dense_c')
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -1036,7 +1036,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
         batch_size=2)
     # Check that input was correctly doubled.
     self.assertEqual(history.history['loss'][0], 0.0)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, 'Layer double was passed non-JSON-serializable arguments.'):
       model.get_config()
 
@@ -1274,7 +1274,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
       def __init__(self):
         self._foo = [layers.Dense(10), layers.Dense(10)]
 
-    with self.assertRaisesRegexp(RuntimeError, 'forgot to call'):
+    with self.assertRaisesRegex(RuntimeError, 'forgot to call'):
       MyNetwork()
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -1291,12 +1291,12 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
     inputs = input_layer_lib.Input(shape=(32,))
     outputs = layers.Dense(4)(inputs)
 
-    with self.assertRaisesRegexp(TypeError,
-                                 'got an unexpected keyword argument'):
+    with self.assertRaisesRegex(TypeError,
+                                'got an unexpected keyword argument'):
       model = training_lib.Model(
           inputs, outputs, name='m', trainable=False, dtype='int64')
-    with self.assertRaisesRegexp(TypeError,
-                                 'got an unexpected keyword argument'):
+    with self.assertRaisesRegex(TypeError,
+                                'got an unexpected keyword argument'):
       model = training_lib.Model(
           inputs, outputs, name='m', trainable=False, dynamic=False)
 
@@ -1931,7 +1931,7 @@ class WeightAccessTest(keras_parameterized.TestCase):
     x3 = layers.Dense(1)
     model = sequential.Sequential([x1, x2, x3])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Weights for model .* have not yet been created'):
       _ = model.weights
 
@@ -1947,7 +1947,7 @@ class WeightAccessTest(keras_parameterized.TestCase):
 
     model = SubclassModel()
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Weights for model .* have not yet been created'):
       _ = model.weights
 
diff --git a/tensorflow/python/keras/engine/input_spec_test.py b/tensorflow/python/keras/engine/input_spec_test.py
index f788fcdf664..a87af5e56a6 100644
--- a/tensorflow/python/keras/engine/input_spec_test.py
+++ b/tensorflow/python/keras/engine/input_spec_test.py
@@ -26,9 +26,9 @@ class InputSpecTest(test.TestCase):
 
   def test_axes_initialization(self):
     input_spec.InputSpec(shape=[1, None, 2, 3], axes={3: 5, '2': 2})
-    with self.assertRaisesRegexp(ValueError, 'Axis 4 is greater than'):
+    with self.assertRaisesRegex(ValueError, 'Axis 4 is greater than'):
       input_spec.InputSpec(shape=[1, None, 2, 3], axes={4: 5})
-    with self.assertRaisesRegexp(TypeError, 'keys in axes must be integers'):
+    with self.assertRaisesRegex(TypeError, 'keys in axes must be integers'):
       input_spec.InputSpec(shape=[1, None, 2, 3], axes={'string': 5})
 
 
@@ -54,11 +54,11 @@ class InputSpecToTensorShapeTest(test.TestCase):
 
   def test_undefined_shapes(self):
     spec = input_spec.InputSpec(max_ndim=5)
-    with self.assertRaisesRegexp(ValueError, 'unknown TensorShape'):
+    with self.assertRaisesRegex(ValueError, 'unknown TensorShape'):
       input_spec.to_tensor_shape(spec).as_list()
 
     spec = input_spec.InputSpec(min_ndim=5, max_ndim=5)
-    with self.assertRaisesRegexp(ValueError, 'unknown TensorShape'):
+    with self.assertRaisesRegex(ValueError, 'unknown TensorShape'):
       input_spec.to_tensor_shape(spec).as_list()
 
 
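
Both the context-manager form used throughout this patch and the direct callable form share the renamed assertRaisesRegex entry point. A tiny illustrative sketch; the parse_positive helper below is made up for the example:

    import unittest

    def parse_positive(value):
        number = int(value)
        if number <= 0:
            raise ValueError('expected a positive integer, got %d' % number)
        return number

    class CallableFormTest(unittest.TestCase):

        def test_both_forms(self):
            # Context-manager form, as used in the hunks above.
            with self.assertRaisesRegex(ValueError, 'positive integer'):
                parse_positive('-3')
            # Callable form: exception class, pattern, callable, then its args.
            self.assertRaisesRegex(ValueError, 'positive integer',
                                   parse_positive, '-3')

    if __name__ == '__main__':
        unittest.main()
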
diff --git a/tensorflow/python/keras/engine/sequential_test.py b/tensorflow/python/keras/engine/sequential_test.py
index 773ce003656..1c8510ff3c9 100644
--- a/tensorflow/python/keras/engine/sequential_test.py
+++ b/tensorflow/python/keras/engine/sequential_test.py
@@ -118,7 +118,7 @@ class TestSequential(keras_parameterized.TestCase):
         metrics=[keras.metrics.CategoricalAccuracy()],
         run_eagerly=testing_utils.should_run_eagerly())
     self.assertEqual(len(model.layers), 2)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Weights for model .* have not yet been created'):
       len(model.weights)
     self.assertFalse(model.built)
@@ -144,7 +144,7 @@ class TestSequential(keras_parameterized.TestCase):
         metrics=[keras.metrics.CategoricalAccuracy()],
         run_eagerly=testing_utils.should_run_eagerly())
     self.assertEqual(len(model.layers), 2)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Weights for model .* have not yet been created'):
       len(model.weights)
     self.assertFalse(model.built)
@@ -356,8 +356,8 @@ class TestSequential(keras_parameterized.TestCase):
     model = keras.models.Sequential()
     model.add(keras.layers.Dense(1))
     if context.executing_eagerly():
-      with self.assertRaisesRegexp(ValueError,
-                                   'expected min_ndim=2, found ndim=0'):
+      with self.assertRaisesRegex(ValueError,
+                                  'expected min_ndim=2, found ndim=0'):
         model(1.0)
 
   @keras_parameterized.run_all_keras_modes
@@ -378,19 +378,19 @@ class TestSequential(keras_parameterized.TestCase):
       def call(self, inputs):
         return inputs, inputs
 
-    with self.assertRaisesRegexp(
-        ValueError, 'should have a single output tensor'):
+    with self.assertRaisesRegex(ValueError,
+                                'should have a single output tensor'):
       keras.Sequential([MultiOutputLayer(input_shape=(3,))])
 
-    with self.assertRaisesRegexp(
-        ValueError, 'should have a single output tensor'):
+    with self.assertRaisesRegex(ValueError,
+                                'should have a single output tensor'):
       keras.Sequential([
           keras.layers.Dense(1, input_shape=(3,)),
           MultiOutputLayer()])
 
     # Should also raise error in a deferred build mode
-    with self.assertRaisesRegexp(
-        ValueError, 'should have a single output tensor'):
+    with self.assertRaisesRegex(ValueError,
+                                'should have a single output tensor'):
       keras.Sequential([MultiOutputLayer()])(np.zeros((10, 10)))
 
   @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@@ -442,7 +442,7 @@ class TestSequential(keras_parameterized.TestCase):
   def test_name_unicity(self):
     model = keras.Sequential()
     model.add(keras.layers.Dense(3, name='specific_name'))
-    with self.assertRaisesRegexp(ValueError, 'should have unique names'):
+    with self.assertRaisesRegex(ValueError, 'should have unique names'):
       model.add(keras.layers.Dense(3, name='specific_name'))
 
 
diff --git a/tensorflow/python/keras/engine/training_dataset_test.py b/tensorflow/python/keras/engine/training_dataset_test.py
index 0d47dcb0443..92c199ef1f3 100644
--- a/tensorflow/python/keras/engine/training_dataset_test.py
+++ b/tensorflow/python/keras/engine/training_dataset_test.py
@@ -112,7 +112,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
 
     # Test with sample weight.
     sample_weight = np.random.random((10,))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'`sample_weight` argument is not supported .+dataset'):
       model.fit(
           dataset,
@@ -121,7 +121,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
           verbose=0,
           sample_weight=sample_weight)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, '(you should not specify a target)|'
         '(`y` argument is not supported when using dataset as input.)'):
       model.fit(dataset, dataset,
@@ -314,7 +314,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
       dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
       dataset = dataset.repeat(100)
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
       ):
@@ -327,8 +327,8 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
       dataset = dataset.repeat(100)
       dataset = dataset.batch(10)
 
-      with self.assertRaisesRegexp(ValueError,
-                                   r'expected (.*?) to have shape \(3,\)'):
+      with self.assertRaisesRegex(ValueError,
+                                  r'expected (.*?) to have shape \(3,\)'):
         model.train_on_batch(dataset)
 
   @keras_parameterized.run_with_all_model_types
diff --git a/tensorflow/python/keras/engine/training_generator_test.py b/tensorflow/python/keras/engine/training_generator_test.py
index 0844523f81b..3837763c494 100644
--- a/tensorflow/python/keras/engine/training_generator_test.py
+++ b/tensorflow/python/keras/engine/training_generator_test.py
@@ -446,11 +446,11 @@ class TestGeneratorMethodsWithSequences(keras_parameterized.TestCase):
     model.evaluate(CustomSequence())
     model.predict(CustomSequence())
 
-    with self.assertRaisesRegexp(ValueError, '`y` argument is not supported'):
+    with self.assertRaisesRegex(ValueError, '`y` argument is not supported'):
       model.fit(CustomSequence(), y=np.ones([10, 1]))
 
-    with self.assertRaisesRegexp(ValueError,
-                                 '`sample_weight` argument is not supported'):
+    with self.assertRaisesRegex(ValueError,
+                                '`sample_weight` argument is not supported'):
       model.fit(CustomSequence(), sample_weight=np.ones([10, 1]))
 
     model.compile(rmsprop.RMSprop(0.001), 'binary_crossentropy')
diff --git a/tensorflow/python/keras/engine/training_test.py b/tensorflow/python/keras/engine/training_test.py
index 8cb3f99ddb0..2885422ac42 100644
--- a/tensorflow/python/keras/engine/training_test.py
+++ b/tensorflow/python/keras/engine/training_test.py
@@ -93,8 +93,8 @@ class TrainingTest(keras_parameterized.TestCase):
   def test_fit_on_empty(self):
     model = sequential.Sequential([layers_module.Dense(1)])
     model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
-    with self.assertRaisesRegexp(
-        ValueError, 'Expect x to be a non-empty array or dataset.'):
+    with self.assertRaisesRegex(ValueError,
+                                'Expect x to be a non-empty array or dataset.'):
       model.fit(x=np.array([]), y=np.array([]))
 
   @keras_parameterized.run_all_keras_modes
@@ -123,7 +123,7 @@ class TrainingTest(keras_parameterized.TestCase):
       getattr(model, method_name)(1)
 
     error_msg = 'inside a `tf.function`'
-    with self.assertRaisesRegexp(RuntimeError, error_msg):
+    with self.assertRaisesRegex(RuntimeError, error_msg):
       my_fn()
 
   @keras_parameterized.run_all_keras_modes
@@ -1084,8 +1084,8 @@ class TrainingTest(keras_parameterized.TestCase):
     outputs = layers_module.Dense(1, activation='sigmoid')(inputs)
     model = training_module.Model(inputs, outputs)
     model.compile(optimizer_v2.adam.Adam(0.001), 'binary_crossentropy')
-    with self.assertRaisesRegexp(ValueError,
-                                 'incompatible with the specified batch size'):
+    with self.assertRaisesRegex(ValueError,
+                                'incompatible with the specified batch size'):
       model.fit(x, y, batch_size=4)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -1099,8 +1099,8 @@ class TrainingTest(keras_parameterized.TestCase):
     input1 = input_layer.Input(batch_size=2, shape=(10,))
     input2 = input_layer.Input(batch_size=3, shape=(10,))
     outputs = MyLayer()([input1, input2])
-    with self.assertRaisesRegexp(ValueError,
-                                 'specified batch sizes of the Input Layers'):
+    with self.assertRaisesRegex(ValueError,
+                                'specified batch sizes of the Input Layers'):
       training_module.Model([input1, input2], outputs)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -1226,7 +1226,7 @@ class TrainingTest(keras_parameterized.TestCase):
         'mse',
         run_eagerly=testing_utils.should_run_eagerly())
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, '`validation_steps` should not be specified if '
         '`validation_data` is None.'):
       model.fit(x, y, epochs=4, validation_data=None, validation_steps=3)
@@ -1648,10 +1648,8 @@ class TestExceptionsAndWarnings(keras_parameterized.TestCase):
   def test_sparse_op_with_op_layer(self):
     inputs = layers_module.Input(shape=(2,), sparse=True, name='sparse_tensor')
     output = sparse_ops.sparse_minimum(inputs, inputs)
-    with self.assertRaisesRegexp(
-        ValueError,
-        'not supported by Keras automatic op wrapping'
-    ):
+    with self.assertRaisesRegex(ValueError,
+                                'not supported by Keras automatic op wrapping'):
       training_module.Model([inputs], output)
 
   @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@@ -1661,10 +1659,8 @@ class TestExceptionsAndWarnings(keras_parameterized.TestCase):
     model = training_module.Model(inputs=inputs, outputs=outputs)
     model.compile(loss='mse')
 
-    with self.assertRaisesRegexp(
-        ValueError,
-        'Expect x to be a non-empty array or dataset.'
-    ):
+    with self.assertRaisesRegex(ValueError,
+                                'Expect x to be a non-empty array or dataset.'):
       model.predict(np.array([]))
 
   @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@@ -1676,15 +1672,15 @@ class TestExceptionsAndWarnings(keras_parameterized.TestCase):
     model = training_module.Model([input_node1, input_node2], output_node)
     model.compile(loss='mse')
 
-    with self.assertRaisesRegexp(ValueError, 'Data cardinality is ambiguous'):
+    with self.assertRaisesRegex(ValueError, 'Data cardinality is ambiguous'):
       model.train_on_batch([np.ones((10, 5)), np.ones((10, 5))],
                            np.ones((11, 4)))
 
-    with self.assertRaisesRegexp(ValueError, 'Data cardinality is ambiguous'):
+    with self.assertRaisesRegex(ValueError, 'Data cardinality is ambiguous'):
       model.test_on_batch([np.ones((10, 5)), np.ones((10, 5))],
                           np.ones((11, 4)))
 
-    with self.assertRaisesRegexp(ValueError, 'Data cardinality is ambiguous'):
+    with self.assertRaisesRegex(ValueError, 'Data cardinality is ambiguous'):
       model.predict_on_batch([np.ones((10, 5)), np.ones((11, 5))])
 
 
@@ -3210,7 +3206,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
 
     x = np.ones(shape=(10, 1))
     y = np.ones(shape=(10, 2))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'Please provide different names for the metrics you have added. '
         'We found 2 metrics with the name: "metric_1"'):
@@ -3366,13 +3362,13 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
     x = layers_module.Input(shape=(1,))
     y = layers_module.Dense(1, kernel_initializer='ones')(x)
     model = training_module.Model(x, y)
-    with self.assertRaisesRegexp(ValueError,
-                                 'only `mean` sample-wise metric aggregation'):
+    with self.assertRaisesRegex(ValueError,
+                                'only `mean` sample-wise metric aggregation'):
       model.add_metric(
           math_ops.reduce_sum(y), name='metric_1', aggregation='sum')
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'only `mean` sample-wise metric aggregation'):
+    with self.assertRaisesRegex(ValueError,
+                                'only `mean` sample-wise metric aggregation'):
       model.add_metric(
           math_ops.reduce_sum(y), name='metric_1', aggregation=None)
 
diff --git a/tensorflow/python/keras/engine/training_utils_test.py b/tensorflow/python/keras/engine/training_utils_test.py
index 1a6917e2e21..bc2c4c91268 100644
--- a/tensorflow/python/keras/engine/training_utils_test.py
+++ b/tensorflow/python/keras/engine/training_utils_test.py
@@ -203,9 +203,8 @@ class DatasetUtilsTest(test.TestCase, parameterized.TestCase):
 
     with test.mock.patch.object(logging, 'warning') as mock_log:
       training_utils.verify_dataset_shuffled(dataset)
-      self.assertRegexpMatches(
-          str(mock_log.call_args),
-          'input dataset `x` is not shuffled.')
+      self.assertRegex(
+          str(mock_log.call_args), 'input dataset `x` is not shuffled.')
 
     shuffled_dataset = dataset.shuffle(10)
     training_utils.verify_dataset_shuffled(shuffled_dataset)
@@ -398,14 +397,14 @@ class AggregationTest(keras_parameterized.TestCase):
     training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
     training_utils.SliceAggregator._MAX_COPY_SECONDS = 0.1
     training_utils._COPY_POOL._func_wrapper = add_sleep
-    with self.assertRaisesRegexp(ValueError, 'Timed out waiting for copy'):
+    with self.assertRaisesRegex(ValueError, 'Timed out waiting for copy'):
       self._run_without_steps()
 
   def test_async_copy_reraise(self):
     training_utils.SliceAggregator._BINARY_SIZE_THRESHOLD = 15
     training_utils.SliceAggregator._MAX_COPY_SECONDS = 1.
     training_utils._COPY_POOL._func_wrapper = cause_error
-    with self.assertRaisesRegexp(TypeError, 'NoneType'):
+    with self.assertRaisesRegex(TypeError, 'NoneType'):
       self._run_without_steps()
 
 
diff --git a/tensorflow/python/keras/feature_column/dense_features_test.py b/tensorflow/python/keras/feature_column/dense_features_test.py
index 76b91dd605f..ef132b67707 100644
--- a/tensorflow/python/keras/feature_column/dense_features_test.py
+++ b/tensorflow/python/keras/feature_column/dense_features_test.py
@@ -201,12 +201,12 @@ class DenseFeaturesTest(test.TestCase):
       self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
 
   def test_raises_if_empty_feature_columns(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'feature_columns must not be empty'):
+    with self.assertRaisesRegex(ValueError,
+                                'feature_columns must not be empty'):
       df.DenseFeatures(feature_columns=[])(features={})
 
   def test_should_be_dense_column(self):
-    with self.assertRaisesRegexp(ValueError, 'must be a .*DenseColumn'):
+    with self.assertRaisesRegex(ValueError, 'must be a .*DenseColumn'):
       df.DenseFeatures(feature_columns=[
           fc.categorical_column_with_hash_bucket('wire_cast', 4)
       ])(
@@ -215,7 +215,7 @@ class DenseFeaturesTest(test.TestCase):
           })
 
   def test_does_not_support_dict_columns(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Expected feature_columns to be iterable, found dict.'):
       df.DenseFeatures(feature_columns={'a': fc.numeric_column('a')})(
           features={
@@ -244,7 +244,7 @@ class DenseFeaturesTest(test.TestCase):
       self.assertAllClose([[0., 1.]], self.evaluate(net))
 
   def test_raises_if_duplicate_name(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Duplicate feature column name found for columns'):
       df.DenseFeatures(
           feature_columns=[fc.numeric_column('a'),
@@ -297,7 +297,7 @@ class DenseFeaturesTest(test.TestCase):
     price = fc.numeric_column('price', shape=2)
     with ops.Graph().as_default():
       features = {'price': [[1.], [5.]]}
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
         df.DenseFeatures([price])(features)
@@ -367,7 +367,7 @@ class DenseFeaturesTest(test.TestCase):
               sparse_tensor.SparseTensor(
                   indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
       }
-      with self.assertRaisesRegexp(Exception, 'must be a .*DenseColumn'):
+      with self.assertRaisesRegex(Exception, 'must be a .*DenseColumn'):
         df.DenseFeatures([animal])(features)
 
   def test_static_batch_size_mismatch(self):
@@ -378,7 +378,7 @@ class DenseFeaturesTest(test.TestCase):
           'price1': [[1.], [5.], [7.]],  # batchsize = 3
           'price2': [[3.], [4.]]  # batchsize = 2
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         df.DenseFeatures([price1, price2])(features)
@@ -393,7 +393,7 @@ class DenseFeaturesTest(test.TestCase):
           'price2': [[3.], [4.]],  # batchsize = 2
           'price3': [[3.], [4.], [5.]]  # batchsize = 3
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         df.DenseFeatures([price1, price2, price3])(features)
@@ -408,8 +408,8 @@ class DenseFeaturesTest(test.TestCase):
       }
       net = df.DenseFeatures([price1, price2])(features)
       with _initialized_session() as sess:
-        with self.assertRaisesRegexp(errors.OpError,
-                                     'Dimensions of inputs should match'):
+        with self.assertRaisesRegex(errors.OpError,
+                                    'Dimensions of inputs should match'):
           sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
 
   def test_runtime_batch_size_matches(self):
@@ -665,7 +665,7 @@ class DenseFeaturesTest(test.TestCase):
     self.assertEqual(0, features['price'].shape.ndims)
 
     # Static rank 0 should fail
-    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
       df.DenseFeatures([price])(features)
 
     # Dynamic rank 0 should fail
@@ -1098,7 +1098,7 @@ class SequenceFeatureColumnsTest(test.TestCase):
         categorical_column_a, dimension=2)
 
     input_layer = df.DenseFeatures([embedding_column_a])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'In embedding_column: aaa_embedding\. categorical_column must not be '
         r'of type SequenceCategoricalColumn\.'):
@@ -1119,7 +1119,7 @@ class SequenceFeatureColumnsTest(test.TestCase):
     indicator_column_a = fc.indicator_column(categorical_column_a)
 
     input_layer = df.DenseFeatures([indicator_column_a])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'In indicator_column: aaa_indicator\. categorical_column must not be '
         r'of type SequenceCategoricalColumn\.'):
diff --git a/tensorflow/python/keras/feature_column/dense_features_v2_test.py b/tensorflow/python/keras/feature_column/dense_features_v2_test.py
index 95fc8b7ac1e..384d6424f47 100644
--- a/tensorflow/python/keras/feature_column/dense_features_v2_test.py
+++ b/tensorflow/python/keras/feature_column/dense_features_v2_test.py
@@ -182,12 +182,12 @@ class DenseFeaturesTest(test.TestCase):
                           self.evaluate(predict_mode))
 
   def test_raises_if_empty_feature_columns(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'feature_columns must not be empty'):
+    with self.assertRaisesRegex(ValueError,
+                                'feature_columns must not be empty'):
       df.DenseFeatures(feature_columns=[])(features={})
 
   def test_should_be_dense_column(self):
-    with self.assertRaisesRegexp(ValueError, 'must be a .*DenseColumn'):
+    with self.assertRaisesRegex(ValueError, 'must be a .*DenseColumn'):
       df.DenseFeatures(feature_columns=[
           fc.categorical_column_with_hash_bucket('wire_cast', 4)
       ])(
@@ -196,7 +196,7 @@ class DenseFeaturesTest(test.TestCase):
           })
 
   def test_does_not_support_dict_columns(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Expected feature_columns to be iterable, found dict.'):
       df.DenseFeatures(feature_columns={'a': fc.numeric_column('a')})(
           features={
@@ -225,7 +225,7 @@ class DenseFeaturesTest(test.TestCase):
       self.assertAllClose([[0., 1.]], self.evaluate(net))
 
   def test_raises_if_duplicate_name(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Duplicate feature column name found for columns'):
       df.DenseFeatures(
           feature_columns=[fc.numeric_column('a'),
@@ -278,7 +278,7 @@ class DenseFeaturesTest(test.TestCase):
     price = fc.numeric_column('price', shape=2)
     with ops.Graph().as_default():
       features = {'price': [[1.], [5.]]}
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           Exception,
           r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
         df.DenseFeatures([price])(features)
@@ -348,7 +348,7 @@ class DenseFeaturesTest(test.TestCase):
               sparse_tensor.SparseTensor(
                   indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
       }
-      with self.assertRaisesRegexp(Exception, 'must be a .*DenseColumn'):
+      with self.assertRaisesRegex(Exception, 'must be a .*DenseColumn'):
         df.DenseFeatures([animal])(features)
 
   def test_static_batch_size_mismatch(self):
@@ -359,7 +359,7 @@ class DenseFeaturesTest(test.TestCase):
           'price1': [[1.], [5.], [7.]],  # batchsize = 3
           'price2': [[3.], [4.]]  # batchsize = 2
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         df.DenseFeatures([price1, price2])(features)
@@ -374,7 +374,7 @@ class DenseFeaturesTest(test.TestCase):
           'price2': [[3.], [4.]],  # batchsize = 2
           'price3': [[3.], [4.], [5.]]  # batchsize = 3
       }
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
         df.DenseFeatures([price1, price2, price3])(features)
@@ -389,8 +389,8 @@ class DenseFeaturesTest(test.TestCase):
       }
       net = df.DenseFeatures([price1, price2])(features)
       with _initialized_session() as sess:
-        with self.assertRaisesRegexp(errors.OpError,
-                                     'Dimensions of inputs should match'):
+        with self.assertRaisesRegex(errors.OpError,
+                                    'Dimensions of inputs should match'):
           sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
 
   def test_runtime_batch_size_matches(self):
@@ -646,7 +646,7 @@ class DenseFeaturesTest(test.TestCase):
     self.assertEqual(0, features['price'].shape.ndims)
 
     # Static rank 0 should fail
-    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
+    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
       df.DenseFeatures([price])(features)
 
     # Dynamic rank 0 should fail
diff --git a/tensorflow/python/keras/feature_column/sequence_feature_column_test.py b/tensorflow/python/keras/feature_column/sequence_feature_column_test.py
index f6e24a586f2..8374aa6f671 100644
--- a/tensorflow/python/keras/feature_column/sequence_feature_column_test.py
+++ b/tensorflow/python/keras/feature_column/sequence_feature_column_test.py
@@ -167,7 +167,7 @@ class SequenceFeaturesTest(test.TestCase, parameterized.TestCase):
     embedding_column_a = fc.embedding_column(
         categorical_column_a, dimension=2)
     sequence_input_layer = ksfc.SequenceFeatures([embedding_column_a])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'In embedding_column: aaa_embedding\. categorical_column must be of '
         r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
@@ -266,7 +266,7 @@ class SequenceFeaturesTest(test.TestCase, parameterized.TestCase):
         [categorical_column_a, categorical_column_b], dimension=2)
 
     sequence_input_layer = ksfc.SequenceFeatures(shared_embedding_columns)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'In embedding_column: aaa_shared_embedding\. categorical_column must '
         r'be of type SequenceCategoricalColumn to use SequenceFeatures\.'):
@@ -357,7 +357,7 @@ class SequenceFeaturesTest(test.TestCase, parameterized.TestCase):
     indicator_column_a = fc.indicator_column(categorical_column_a)
 
     sequence_input_layer = ksfc.SequenceFeatures([indicator_column_a])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'In indicator_column: aaa_indicator\. categorical_column must be of '
         r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
@@ -464,8 +464,8 @@ class SequenceFeaturesTest(test.TestCase, parameterized.TestCase):
     sequence_input_layer = ksfc.SequenceFeatures(
         [numeric_column_a, numeric_column_b])
 
-    with self.assertRaisesRegexp(
-        errors.InvalidArgumentError, r'Condition x == y did not hold.*'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'Condition x == y did not hold.*'):
       _, sequence_length = sequence_input_layer({
           'aaa': sparse_input_a,
           'bbb': sparse_input_b
diff --git a/tensorflow/python/keras/integration_test/function_test.py b/tensorflow/python/keras/integration_test/function_test.py
index 9f37bd25a61..7eed6856752 100644
--- a/tensorflow/python/keras/integration_test/function_test.py
+++ b/tensorflow/python/keras/integration_test/function_test.py
@@ -80,8 +80,8 @@ class FunctionTest(tf.test.TestCase):
     # matmul to fail, due to incompatible dims.  What would have been a graph
     # build time error (layer would complain about the inner dim being 4).
     with self.captureWritesToStream(sys.stderr) as printed:
-      with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
-                                   r'Matrix size-incompatible'):
+      with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
+                                  r'Matrix size-incompatible'):
         fn(tf.ones((3, 4)))
 
   def testDefunKerasModelCall(self):
diff --git a/tensorflow/python/keras/layers/advanced_activations_test.py b/tensorflow/python/keras/layers/advanced_activations_test.py
index 2145dab7f6d..80eae8e72de 100644
--- a/tensorflow/python/keras/layers/advanced_activations_test.py
+++ b/tensorflow/python/keras/layers/advanced_activations_test.py
@@ -76,12 +76,12 @@ class AdvancedActivationsTest(keras_parameterized.TestCase):
       self.assertTrue('Relu6' in keras.layers.ReLU(max_value=6)(x).name)
 
   def test_relu_with_invalid_arg(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'max_value of Relu layer cannot be negative value: -10'):
       testing_utils.layer_test(keras.layers.ReLU,
                                kwargs={'max_value': -10},
                                input_shape=(2, 3, 4))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'negative_slope of Relu layer cannot be negative value: -2'):
       with self.cached_session():
diff --git a/tensorflow/python/keras/layers/convolutional_test.py b/tensorflow/python/keras/layers/convolutional_test.py
index 2c6f6def8ab..1661f843dc9 100644
--- a/tensorflow/python/keras/layers/convolutional_test.py
+++ b/tensorflow/python/keras/layers/convolutional_test.py
@@ -423,9 +423,9 @@ class GroupedConvTest(keras_parameterized.TestCase):
       ('Conv3D', keras.layers.Conv3D),
   )
   def test_group_conv_incorrect_use(self, layer):
-    with self.assertRaisesRegexp(ValueError, 'The number of filters'):
+    with self.assertRaisesRegex(ValueError, 'The number of filters'):
       layer(16, 3, groups=3)
-    with self.assertRaisesRegexp(ValueError, 'The number of input channels'):
+    with self.assertRaisesRegex(ValueError, 'The number of input channels'):
       layer(16, 3, groups=4).build((32, 12, 12, 3))
 
   @parameterized.named_parameters(
diff --git a/tensorflow/python/keras/layers/core_test.py b/tensorflow/python/keras/layers/core_test.py
index 15cd8157c0c..f6509814249 100644
--- a/tensorflow/python/keras/layers/core_test.py
+++ b/tensorflow/python/keras/layers/core_test.py
@@ -324,7 +324,7 @@ class TestStatefulLambda(keras_parameterized.TestCase):
     (    )?  <tf.Variable \'.*shift_and_scale/shift:0\'.+
     (    )?The layer cannot safely ensure proper Variable reuse.+''')
 
-    with self.assertRaisesRegexp(ValueError, expected_error):
+    with self.assertRaisesRegex(ValueError, expected_error):
       layer = keras.layers.Lambda(lambda_fn, name='shift_and_scale')
       model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
       model(array_ops.ones((4, 1)))
@@ -342,7 +342,7 @@ class TestStatefulLambda(keras_parameterized.TestCase):
     (    )?  <tf.Variable \'.*bias_dense/dense/kernel:0\'.+
     (    )?The layer cannot safely ensure proper Variable reuse.+''')
 
-    with self.assertRaisesRegexp(ValueError, expected_error):
+    with self.assertRaisesRegex(ValueError, expected_error):
       layer = keras.layers.Lambda(bad_lambda_fn, name='bias_dense')
       model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
       model(array_ops.ones((4, 1)))
@@ -365,7 +365,7 @@ class TestStatefulLambda(keras_parameterized.TestCase):
       raise ValueError(msg)
     layer._warn = patched_warn
 
-    with self.assertRaisesRegexp(ValueError, expected_warning):
+    with self.assertRaisesRegex(ValueError, expected_warning):
       model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
       model(array_ops.ones((4, 1)))
 
@@ -448,13 +448,13 @@ class CoreLayersTest(keras_parameterized.TestCase):
         keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
 
   def test_permute_errors_on_invalid_starting_dims_index(self):
-    with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
+    with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
       testing_utils.layer_test(
           keras.layers.Permute,
           kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))
 
   def test_permute_errors_on_invalid_set_of_dims_indices(self):
-    with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
+    with self.assertRaisesRegex(ValueError, r'Invalid permutation .*dims.*'):
       testing_utils.layer_test(
           keras.layers.Permute,
           kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))
diff --git a/tensorflow/python/keras/layers/dense_attention_test.py b/tensorflow/python/keras/layers/dense_attention_test.py
index 750ec0d08d1..504c4ab6984 100644
--- a/tensorflow/python/keras/layers/dense_attention_test.py
+++ b/tensorflow/python/keras/layers/dense_attention_test.py
@@ -401,39 +401,37 @@ class AttentionTest(test.TestCase, parameterized.TestCase):
   def test_inputs_not_list(self):
     attention_layer = dense_attention.Attention()
     q = np.array([[[1.1]]], dtype=np.float32)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Attention layer must be called on a list of inputs'):
       attention_layer(q)
 
   def test_inputs_too_short(self):
     attention_layer = dense_attention.Attention()
     q = np.array([[[1.1]]], dtype=np.float32)
-    with self.assertRaisesRegexp(
-        ValueError,
-        'Attention layer accepts inputs list of length 2 or 3'):
+    with self.assertRaisesRegex(
+        ValueError, 'Attention layer accepts inputs list of length 2 or 3'):
       attention_layer([q])
 
   def test_inputs_too_long(self):
     attention_layer = dense_attention.Attention()
     q = np.array([[[1.1]]], dtype=np.float32)
-    with self.assertRaisesRegexp(
-        ValueError,
-        'Attention layer accepts inputs list of length 2 or 3'):
+    with self.assertRaisesRegex(
+        ValueError, 'Attention layer accepts inputs list of length 2 or 3'):
       attention_layer([q, q, q, q])
 
   def test_mask_not_list(self):
     attention_layer = dense_attention.Attention()
     q = np.array([[[1.1]]], dtype=np.float32)
     mask = np.array([[True]], dtype=np.bool_)
-    with self.assertRaisesRegexp(
-        ValueError, 'Attention layer mask must be a list'):
+    with self.assertRaisesRegex(ValueError,
+                                'Attention layer mask must be a list'):
       attention_layer([q, q], mask=mask)
 
   def test_mask_too_short(self):
     attention_layer = dense_attention.Attention()
     q = np.array([[[1.1]]], dtype=np.float32)
     mask = np.array([[True]], dtype=np.bool_)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Attention layer mask must be a list of length 2'):
       attention_layer([q, q], mask=[mask])
 
@@ -441,7 +439,7 @@ class AttentionTest(test.TestCase, parameterized.TestCase):
     attention_layer = dense_attention.Attention()
     q = np.array([[[1.1]]], dtype=np.float32)
     mask = np.array([[True]], dtype=np.bool_)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Attention layer mask must be a list of length 2'):
       attention_layer([q, q], mask=[mask, mask, mask])
 
diff --git a/tensorflow/python/keras/layers/einsum_dense_test.py b/tensorflow/python/keras/layers/einsum_dense_test.py
index e9ae7271130..f7ab34aed3b 100644
--- a/tensorflow/python/keras/layers/einsum_dense_test.py
+++ b/tensorflow/python/keras/layers/einsum_dense_test.py
@@ -282,7 +282,7 @@ class TestEinsumLayerAPI(keras_parameterized.TestCase):
     input_tensor = keras.Input(shape=(32,))
     layer = einsum_dense.EinsumDense(
         equation="ab,bc->ac", output_shape=64, bias_axes="y")
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, ".*is not a part of the output specification.*"):
       _ = layer(input_tensor)
 
@@ -290,7 +290,7 @@ class TestEinsumLayerAPI(keras_parameterized.TestCase):
     input_tensor = keras.Input(shape=(32, 64))
     layer = einsum_dense.EinsumDense(
         equation="abc,cd->abd", output_shape=(10, 96))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, ".*Input shape and output shape do not match at shared "
         "dimension 'b'.*"):
       _ = layer(input_tensor)
@@ -298,7 +298,7 @@ class TestEinsumLayerAPI(keras_parameterized.TestCase):
   def test_unspecified_output_dim_fails(self):
     input_tensor = keras.Input(shape=(32,))
     layer = einsum_dense.EinsumDense(equation="ab,bc->cd", output_shape=64)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, ".*Dimension 'd' was specified in the output 'cd' but has "
         "no corresponding dim.*"):
       _ = layer(input_tensor)
@@ -306,8 +306,8 @@ class TestEinsumLayerAPI(keras_parameterized.TestCase):
   def test_unspecified_weight_dim_fails(self):
     input_tensor = keras.Input(shape=(32,))
     layer = einsum_dense.EinsumDense(equation="ab,zd->ad", output_shape=64)
-    with self.assertRaisesRegexp(
-        ValueError, ".*Weight dimension 'z' did not have a match "):
+    with self.assertRaisesRegex(ValueError,
+                                ".*Weight dimension 'z' did not have a match "):
       _ = layer(input_tensor)
 
 
diff --git a/tensorflow/python/keras/layers/kernelized_test.py b/tensorflow/python/keras/layers/kernelized_test.py
index a6a9d88423f..3c836f1ccde 100644
--- a/tensorflow/python/keras/layers/kernelized_test.py
+++ b/tensorflow/python/keras/layers/kernelized_test.py
@@ -87,18 +87,18 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
     self.assertAllClose(output_data, new_output_data, atol=1e-4)
 
   def test_invalid_output_dim(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'`output_dim` should be a positive integer. Given: -3.'):
       _ = kernel_layers.RandomFourierFeatures(output_dim=-3, scale=2.0)
 
   def test_unsupported_kernel_type(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'Unsupported kernel type: \'unsupported_kernel\'.'):
       _ = kernel_layers.RandomFourierFeatures(
           3, 'unsupported_kernel', stddev=2.0)
 
   def test_invalid_scale(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'When provided, `scale` should be a positive float. Given: 0.0.'):
       _ = kernel_layers.RandomFourierFeatures(output_dim=10, scale=0.0)
@@ -106,7 +106,7 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
   def test_invalid_input_shape(self):
     inputs = random_ops.random_uniform((3, 2, 4), seed=1)
     rff_layer = kernel_layers.RandomFourierFeatures(output_dim=10, scale=3.0)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'The rank of the input tensor should be 2. Got 3 instead.'):
       _ = rff_layer(inputs)
@@ -166,7 +166,7 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
           output_dim=5,
           kernel_initializer=initializer,
           name='random_fourier_features')
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, r'The last dimension of the inputs to '
           '`RandomFourierFeatures` should be defined. Found `None`.'):
         rff_layer(inputs)
@@ -176,7 +176,7 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
           output_dim=5,
           kernel_initializer=initializer,
           name='random_fourier_features')
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, r'The last dimension of the inputs to '
           '`RandomFourierFeatures` should be defined. Found `None`.'):
         rff_layer(inputs)
@@ -201,7 +201,7 @@ class RandomFourierFeaturesTest(test.TestCase, parameterized.TestCase):
     with self.assertRaises(ValueError):
       rff_layer.compute_output_shape(tensor_shape.TensorShape([3, 2, 3]))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'The innermost dimension of input shape must be defined.'):
       rff_layer.compute_output_shape(tensor_shape.TensorShape([3, None]))
 
diff --git a/tensorflow/python/keras/layers/merge_test.py b/tensorflow/python/keras/layers/merge_test.py
index af8f2ea550c..16d3701b7e3 100644
--- a/tensorflow/python/keras/layers/merge_test.py
+++ b/tensorflow/python/keras/layers/merge_test.py
@@ -61,11 +61,11 @@ class MergeLayersTest(keras_parameterized.TestCase):
                 add_layer.compute_mask(
                     [i1, i2], [K.variable(x1), K.variable(x2)]))))
 
-    with self.assertRaisesRegexp(ValueError, '`mask` should be a list.'):
+    with self.assertRaisesRegex(ValueError, '`mask` should be a list.'):
       add_layer.compute_mask([i1, i2, i3], x1)
-    with self.assertRaisesRegexp(ValueError, '`inputs` should be a list.'):
+    with self.assertRaisesRegex(ValueError, '`inputs` should be a list.'):
       add_layer.compute_mask(i1, [None, None, None])
-    with self.assertRaisesRegexp(ValueError, ' should have the same length.'):
+    with self.assertRaisesRegex(ValueError, ' should have the same length.'):
       add_layer.compute_mask([i1, i2, i3], [None, None])
 
   def test_merge_subtract(self):
@@ -92,15 +92,15 @@ class MergeLayersTest(keras_parameterized.TestCase):
                 subtract_layer.compute_mask(
                     [i1, i2], [K.variable(x1), K.variable(x2)]))))
 
-    with self.assertRaisesRegexp(ValueError, '`mask` should be a list.'):
+    with self.assertRaisesRegex(ValueError, '`mask` should be a list.'):
       subtract_layer.compute_mask([i1, i2], x1)
-    with self.assertRaisesRegexp(ValueError, '`inputs` should be a list.'):
+    with self.assertRaisesRegex(ValueError, '`inputs` should be a list.'):
       subtract_layer.compute_mask(i1, [None, None])
-    with self.assertRaisesRegexp(ValueError,
-                                 'layer should be called on exactly 2 inputs'):
+    with self.assertRaisesRegex(ValueError,
+                                'layer should be called on exactly 2 inputs'):
       subtract_layer([i1, i2, i3])
-    with self.assertRaisesRegexp(ValueError,
-                                 'layer should be called on exactly 2 inputs'):
+    with self.assertRaisesRegex(ValueError,
+                                'layer should be called on exactly 2 inputs'):
       subtract_layer([i1])
 
   def test_merge_multiply(self):
@@ -183,14 +183,14 @@ class MergeLayersTest(keras_parameterized.TestCase):
                 concat_layer.compute_mask(
                     [i1, i2], [K.variable(x1), K.variable(x2)]))))
 
-    with self.assertRaisesRegexp(ValueError, '`mask` should be a list.'):
+    with self.assertRaisesRegex(ValueError, '`mask` should be a list.'):
       concat_layer.compute_mask([i1, i2], x1)
-    with self.assertRaisesRegexp(ValueError, '`inputs` should be a list.'):
+    with self.assertRaisesRegex(ValueError, '`inputs` should be a list.'):
       concat_layer.compute_mask(i1, [None, None])
-    with self.assertRaisesRegexp(ValueError, 'should have the same length'):
+    with self.assertRaisesRegex(ValueError, 'should have the same length'):
       concat_layer.compute_mask([i1, i2], [None])
-    with self.assertRaisesRegexp(ValueError,
-                                 'layer should be called on a list of inputs'):
+    with self.assertRaisesRegex(ValueError,
+                                'layer should be called on a list of inputs'):
       concat_layer(i1)
 
   def test_merge_dot(self):
@@ -280,11 +280,11 @@ class MergeLayersTestNoExecution(test.TestCase):
   def test_concatenate_errors(self):
     i1 = keras.layers.Input(shape=(4, 5))
     i2 = keras.layers.Input(shape=(3, 5))
-    with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
+    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
       keras.layers.concatenate([i1, i2], axis=-1)
-    with self.assertRaisesRegexp(ValueError, 'called on a list'):
+    with self.assertRaisesRegex(ValueError, 'called on a list'):
       keras.layers.concatenate(i1, axis=-1)
-    with self.assertRaisesRegexp(ValueError, 'called on a list'):
+    with self.assertRaisesRegex(ValueError, 'called on a list'):
       keras.layers.concatenate([i1], axis=-1)
 
   def test_concatenate_with_partial_shape(self):
@@ -298,7 +298,7 @@ class MergeLayersTestNoExecution(test.TestCase):
     keras.layers.concatenate([i1, i2], axis=-1)
 
     # Different rank
-    with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
+    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
       keras.layers.concatenate([i1, i3], axis=-1)
 
     # Valid case with partial dimension information
@@ -309,10 +309,10 @@ class MergeLayersTestNoExecution(test.TestCase):
     keras.layers.concatenate([i1, i5], axis=1)
 
     # Mismatch in batch dimension.
-    with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
+    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
       keras.layers.concatenate([i1, i4], axis=-1)
 
-    with self.assertRaisesRegexp(ValueError, 'inputs with matching shapes'):
+    with self.assertRaisesRegex(ValueError, 'inputs with matching shapes'):
       keras.layers.concatenate([i1, i2, i4], axis=-1)
 
   def test_dot_errors(self):
diff --git a/tensorflow/python/keras/layers/normalization_test.py b/tensorflow/python/keras/layers/normalization_test.py
index 39992f7580a..e60f34720a2 100644
--- a/tensorflow/python/keras/layers/normalization_test.py
+++ b/tensorflow/python/keras/layers/normalization_test.py
@@ -302,26 +302,26 @@ class BatchNormalizationV2Test(keras_parameterized.TestCase):
     norm(inp)
     self.assertEqual(norm.fused, True)
 
-    with self.assertRaisesRegexp(ValueError, 'fused.*renorm'):
+    with self.assertRaisesRegex(ValueError, 'fused.*renorm'):
       normalization_v2.BatchNormalization(fused=True, renorm=True)
 
-    with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
+    with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
       normalization_v2.BatchNormalization(fused=True, axis=2)
 
-    with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'):
+    with self.assertRaisesRegex(ValueError, 'fused.*when axis is 1 or 3'):
       normalization_v2.BatchNormalization(fused=True, axis=[1, 3])
 
-    with self.assertRaisesRegexp(ValueError, 'fused.*virtual_batch_size'):
+    with self.assertRaisesRegex(ValueError, 'fused.*virtual_batch_size'):
       normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2)
 
-    with self.assertRaisesRegexp(ValueError, 'fused.*adjustment'):
+    with self.assertRaisesRegex(ValueError, 'fused.*adjustment'):
       normalization_v2.BatchNormalization(fused=True,
                                           adjustment=lambda _: (1, 0))
 
     norm = normalization_v2.BatchNormalization(fused=True)
     self.assertEqual(norm.fused, True)
     inp = keras.layers.Input(shape=(4, 4))
-    with self.assertRaisesRegexp(ValueError, '4D input tensors'):
+    with self.assertRaisesRegex(ValueError, '4D input tensors'):
       norm(inp)
 
   def test_updates_in_wrap_function(self):
@@ -593,19 +593,19 @@ class LayerNormalizationTest(keras_parameterized.TestCase):
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testIncorrectAxisType(self):
-    with self.assertRaisesRegexp(
-        TypeError, r'Expected an int or a list/tuple of ints'):
+    with self.assertRaisesRegex(TypeError,
+                                r'Expected an int or a list/tuple of ints'):
       _ = normalization.LayerNormalization(axis={'axis': -1})
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testInvalidAxis(self):
-    with self.assertRaisesRegexp(ValueError, r'Invalid axis: 3'):
+    with self.assertRaisesRegex(ValueError, r'Invalid axis: 3'):
       layer_norm = normalization.LayerNormalization(axis=3)
       layer_norm.build(input_shape=(2, 2, 2))
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testDuplicateAxis(self):
-    with self.assertRaisesRegexp(ValueError, r'Duplicate axis:'):
+    with self.assertRaisesRegex(ValueError, r'Duplicate axis:'):
       layer_norm = normalization.LayerNormalization(axis=[-1, -1])
       layer_norm.build(input_shape=(2, 2, 2))
 
diff --git a/tensorflow/python/keras/layers/preprocessing/hashing_test.py b/tensorflow/python/keras/layers/preprocessing/hashing_test.py
index 9bc5e75819f..2e5e5f7005c 100644
--- a/tensorflow/python/keras/layers/preprocessing/hashing_test.py
+++ b/tensorflow/python/keras/layers/preprocessing/hashing_test.py
@@ -225,7 +225,7 @@ class HashingTest(keras_parameterized.TestCase):
     inp_data_2 = ragged_factory_ops.constant(
         [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
         dtype=dtypes.string)
-    with self.assertRaisesRegexp(ValueError, 'not supported yet'):
+    with self.assertRaisesRegex(ValueError, 'not supported yet'):
       _ = layer([inp_data_1, inp_data_2])
 
   def test_hash_ragged_int_input_farmhash(self):
@@ -274,7 +274,7 @@ class HashingTest(keras_parameterized.TestCase):
     inp_data_2 = ragged_factory_ops.constant(
         [['omar', 'stringer', 'marlo', 'wire'], ['marlo', 'skywalker', 'wire']],
         dtype=dtypes.string)
-    with self.assertRaisesRegexp(ValueError, 'not supported yet'):
+    with self.assertRaisesRegex(ValueError, 'not supported yet'):
       _ = layer([inp_data_1, inp_data_2])
 
   def test_hash_ragged_int_input_siphash(self):
@@ -292,15 +292,15 @@ class HashingTest(keras_parameterized.TestCase):
     self.assertAllClose(out_data, model.predict(inp_data))
 
   def test_invalid_inputs(self):
-    with self.assertRaisesRegexp(ValueError, 'cannot be `None`'):
+    with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
       _ = hashing.Hashing(num_bins=None)
-    with self.assertRaisesRegexp(ValueError, 'cannot be `None`'):
+    with self.assertRaisesRegex(ValueError, 'cannot be `None`'):
       _ = hashing.Hashing(num_bins=-1)
-    with self.assertRaisesRegexp(ValueError, 'can only be a tuple of size 2'):
+    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
       _ = hashing.Hashing(num_bins=2, salt='string')
-    with self.assertRaisesRegexp(ValueError, 'can only be a tuple of size 2'):
+    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
       _ = hashing.Hashing(num_bins=2, salt=[1])
-    with self.assertRaisesRegexp(ValueError, 'can only be a tuple of size 2'):
+    with self.assertRaisesRegex(ValueError, 'can only be a tuple of size 2'):
       _ = hashing.Hashing(num_bins=1, salt=constant_op.constant([133, 137]))
 
   def test_hash_compute_output_signature(self):
diff --git a/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py b/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
index f5210589b82..a3540fca6df 100644
--- a/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
+++ b/tensorflow/python/keras/layers/preprocessing/image_preprocessing_test.py
@@ -188,8 +188,8 @@ class CenterCropTest(keras_parameterized.TestCase):
       ('center_crop_10_by_8', 10, 8),
       ('center_crop_10_by_12', 10, 12))
   def test_invalid_center_crop(self, expected_height, expected_width):
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'assertion failed'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'assertion failed'):
       self._run_test(expected_height, expected_width)
 
   def test_config_with_custom_name(self):
diff --git a/tensorflow/python/keras/layers/preprocessing/table_utils_test.py b/tensorflow/python/keras/layers/preprocessing/table_utils_test.py
index ab7e80b628c..bbb4d2a97a6 100644
--- a/tensorflow/python/keras/layers/preprocessing/table_utils_test.py
+++ b/tensorflow/python/keras/layers/preprocessing/table_utils_test.py
@@ -114,7 +114,7 @@ class CategoricalEncodingInputTest(
 
     table = get_table(dtype=dtypes.int64, oov_tokens=[1, 2])
 
-    with self.assertRaisesRegexp(ValueError, "must be 1-dimensional"):
+    with self.assertRaisesRegex(ValueError, "must be 1-dimensional"):
       table.insert(key_data, value_data)
 
 
diff --git a/tensorflow/python/keras/layers/recurrent_test.py b/tensorflow/python/keras/layers/recurrent_test.py
index b6afe2a0e03..c2c3d135f68 100644
--- a/tensorflow/python/keras/layers/recurrent_test.py
+++ b/tensorflow/python/keras/layers/recurrent_test.py
@@ -1021,8 +1021,8 @@ class RNNTest(keras_parameterized.TestCase):
 
   def test_get_initial_state(self):
     cell = keras.layers.SimpleRNNCell(5)
-    with self.assertRaisesRegexp(ValueError,
-                                 'batch_size and dtype cannot be None'):
+    with self.assertRaisesRegex(ValueError,
+                                'batch_size and dtype cannot be None'):
       cell.get_initial_state(None, None, None)
 
     if not context.executing_eagerly():
@@ -1359,7 +1359,7 @@ class RNNTest(keras_parameterized.TestCase):
     cell = keras.layers.SimpleRNNCell(5)
     x = keras.Input((None, 5))
     layer = keras.layers.RNN(cell, return_sequences=True, unroll=True)
-    with self.assertRaisesRegexp(ValueError, 'Cannot unroll a RNN.*'):
+    with self.assertRaisesRegex(ValueError, 'Cannot unroll a RNN.*'):
       layer(x)
 
   def test_full_input_spec(self):
@@ -1385,11 +1385,11 @@ class RNNTest(keras_parameterized.TestCase):
 
   def test_reset_states(self):
     # See https://github.com/tensorflow/tensorflow/issues/25852
-    with self.assertRaisesRegexp(ValueError, 'it needs to know its batch size'):
+    with self.assertRaisesRegex(ValueError, 'it needs to know its batch size'):
       simple_rnn = keras.layers.SimpleRNN(1, stateful=True)
       simple_rnn.reset_states()
 
-    with self.assertRaisesRegexp(ValueError, 'it needs to know its batch size'):
+    with self.assertRaisesRegex(ValueError, 'it needs to know its batch size'):
       cell = Minimal2DRNNCell(1, 2)
       custom_rnn = keras.layers.RNN(cell, stateful=True)
       custom_rnn.reset_states()
@@ -1608,8 +1608,8 @@ class RNNTest(keras_parameterized.TestCase):
 
     # Must raise error when unroll is set to True
     unroll_rnn_layer = layer(3, unroll=True)
-    with self.assertRaisesRegexp(ValueError,
-                                 'The input received contains RaggedTensors *'):
+    with self.assertRaisesRegex(ValueError,
+                                'The input received contains RaggedTensors *'):
       unroll_rnn_layer(inputs)
 
     # Check if return sequences outputs are correct
diff --git a/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py b/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py
index 8491ed0098e..b0fd5189b17 100644
--- a/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py
+++ b/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py
@@ -222,11 +222,11 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
     wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper
     cell = layers.LSTMCell(10)
 
-    with self.assertRaisesRegexp(ValueError, "does not work with "):
+    with self.assertRaisesRegex(ValueError, "does not work with "):
       wrapper_cls(cell)
 
     cell = layers.LSTMCellV2(10)
-    with self.assertRaisesRegexp(ValueError, "does not work with "):
+    with self.assertRaisesRegex(ValueError, "does not work with "):
       wrapper_cls(cell)
 
 
diff --git a/tensorflow/python/keras/layers/wrappers_test.py b/tensorflow/python/keras/layers/wrappers_test.py
index 5ee794dd1ef..75d951a4a7a 100644
--- a/tensorflow/python/keras/layers/wrappers_test.py
+++ b/tensorflow/python/keras/layers/wrappers_test.py
@@ -149,9 +149,8 @@ class TimeDistributedTest(keras_parameterized.TestCase):
 
   def test_timedistributed_invalid_init(self):
     x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
-    with self.assertRaisesRegexp(
-        ValueError,
-        'Please initialize `TimeDistributed` layer with a '
+    with self.assertRaisesRegex(
+        ValueError, 'Please initialize `TimeDistributed` layer with a '
         '`tf.keras.layers.Layer` instance.'):
       keras.layers.TimeDistributed(x)
 
@@ -306,13 +305,13 @@ class TimeDistributedTest(keras_parameterized.TestCase):
     self.assertEqual(out_2.shape.as_list(), [None, 1, 5])
 
     ph_3 = keras.backend.placeholder(shape=(None, 1, 18))
-    with self.assertRaisesRegexp(ValueError, 'is incompatible with layer'):
+    with self.assertRaisesRegex(ValueError, 'is incompatible with layer'):
       time_dist(ph_3)
 
   def test_TimeDistributed_with_invalid_dimensions(self):
     time_dist = keras.layers.TimeDistributed(keras.layers.Dense(5))
     ph = keras.backend.placeholder(shape=(None, 10))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         '`TimeDistributed` Layer should be passed an `input_shape `'):
       time_dist(ph)
@@ -511,7 +510,7 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
 
   def test_bidirectional_invalid_init(self):
     x = constant_op.constant(np.zeros((1, 1)).astype('float32'))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         'Please initialize `Bidirectional` layer with a `Layer` instance.'):
       keras.layers.Bidirectional(x)
@@ -1053,15 +1052,15 @@ class BidirectionalTest(test.TestCase, parameterized.TestCase):
     forward_layer = rnn(units)
     backward_layer = rnn(units)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'should have different `go_backwards` value.'):
+    with self.assertRaisesRegex(ValueError,
+                                'should have different `go_backwards` value.'):
       keras.layers.Bidirectional(
           forward_layer, merge_mode='concat', backward_layer=backward_layer)
 
     for attr in ('stateful', 'return_sequences', 'return_state'):
       kwargs = {attr: True}
       backward_layer = rnn(units, go_backwards=True, **kwargs)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'expected to have the same value for attribute ' + attr):
         keras.layers.Bidirectional(
             forward_layer, merge_mode='concat', backward_layer=backward_layer)
diff --git a/tensorflow/python/keras/legacy_tf_layers/base_test.py b/tensorflow/python/keras/legacy_tf_layers/base_test.py
index 0a61d77ba76..85185a8c22c 100644
--- a/tensorflow/python/keras/legacy_tf_layers/base_test.py
+++ b/tensorflow/python/keras/legacy_tf_layers/base_test.py
@@ -151,7 +151,7 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
   def testInvalidTrainableSynchronizationCombination(self):
     layer = base_layers.Layer(name='my_layer')
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Synchronization value can be set to '
         'VariableSynchronization.ON_READ only for non-trainable variables. '
         'You have specified trainable=True and '
@@ -278,11 +278,11 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
 
     if not context.executing_eagerly():
       layer = CustomerLayer()
-      with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
+      with self.assertRaisesRegex(ValueError, r'requires a defined rank'):
         layer.apply(array_ops.placeholder('int32'))
 
     layer = CustomerLayer()
-    with self.assertRaisesRegexp(ValueError, r'expected ndim=2'):
+    with self.assertRaisesRegex(ValueError, r'expected ndim=2'):
       layer.apply(constant_op.constant([1]))
 
     # Note that we re-create the layer since in Eager mode, input spec checks
@@ -305,11 +305,11 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
 
     if not context.executing_eagerly():
       layer = CustomerLayer()
-      with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
+      with self.assertRaisesRegex(ValueError, r'requires a defined rank'):
         layer.apply(array_ops.placeholder('int32'))
 
     layer = CustomerLayer()
-    with self.assertRaisesRegexp(ValueError, r'expected min_ndim=2'):
+    with self.assertRaisesRegex(ValueError, r'expected min_ndim=2'):
       layer.apply(constant_op.constant([1]))
 
     # Works
@@ -333,11 +333,11 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
 
     if not context.executing_eagerly():
       layer = CustomerLayer()
-      with self.assertRaisesRegexp(ValueError, r'requires a defined rank'):
+      with self.assertRaisesRegex(ValueError, r'requires a defined rank'):
         layer.apply(array_ops.placeholder('int32'))
 
     layer = CustomerLayer()
-    with self.assertRaisesRegexp(ValueError, r'expected max_ndim=2'):
+    with self.assertRaisesRegex(ValueError, r'expected max_ndim=2'):
       layer.apply(constant_op.constant([[[1], [2]]]))
 
     # Works
@@ -360,7 +360,7 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
         return inputs
 
     layer = CustomerLayer()
-    with self.assertRaisesRegexp(ValueError, r'expected dtype=float32'):
+    with self.assertRaisesRegex(ValueError, r'expected dtype=float32'):
       layer.apply(constant_op.constant(1, dtype=dtypes.int32))
 
     # Works
@@ -380,7 +380,7 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
         return inputs
 
     layer = CustomerLayer()
-    with self.assertRaisesRegexp(ValueError, r'expected axis'):
+    with self.assertRaisesRegex(ValueError, r'expected axis'):
       layer.apply(constant_op.constant([1, 2, 3]))
 
     # Works
@@ -402,7 +402,7 @@ class BaseLayerTest(test.TestCase, parameterized.TestCase):
         return inputs
 
     layer = CustomerLayer()
-    with self.assertRaisesRegexp(ValueError, r'expected shape'):
+    with self.assertRaisesRegex(ValueError, r'expected shape'):
       layer.apply(constant_op.constant([[1, 2]]))
 
     # Works
diff --git a/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py b/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py
index b0eeede8737..a6a4bc7a088 100644
--- a/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py
+++ b/tensorflow/python/keras/legacy_tf_layers/convolutional_test.py
@@ -39,25 +39,25 @@ class ConvTest(test.TestCase):
   def testInvalidDataFormat(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'data_format'):
+    with self.assertRaisesRegex(ValueError, 'data_format'):
       conv_layers.conv2d(images, 32, 3, data_format='invalid')
 
   def testInvalidStrides(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.conv2d(images, 32, 3, strides=(1, 2, 3))
 
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.conv2d(images, 32, 3, strides=None)
 
   def testInvalidKernelSize(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.conv2d(images, 32, (1, 2, 3))
 
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.conv2d(images, 32, None)
 
   @test_util.run_deprecated_v1
@@ -104,16 +104,16 @@ class ConvTest(test.TestCase):
   def testUnknownInputChannels(self):
     images = array_ops.placeholder(dtypes.float32, (5, 7, 9, None))
     layer = conv_layers.Conv2D(32, [3, 3], activation=nn_ops.relu)
-    with self.assertRaisesRegexp(ValueError,
-                                 'The channel dimension of the inputs '
-                                 'should be defined. Found `None`.'):
+    with self.assertRaisesRegex(
+        ValueError, 'The channel dimension of the inputs '
+        'should be defined. Found `None`.'):
       _ = layer.apply(images)
 
     images = array_ops.placeholder(dtypes.float32, (5, None, 7, 9))
     layer = conv_layers.Conv2D(32, [3, 3], data_format='channels_first')
-    with self.assertRaisesRegexp(ValueError,
-                                 'The channel dimension of the inputs '
-                                 'should be defined. Found `None`.'):
+    with self.assertRaisesRegex(
+        ValueError, 'The channel dimension of the inputs '
+        'should be defined. Found `None`.'):
       _ = layer.apply(images)
 
   def testConv2DPaddingSame(self):
@@ -175,16 +175,16 @@ class ConvTest(test.TestCase):
   def testUnknownInputChannelsConv1D(self):
     data = array_ops.placeholder(dtypes.float32, (5, 4, None))
     layer = conv_layers.Conv1D(32, 3, activation=nn_ops.relu)
-    with self.assertRaisesRegexp(ValueError,
-                                 'The channel dimension of the inputs '
-                                 'should be defined. Found `None`.'):
+    with self.assertRaisesRegex(
+        ValueError, 'The channel dimension of the inputs '
+        'should be defined. Found `None`.'):
       _ = layer.apply(data)
 
     data = array_ops.placeholder(dtypes.float32, (5, None, 4))
     layer = conv_layers.Conv1D(32, 3, data_format='channels_first')
-    with self.assertRaisesRegexp(ValueError,
-                                 'The channel dimension of the inputs '
-                                 'should be defined. Found `None`.'):
+    with self.assertRaisesRegex(
+        ValueError, 'The channel dimension of the inputs '
+        'should be defined. Found `None`.'):
       _ = layer.apply(data)
 
   @test_util.run_deprecated_v1
@@ -203,9 +203,9 @@ class ConvTest(test.TestCase):
   def testUnknownInputChannelsConv3D(self):
     volumes = array_ops.placeholder(dtypes.float32, (5, 6, 7, 9, None))
     layer = conv_layers.Conv3D(32, [3, 3, 3], activation=nn_ops.relu)
-    with self.assertRaisesRegexp(ValueError,
-                                 'The channel dimension of the inputs '
-                                 'should be defined. Found `None`.'):
+    with self.assertRaisesRegex(
+        ValueError, 'The channel dimension of the inputs '
+        'should be defined. Found `None`.'):
       _ = layer.apply(volumes)
 
   @test_util.run_deprecated_v1
@@ -354,25 +354,25 @@ class SeparableConv1DTest(test.TestCase):
   def testInvalidDataFormat(self):
     length = 9
     data = random_ops.random_uniform((5, length, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'data_format'):
+    with self.assertRaisesRegex(ValueError, 'data_format'):
       conv_layers.separable_conv1d(data, 32, 3, data_format='invalid')
 
   def testInvalidStrides(self):
     length = 9
     data = random_ops.random_uniform((5, length, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.separable_conv1d(data, 32, 3, strides=(1, 2))
 
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.separable_conv1d(data, 32, 3, strides=None)
 
   def testInvalidKernelSize(self):
     length = 9
     data = random_ops.random_uniform((5, length, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.separable_conv1d(data, 32, (1, 2))
 
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.separable_conv1d(data, 32, None)
 
   @test_util.run_deprecated_v1
@@ -528,25 +528,25 @@ class SeparableConv2DTest(test.TestCase):
   def testInvalidDataFormat(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'data_format'):
+    with self.assertRaisesRegex(ValueError, 'data_format'):
       conv_layers.separable_conv2d(images, 32, 3, data_format='invalid')
 
   def testInvalidStrides(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.separable_conv2d(images, 32, 3, strides=(1, 2, 3))
 
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.separable_conv2d(images, 32, 3, strides=None)
 
   def testInvalidKernelSize(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.separable_conv2d(images, 32, (1, 2, 3))
 
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.separable_conv2d(images, 32, None)
 
   @test_util.run_deprecated_v1
@@ -786,25 +786,25 @@ class Conv2DTransposeTest(test.TestCase):
   def testInvalidDataFormat(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'data_format'):
+    with self.assertRaisesRegex(ValueError, 'data_format'):
       conv_layers.conv2d_transpose(images, 32, 3, data_format='invalid')
 
   def testInvalidStrides(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.conv2d_transpose(images, 32, 3, strides=(1, 2, 3))
 
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.conv2d_transpose(images, 32, 3, strides=None)
 
   def testInvalidKernelSize(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.conv2d_transpose(images, 32, (1, 2, 3))
 
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.conv2d_transpose(images, 32, None)
 
   @test_util.run_deprecated_v1
@@ -981,25 +981,25 @@ class Conv3DTransposeTest(test.TestCase):
   def testInvalidDataFormat(self):
     depth, height, width = 5, 7, 9
     volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'data_format'):
+    with self.assertRaisesRegex(ValueError, 'data_format'):
       conv_layers.conv3d_transpose(volumes, 4, 3, data_format='invalid')
 
   def testInvalidStrides(self):
     depth, height, width = 5, 7, 9
     volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.conv3d_transpose(volumes, 4, 3, strides=(1, 2))
 
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       conv_layers.conv3d_transpose(volumes, 4, 3, strides=None)
 
   def testInvalidKernelSize(self):
     depth, height, width = 5, 7, 9
     volumes = random_ops.random_uniform((5, depth, height, width, 32), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.conv3d_transpose(volumes, 4, (1, 2))
 
-    with self.assertRaisesRegexp(ValueError, 'kernel_size'):
+    with self.assertRaisesRegex(ValueError, 'kernel_size'):
       conv_layers.conv3d_transpose(volumes, 4, None)
 
   @test_util.run_deprecated_v1
diff --git a/tensorflow/python/keras/legacy_tf_layers/pooling_test.py b/tensorflow/python/keras/legacy_tf_layers/pooling_test.py
index 0fd63ed335f..597b4a762c0 100644
--- a/tensorflow/python/keras/legacy_tf_layers/pooling_test.py
+++ b/tensorflow/python/keras/legacy_tf_layers/pooling_test.py
@@ -30,25 +30,25 @@ class PoolingTest(test.TestCase):
   def testInvalidDataFormat(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'data_format'):
+    with self.assertRaisesRegex(ValueError, 'data_format'):
       pooling_layers.max_pooling2d(images, 3, strides=2, data_format='invalid')
 
   def testInvalidStrides(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       pooling_layers.max_pooling2d(images, 3, strides=(1, 2, 3))
 
-    with self.assertRaisesRegexp(ValueError, 'strides'):
+    with self.assertRaisesRegex(ValueError, 'strides'):
       pooling_layers.max_pooling2d(images, 3, strides=None)
 
   def testInvalidPoolSize(self):
     height, width = 7, 9
     images = random_ops.random_uniform((5, height, width, 3), seed=1)
-    with self.assertRaisesRegexp(ValueError, 'pool_size'):
+    with self.assertRaisesRegex(ValueError, 'pool_size'):
       pooling_layers.max_pooling2d(images, (1, 2, 3), strides=2)
 
-    with self.assertRaisesRegexp(ValueError, 'pool_size'):
+    with self.assertRaisesRegex(ValueError, 'pool_size'):
       pooling_layers.max_pooling2d(images, None, strides=2)
 
   def testCreateMaxPooling2D(self):
diff --git a/tensorflow/python/keras/losses_test.py b/tensorflow/python/keras/losses_test.py
index 26a586b872b..34213c8308a 100644
--- a/tensorflow/python/keras/losses_test.py
+++ b/tensorflow/python/keras/losses_test.py
@@ -227,13 +227,13 @@ class KerasLossesTest(test.TestCase, parameterized.TestCase):
     self.assertAllClose(self.evaluate(loss), 16, 1e-2)
 
   def test_invalid_reduction(self):
-    with self.assertRaisesRegexp(ValueError, 'Invalid Reduction Key Foo.'):
+    with self.assertRaisesRegex(ValueError, 'Invalid Reduction Key Foo.'):
       losses.MeanSquaredError(reduction='Foo')
 
     mse_obj = losses.MeanSquaredError()
     y = constant_op.constant([1])
     mse_obj.reduction = 'Bar'
-    with self.assertRaisesRegexp(ValueError, 'Invalid Reduction Key Bar.'):
+    with self.assertRaisesRegex(ValueError, 'Invalid Reduction Key Bar.'):
       mse_obj(y, y)
 
   def test_deserialization_error(self):
@@ -308,9 +308,9 @@ class MeanSquaredErrorTest(test.TestCase):
     y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
     y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
     sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
-    with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
-                                 (r'Incompatible shapes: \[2,3\] vs. \[2,2\]|'
-                                  'Dimensions must be equal')):
+    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
+                                (r'Incompatible shapes: \[2,3\] vs. \[2,2\]|'
+                                 'Dimensions must be equal')):
       mse_obj(y_true, y_pred, sample_weight=sample_weight)
 
   def test_no_reduction(self):
@@ -400,9 +400,9 @@ class MeanAbsoluteErrorTest(test.TestCase):
     y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
     y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
     sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
-    with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
-                                 (r'Incompatible shapes: \[2,3\] vs. \[2,2\]|'
-                                  'Dimensions must be equal')):
+    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
+                                (r'Incompatible shapes: \[2,3\] vs. \[2,2\]|'
+                                 'Dimensions must be equal')):
       mae_obj(y_true, y_pred, sample_weight=sample_weight)
 
   def test_no_reduction(self):
@@ -912,7 +912,7 @@ class CategoricalCrossentropyTest(test.TestCase):
                                    [.05, .01, .94]])
 
     cce_obj = losses.CategoricalCrossentropy()
-    with self.assertRaisesRegexp(ValueError, 'Shapes .+ are incompatible'):
+    with self.assertRaisesRegex(ValueError, 'Shapes .+ are incompatible'):
       cce_obj(y_true, y_pred)
 
 
diff --git a/tensorflow/python/keras/metrics_confusion_matrix_test.py b/tensorflow/python/keras/metrics_confusion_matrix_test.py
index 186c3f0328f..58c84557ec9 100644
--- a/tensorflow/python/keras/metrics_confusion_matrix_test.py
+++ b/tensorflow/python/keras/metrics_confusion_matrix_test.py
@@ -106,12 +106,12 @@ class FalsePositivesTest(test.TestCase, parameterized.TestCase):
     self.assertAllClose([125., 42., 12.], self.evaluate(result))
 
   def test_threshold_limit(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'Threshold values must be in \[0, 1\]. Invalid values: \[-1, 2\]'):
       metrics.FalsePositives(thresholds=[-1, 0.5, 2])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'Threshold values must be in \[0, 1\]. Invalid values: \[None\]'):
       metrics.FalsePositives(thresholds=[None])
@@ -817,12 +817,12 @@ class SensitivityAtSpecificityTest(test.TestCase, parameterized.TestCase):
     self.assertAlmostEqual(0.675, self.evaluate(result))
 
   def test_invalid_specificity(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'`specificity` must be in the range \[0, 1\].'):
       metrics.SensitivityAtSpecificity(-1)
 
   def test_invalid_num_thresholds(self):
-    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
+    with self.assertRaisesRegex(ValueError, '`num_thresholds` must be > 0.'):
       metrics.SensitivityAtSpecificity(0.4, num_thresholds=-1)
 
 
@@ -913,12 +913,12 @@ class SpecificityAtSensitivityTest(test.TestCase, parameterized.TestCase):
     self.assertAlmostEqual(0.4, self.evaluate(result))
 
   def test_invalid_sensitivity(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'`sensitivity` must be in the range \[0, 1\].'):
       metrics.SpecificityAtSensitivity(-1)
 
   def test_invalid_num_thresholds(self):
-    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
+    with self.assertRaisesRegex(ValueError, '`num_thresholds` must be > 0.'):
       metrics.SpecificityAtSensitivity(0.4, num_thresholds=-1)
 
 
@@ -1012,12 +1012,12 @@ class PrecisionAtRecallTest(test.TestCase, parameterized.TestCase):
     self.assertAlmostEqual(0.7, self.evaluate(result))
 
   def test_invalid_sensitivity(self):
-    with self.assertRaisesRegexp(
-        ValueError, r'`recall` must be in the range \[0, 1\].'):
+    with self.assertRaisesRegex(ValueError,
+                                r'`recall` must be in the range \[0, 1\].'):
       metrics.PrecisionAtRecall(-1)
 
   def test_invalid_num_thresholds(self):
-    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
+    with self.assertRaisesRegex(ValueError, '`num_thresholds` must be > 0.'):
       metrics.PrecisionAtRecall(0.4, num_thresholds=-1)
 
 
@@ -1127,12 +1127,12 @@ class RecallAtPrecisionTest(test.TestCase, parameterized.TestCase):
     self.assertAlmostEqual(0, self.evaluate(result))
 
   def test_invalid_sensitivity(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r'`precision` must be in the range \[0, 1\].'):
+    with self.assertRaisesRegex(ValueError,
+                                r'`precision` must be in the range \[0, 1\].'):
       metrics.RecallAtPrecision(-1)
 
   def test_invalid_num_thresholds(self):
-    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 0.'):
+    with self.assertRaisesRegex(ValueError, '`num_thresholds` must be > 0.'):
       metrics.RecallAtPrecision(0.4, num_thresholds=-1)
 
 
@@ -1381,19 +1381,19 @@ class AUCTest(test.TestCase, parameterized.TestCase):
     self.assertAllClose(self.evaluate(result), expected_result, 1e-3)
 
   def test_invalid_num_thresholds(self):
-    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
+    with self.assertRaisesRegex(ValueError, '`num_thresholds` must be > 1.'):
       metrics.AUC(num_thresholds=-1)
 
-    with self.assertRaisesRegexp(ValueError, '`num_thresholds` must be > 1.'):
+    with self.assertRaisesRegex(ValueError, '`num_thresholds` must be > 1.'):
       metrics.AUC(num_thresholds=1)
 
   def test_invalid_curve(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 'Invalid AUC curve value "Invalid".'):
+    with self.assertRaisesRegex(ValueError,
+                                'Invalid AUC curve value "Invalid".'):
       metrics.AUC(curve='Invalid')
 
   def test_invalid_summation_method(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Invalid AUC summation method value "Invalid".'):
       metrics.AUC(summation_method='Invalid')
 
diff --git a/tensorflow/python/keras/metrics_test.py b/tensorflow/python/keras/metrics_test.py
index fee65be18c9..90d87b4041e 100644
--- a/tensorflow/python/keras/metrics_test.py
+++ b/tensorflow/python/keras/metrics_test.py
@@ -1392,7 +1392,7 @@ class MeanTensorTest(test.TestCase, parameterized.TestCase):
       self.assertEqual(m.dtype, dtypes.float32)
       self.assertEmpty(m.variables)
 
-      with self.assertRaisesRegexp(ValueError, 'does not have any result yet'):
+      with self.assertRaisesRegex(ValueError, 'does not have any result yet'):
         m.result()
 
       self.evaluate(m([[3], [5], [3]]))
@@ -1469,7 +1469,7 @@ class MeanTensorTest(test.TestCase, parameterized.TestCase):
   def test_invalid_value_shape(self):
     m = metrics.MeanTensor(dtype=dtypes.float64)
     m([1])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'MeanTensor input values must always have the same shape'):
       m([1, 5])
 
diff --git a/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py b/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py
index 964118136d4..48fa93459a7 100644
--- a/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py
+++ b/tensorflow/python/keras/mixed_precision/experimental/autocast_variable_test.py
@@ -285,15 +285,15 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase):
         self.assertAllClose(3., self.evaluate(x.assign_sub(v1)))
 
         # Attempt to assign float16 values
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             'conversion requested dtype float32 for Tensor with dtype float16'):
           self.evaluate(x.assign(v2))
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             'conversion requested dtype float32 for Tensor with dtype float16'):
           self.evaluate(x.assign_add(v2))
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             'conversion requested dtype float32 for Tensor with dtype float16'):
           self.evaluate(x.assign_sub(v2))
@@ -391,13 +391,13 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase):
   def test_invalid_wrapped_variable(self, distribution):
     with distribution.scope():
       # Wrap a non-variable
-      with self.assertRaisesRegexp(ValueError, 'variable must be of type'):
+      with self.assertRaisesRegex(ValueError, 'variable must be of type'):
         x = constant_op.constant([1.], dtype=dtypes.float32)
         autocast_variable.create_autocast_variable(x)
 
       # Wrap a non-floating point variable
-      with self.assertRaisesRegexp(ValueError,
-                                   'variable must be a floating point'):
+      with self.assertRaisesRegex(ValueError,
+                                  'variable must be a floating point'):
         x = get_var(1, dtypes.int32)
         autocast_variable.create_autocast_variable(x)
 
@@ -435,11 +435,10 @@ class AutoCastVariableTest(test.TestCase, parameterized.TestCase):
     with mirrored_strategy.MirroredStrategy(['/cpu:1', '/cpu:2']).scope():
       x = get_var(1., dtypes.float32)
       x = autocast_variable.create_autocast_variable(x)
-      self.assertRegexpMatches(
+      self.assertRegex(
           repr(x).replace('\n', ' '),
           '<AutoCastDistributedVariable dtype=float32 true_dtype=float32 '
-          'inner_variable=MirroredVariable.*>'
-      )
+          'inner_variable=MirroredVariable.*>')
 
   @parameterized.named_parameters(
       ('v1', gradient_descent_v1.GradientDescentOptimizer),
diff --git a/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check_test.py b/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check_test.py
index 33d5b9ddaa3..ccefa250d2d 100644
--- a/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check_test.py
+++ b/tensorflow/python/keras/mixed_precision/experimental/device_compatibility_check_test.py
@@ -45,10 +45,10 @@ class DeviceCompatibilityCheckTest(test.TestCase):
       device_compatibility_check._log_device_compatibility_check(
           policy_name, device_attr_list)
     if should_warn:
-      self.assertRegexpMatches(mock_warn.call_args[0][0], expected_regex)
+      self.assertRegex(mock_warn.call_args[0][0], expected_regex)
       mock_info.assert_not_called()
     else:
-      self.assertRegexpMatches(mock_info.call_args[0][0], expected_regex)
+      self.assertRegex(mock_info.call_args[0][0], expected_regex)
       mock_warn.assert_not_called()
 
   def test_supported(self):
diff --git a/tensorflow/python/keras/mixed_precision/experimental/get_layer_policy_test.py b/tensorflow/python/keras/mixed_precision/experimental/get_layer_policy_test.py
index eeba73550c2..f38bdfaf482 100644
--- a/tensorflow/python/keras/mixed_precision/experimental/get_layer_policy_test.py
+++ b/tensorflow/python/keras/mixed_precision/experimental/get_layer_policy_test.py
@@ -39,7 +39,7 @@ class GetLayerPolicyTest(test.TestCase):
     self.assertEqual(get_layer_policy.get_layer_policy(layer).name, 'float64')
 
   def test_error(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'get_policy can only be called on a layer, but got: 1'):
       get_layer_policy.get_layer_policy(1)
 
diff --git a/tensorflow/python/keras/mixed_precision/experimental/keras_test.py b/tensorflow/python/keras/mixed_precision/experimental/keras_test.py
index d2e80cfaf72..cfa2fbca080 100644
--- a/tensorflow/python/keras/mixed_precision/experimental/keras_test.py
+++ b/tensorflow/python/keras/mixed_precision/experimental/keras_test.py
@@ -227,9 +227,9 @@ class KerasLayerTest(keras_parameterized.TestCase):
         self.assertEqual(layer.v.dtype, dtypes.float64)
 
   def test_error_passing_policy_string_to_layer(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, "Cannot convert value 'mixed_float16' to a "
-                   "TensorFlow DType"):
+        'TensorFlow DType'):
       # This is not allowed, as otherwise a "mixed_float16" policy could be
       # created without an API call that has the name "experimental" in it.
       mp_test_util.MultiplyLayer(dtype='mixed_float16')
@@ -413,12 +413,12 @@ class KerasLayerTest(keras_parameterized.TestCase):
 
   def test_unsupported_strategy(self):
     strategy = create_central_storage_strategy()
-    with strategy.scope(), self.assertRaisesRegexp(
+    with strategy.scope(), self.assertRaisesRegex(
         ValueError, 'Mixed precision is not supported with the '
-                    'tf.distribute.Strategy: CentralStorageStrategy. Either '
-                    'stop using mixed precision by removing the use of the '
-                    '"mixed_float16" policy or use a different Strategy, e.g. '
-                    'a MirroredStrategy.'):
+        'tf.distribute.Strategy: CentralStorageStrategy. Either '
+        'stop using mixed precision by removing the use of the '
+        '"mixed_float16" policy or use a different Strategy, e.g. '
+        'a MirroredStrategy.'):
       mp_test_util.MultiplyLayer(dtype=policy.Policy('mixed_float16'))
     # Non-mixed policies are fine
     mp_test_util.MultiplyLayer(dtype=policy.Policy('float64'))
@@ -851,7 +851,7 @@ class KerasModelTest(keras_parameterized.TestCase):
         error_msg = 'Use a `tf.keras` Optimizer instead'
       else:
         error_msg = 'optimizer" must be an instance of '
-      with self.assertRaisesRegexp(ValueError, error_msg):
+      with self.assertRaisesRegex(ValueError, error_msg):
         model.compile(optimizers.SGD(1.), 'mse')
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
diff --git a/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py b/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
index 31d75642adb..350cfe6a09c 100644
--- a/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
+++ b/tensorflow/python/keras/mixed_precision/experimental/loss_scale_optimizer_test.py
@@ -330,7 +330,7 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
 
   def testPassingNoneToLossScale(self):
     opt = gradient_descent.SGD()
-    with self.assertRaisesRegexp(ValueError, r'loss_scale cannot be None'):
+    with self.assertRaisesRegex(ValueError, r'loss_scale cannot be None'):
       loss_scale_optimizer.LossScaleOptimizer(opt, None)
 
   @parameterized.named_parameters(*TESTCASES)
@@ -366,11 +366,11 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
     opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=10.)
     # Test that attributes defined by OptimizerV2 subclasses are not exposed in
     # LossScaleOptimizer, and that the error message is sensible.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         AttributeError,
         "'LossScaleOptimizer' object has no attribute 'epsilon'"):
       opt.epsilon  # pylint: disable=pointless-statement
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         AttributeError,
         "'LossScaleOptimizer' object has no attribute 'beta_1'"):
       opt.beta_1  # pylint: disable=pointless-statement
@@ -606,14 +606,14 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
         'Loss scaling is not supported with the tf.distribute.Strategy: '
         'CentralStorageStrategy. Try using a different Strategy, e.g. a '
         'MirroredStrategy')
-    with strategy.scope(), self.assertRaisesRegexp(ValueError, expected_error):
+    with strategy.scope(), self.assertRaisesRegex(ValueError, expected_error):
       loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD(), 1.)
     opt = loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD(), 1.)
     with strategy.scope():
       var = variables.Variable(1.0)
       loss = lambda: var * 2.0
       run_fn = lambda: opt.minimize(loss, [var])
-      with self.assertRaisesRegexp(ValueError, expected_error):
+      with self.assertRaisesRegex(ValueError, expected_error):
         strategy.experimental_run(run_fn)
 
 
diff --git a/tensorflow/python/keras/mixed_precision/experimental/mixed_precision_graph_rewrite_test.py b/tensorflow/python/keras/mixed_precision/experimental/mixed_precision_graph_rewrite_test.py
index d7454a89bad..b2c5f80544e 100644
--- a/tensorflow/python/keras/mixed_precision/experimental/mixed_precision_graph_rewrite_test.py
+++ b/tensorflow/python/keras/mixed_precision/experimental/mixed_precision_graph_rewrite_test.py
@@ -73,9 +73,9 @@ class MixedPrecisionTest(test.TestCase, parameterized.TestCase):
   def test_optimizer_errors(self):
     opt = gradient_descent_v2.SGD(1.0)
     opt = loss_scale_optimizer_v2.LossScaleOptimizer(opt, 'dynamic')
-    with self.assertRaisesRegexp(ValueError,
-                                 '"opt" must not already be an instance of a '
-                                 'LossScaleOptimizer.'):
+    with self.assertRaisesRegex(
+        ValueError, '"opt" must not already be an instance of a '
+        'LossScaleOptimizer.'):
       enable_mixed_precision_graph_rewrite(opt)
     self.assertFalse(config.get_optimizer_experimental_options()
                      .get('auto_mixed_precision', False))
@@ -83,8 +83,8 @@ class MixedPrecisionTest(test.TestCase, parameterized.TestCase):
   @testing_utils.enable_v2_dtype_behavior
   def test_error_if_policy_is_set(self):
     with policy.policy_scope('mixed_float16'):
-      with self.assertRaisesRegexp(
-          ValueError, 'the global Keras dtype Policy has been set'):
+      with self.assertRaisesRegex(ValueError,
+                                  'the global Keras dtype Policy has been set'):
         enable_mixed_precision_graph_rewrite(gradient_descent_v2.SGD(1.0))
     # Test no error is thrown when the policy is currently the default.
     enable_mixed_precision_graph_rewrite(gradient_descent_v2.SGD(1.0))
diff --git a/tensorflow/python/keras/mixed_precision/experimental/policy_test.py b/tensorflow/python/keras/mixed_precision/experimental/policy_test.py
index 81d461c304d..94880a9b239 100644
--- a/tensorflow/python/keras/mixed_precision/experimental/policy_test.py
+++ b/tensorflow/python/keras/mixed_precision/experimental/policy_test.py
@@ -73,42 +73,42 @@ class PolicyTest(test.TestCase, parameterized.TestCase):
   def test_policy_errors(self):
     # Test passing invalid strings
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Cannot convert value abc to a mixed precision Policy.'):
       mp_policy.Policy('abc')
 
     # Test passing a DType
-    with self.assertRaisesRegexp(TypeError,
-                                 "'name' must be a string, not a DType. "
-                                 "Instead, pass DType.name. Got: float16"):
+    with self.assertRaisesRegex(
+        TypeError, "'name' must be a string, not a DType. "
+        'Instead, pass DType.name. Got: float16'):
       mp_policy.Policy(dtypes.float16)
 
     # Test passing a non-DType invalid type
-    with self.assertRaisesRegexp(TypeError,
-                                 "'name' must be a string, but got: 5"):
+    with self.assertRaisesRegex(TypeError,
+                                "'name' must be a string, but got: 5"):
       mp_policy.Policy(5)
 
     # Test passing a now-removed policy ending in float32_vars
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Policies ending in \'_float32_vars\' have been removed '
-                    'from TensorFlow. Please use the \'mixed_float16\' or '
-                    '\'mixed_bfloat16\' policy instead. Got policy name: '
-                    '\'infer_float32_vars\''):
+        'from TensorFlow. Please use the \'mixed_float16\' or '
+        '\'mixed_bfloat16\' policy instead. Got policy name: '
+        '\'infer_float32_vars\''):
       mp_policy.Policy('infer_float32_vars')
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Policies ending in \'_float32_vars\' have been removed '
-                    'from TensorFlow. Please use the \'mixed_float16\' policy '
-                    'instead. Got policy name: \'float16_with_float32_vars\''):
+        'from TensorFlow. Please use the \'mixed_float16\' policy '
+        'instead. Got policy name: \'float16_with_float32_vars\''):
       mp_policy.Policy('float16_with_float32_vars')
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Policies ending in \'_float32_vars\' have been removed '
-                    'from TensorFlow. Please use the \'mixed_bfloat16\' policy '
-                    'instead. Got policy name: \'bfloat16_with_float32_vars\''):
+        'from TensorFlow. Please use the \'mixed_bfloat16\' policy '
+        'instead. Got policy name: \'bfloat16_with_float32_vars\''):
       mp_policy.Policy('bfloat16_with_float32_vars')
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Policies ending in \'_float32_vars\' have been removed '
-                    'from TensorFlow. Got policy name: '
-                    '\'int8_with_float32_vars\''):
+        'from TensorFlow. Got policy name: '
+        '\'int8_with_float32_vars\''):
       mp_policy.Policy('int8_with_float32_vars')
 
   @testing_utils.enable_v2_dtype_behavior
@@ -181,7 +181,7 @@ class PolicyTest(test.TestCase, parameterized.TestCase):
       if config_module.list_physical_devices('GPU'):
         mock_warn.assert_not_called()
       else:
-        self.assertRegexpMatches(
+        self.assertRegex(
             mock_warn.call_args[0][0],
             r'Mixed precision compatibility check \(mixed_float16\): WARNING.*')
 
@@ -292,9 +292,9 @@ class PolicyTest(test.TestCase, parameterized.TestCase):
     try:
       mixed_precision.enable_mixed_precision_graph_rewrite(
           gradient_descent.SGD(1.))
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'cannot be set to "mixed_float16", .* the mixed '
-                      'precision graph rewrite has already been enabled'):
+          'precision graph rewrite has already been enabled'):
         mp_policy.set_policy('mixed_float16')
       with mp_policy.policy_scope('float64'):
         pass  # Non-mixed policies are allowed
@@ -304,19 +304,16 @@ class PolicyTest(test.TestCase, parameterized.TestCase):
   @testing_utils.disable_v2_dtype_behavior
   def test_v1_dtype_behavior(self):
     # Setting global policies are not allowed with V1 dtype behavior
-    with self.assertRaisesRegexp(
-        ValueError,
-        'global policy can only be set in TensorFlow 2'):
+    with self.assertRaisesRegex(
+        ValueError, 'global policy can only be set in TensorFlow 2'):
       with mp_policy.policy_scope(mp_policy.Policy('_infer')):
         pass
-    with self.assertRaisesRegexp(
-        ValueError,
-        'global policy can only be set in TensorFlow 2'):
+    with self.assertRaisesRegex(
+        ValueError, 'global policy can only be set in TensorFlow 2'):
       with mp_policy.policy_scope(mp_policy.Policy('float32')):
         pass
-    with self.assertRaisesRegexp(
-        ValueError,
-        'global policy can only be set in TensorFlow 2'):
+    with self.assertRaisesRegex(
+        ValueError, 'global policy can only be set in TensorFlow 2'):
       with mp_policy.policy_scope(mp_policy.Policy('mixed_float16')):
         pass
 
diff --git a/tensorflow/python/keras/models_test.py b/tensorflow/python/keras/models_test.py
index a2887e24b56..ea0dc148326 100644
--- a/tensorflow/python/keras/models_test.py
+++ b/tensorflow/python/keras/models_test.py
@@ -363,16 +363,16 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
 
     model = _get_model()
 
-    with self.assertRaisesRegexp(ValueError, 'has not been compiled'):
+    with self.assertRaisesRegex(ValueError, 'has not been compiled'):
       models.clone_and_build_model(model, compile_clone=True)
 
     is_subclassed = (testing_utils.get_model_type() == 'subclass')
     # With placeholder creation
     new_model = models.clone_and_build_model(
         model, compile_clone=False, in_place_reset=is_subclassed)
-    with self.assertRaisesRegexp(RuntimeError, 'must compile'):
+    with self.assertRaisesRegex(RuntimeError, 'must compile'):
       new_model.evaluate(inp, out)
-    with self.assertRaisesRegexp(RuntimeError, 'must compile'):
+    with self.assertRaisesRegex(RuntimeError, 'must compile'):
       new_model.train_on_batch(inp, out)
     new_model.compile(
         testing_utils.get_v2_optimizer('rmsprop'),
@@ -387,9 +387,9 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
         input_tensors=input_a,
         compile_clone=False,
         in_place_reset=is_subclassed)
-    with self.assertRaisesRegexp(RuntimeError, 'must compile'):
+    with self.assertRaisesRegex(RuntimeError, 'must compile'):
       new_model.evaluate(inp, out)
-    with self.assertRaisesRegexp(RuntimeError, 'must compile'):
+    with self.assertRaisesRegex(RuntimeError, 'must compile'):
       new_model.train_on_batch(inp, out)
     new_model.compile(
         testing_utils.get_v2_optimizer('rmsprop'),
@@ -512,8 +512,7 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
         optimizer_config = optimizer.get_config()
     with ops.Graph().as_default():
       with self.session():
-        with self.assertRaisesRegexp(ValueError,
-                                     'Cannot use the given session'):
+        with self.assertRaisesRegex(ValueError, 'Cannot use the given session'):
           models.clone_and_build_model(model, compile_clone=True)
         # The optimizer_config object allows the model to be cloned in a
         # different graph.
diff --git a/tensorflow/python/keras/optimizer_v2/gradient_descent_test.py b/tensorflow/python/keras/optimizer_v2/gradient_descent_test.py
index d87e6fd1dfa..0084f04bdd9 100644
--- a/tensorflow/python/keras/optimizer_v2/gradient_descent_test.py
+++ b/tensorflow/python/keras/optimizer_v2/gradient_descent_test.py
@@ -686,7 +686,7 @@ class MomentumOptimizerTest(test.TestCase, parameterized.TestCase):
     self.assertTrue(opt3.nesterov)
 
   def testNesterovWithoutMomentum(self):
-    with self.assertRaisesRegexp(ValueError, "must be between"):
+    with self.assertRaisesRegex(ValueError, "must be between"):
       gradient_descent.SGD(learning_rate=1.0, momentum=2.0)
 
   def testConstructMomentumWithLR(self):
diff --git a/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py b/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py
index 9069fc44b9c..05f4b1d17f3 100644
--- a/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py
+++ b/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py
@@ -166,7 +166,7 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
         var1 = variables.Variable([3.0, 4.0], dtype=dtype)
         loss = lambda: 5 * var0  # pylint: disable=cell-var-from-loop
         sgd_op = gradient_descent.SGD(3.0)
-        with self.assertRaisesRegexp(ValueError, 'No gradients'):
+        with self.assertRaisesRegex(ValueError, 'No gradients'):
           # var1 has no gradient
           sgd_op.minimize(loss, var_list=[var1])
 
@@ -179,8 +179,8 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
         loss = lambda: constant_op.constant(5.0)
 
         sgd_op = gradient_descent.SGD(3.0)
-        with self.assertRaisesRegexp(ValueError,
-                                     'No gradients provided for any variable'):
+        with self.assertRaisesRegex(ValueError,
+                                    'No gradients provided for any variable'):
           sgd_op.minimize(loss, var_list=[var0, var1])
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -190,8 +190,8 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
         var0 = variables.Variable([1.0, 2.0], dtype=dtype)
         var1 = variables.Variable([3.0, 4.0], dtype=dtype)
         sgd_op = gradient_descent.SGD(3.0)
-        with self.assertRaisesRegexp(ValueError,
-                                     'No gradients provided for any variable'):
+        with self.assertRaisesRegex(ValueError,
+                                    'No gradients provided for any variable'):
           sgd_op.apply_gradients([(None, var0), (None, var1)])
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -357,12 +357,12 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testInvalidClipNorm(self):
-    with self.assertRaisesRegexp(ValueError, '>= 0'):
+    with self.assertRaisesRegex(ValueError, '>= 0'):
       gradient_descent.SGD(learning_rate=1.0, clipnorm=-1.0)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testInvalidKwargs(self):
-    with self.assertRaisesRegexp(TypeError, 'Unexpected keyword argument'):
+    with self.assertRaisesRegex(TypeError, 'Unexpected keyword argument'):
       gradient_descent.SGD(learning_rate=1.0, invalidkwargs=1.0)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -396,7 +396,7 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
       # Assert set_weights with ValueError since weight list does not match.
       self.evaluate(variables.global_variables_initializer())
       weights = opt1.get_weights()
-      with self.assertRaisesRegexp(ValueError, 'but the optimizer was'):
+      with self.assertRaisesRegex(ValueError, 'but the optimizer was'):
         opt2.set_weights(weights)
 
       # Assert set_weights and variables get updated to same value.
@@ -566,7 +566,7 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
     loss = lambda: losses.mean_squared_error(model(x), y)
     var_list = lambda: model.trainable_weights
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Weights for model .* have not yet been created'):
       var_list()
     train_op = opt.minimize(loss, var_list)
diff --git a/tensorflow/python/keras/saving/hdf5_format_test.py b/tensorflow/python/keras/saving/hdf5_format_test.py
index b079bf8cac8..22e19e4859c 100644
--- a/tensorflow/python/keras/saving/hdf5_format_test.py
+++ b/tensorflow/python/keras/saving/hdf5_format_test.py
@@ -312,10 +312,10 @@ class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
       model.compile(loss=keras.losses.MSE,
                     optimizer='rmsprop',
                     metrics=[keras.metrics.categorical_accuracy])
-      with self.assertRaisesRegexp(ValueError,
-                                   r'Layer #0 \(named \"d1\"\) expects 1 '
-                                   r'weight\(s\), but the saved weights have 2 '
-                                   r'element\(s\)\.'):
+      with self.assertRaisesRegex(
+          ValueError, r'Layer #0 \(named \"d1\"\) expects 1 '
+          r'weight\(s\), but the saved weights have 2 '
+          r'element\(s\)\.'):
         hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
 
       hdf5_format.load_weights_from_hdf5_group_by_name(
@@ -355,12 +355,12 @@ class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
       model.compile(loss=keras.losses.MSE,
                     optimizer=keras.optimizers.RMSprop(lr=0.0001),
                     metrics=[keras.metrics.categorical_accuracy])
-      with self.assertRaisesRegexp(ValueError,
-                                   r'Layer #0 \(named "d1"\), weight '
-                                   r'<tf\.Variable \'d1_1\/kernel:0\' '
-                                   r'shape=\(3, 10\) dtype=float32> has '
-                                   r'shape \(3, 10\), but the saved weight has '
-                                   r'shape \(3, 5\)\.'):
+      with self.assertRaisesRegex(
+          ValueError, r'Layer #0 \(named "d1"\), weight '
+          r'<tf\.Variable \'d1_1\/kernel:0\' '
+          r'shape=\(3, 10\) dtype=float32> has '
+          r'shape \(3, 10\), but the saved weight has '
+          r'shape \(3, 5\)\.'):
         hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
 
       hdf5_format.load_weights_from_hdf5_group_by_name(
@@ -773,8 +773,7 @@ class TestWholeModelSaving(keras_parameterized.TestCase):
       return
 
     h5file = h5py.File(saved_model_dir, 'r')
-    self.assertRegexpMatches(
-        h5file.attrs['keras_version'], r'^[\d]+\.[\d]+\.[\S]+$')
+    self.assertRegex(h5file.attrs['keras_version'], r'^[\d]+\.[\d]+\.[\S]+$')
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_functional_model_with_custom_loss_and_metric(self):
@@ -968,9 +967,7 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase, parameterized.TestCase):
       prefix = os.path.join(temp_dir, 'ckpt')
       with test.mock.patch.object(logging, 'warning') as mock_log:
         model.save_weights(prefix)
-        self.assertRegexpMatches(
-            str(mock_log.call_args),
-            'Keras optimizer')
+        self.assertRegex(str(mock_log.call_args), 'Keras optimizer')
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_tensorflow_format_overwrite(self):
@@ -1181,12 +1178,12 @@ class TestWeightSavingAndLoadingTFFormat(test.TestCase, parameterized.TestCase):
     save_path = trackable.Checkpoint().save(
         os.path.join(self.get_temp_dir(), 'ckpt'))
     m = DummySubclassModel()
-    with self.assertRaisesRegexp(AssertionError, 'Nothing to load'):
+    with self.assertRaisesRegex(AssertionError, 'Nothing to load'):
       m.load_weights(save_path)
     m.dense = keras.layers.Dense(2)
     m.dense(constant_op.constant([[1.]]))
-    with self.assertRaisesRegexp(
-        AssertionError, 'Nothing except the root object matched'):
+    with self.assertRaisesRegex(AssertionError,
+                                'Nothing except the root object matched'):
       m.load_weights(save_path)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
diff --git a/tensorflow/python/keras/saving/save_test.py b/tensorflow/python/keras/saving/save_test.py
index 5c5846fe738..59fe6c2c756 100644
--- a/tensorflow/python/keras/saving/save_test.py
+++ b/tensorflow/python/keras/saving/save_test.py
@@ -76,7 +76,7 @@ class TestSaveModel(test.TestCase, parameterized.TestCase):
     path = os.path.join(self.get_temp_dir(), 'model')
     save.save_model(self.model, path, save_format='h5')
     self.assert_h5_format(path)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         NotImplementedError,
         'requires the model to be a Functional model or a Sequential model.'):
       save.save_model(self.subclassed_model, path, save_format='h5')
@@ -86,7 +86,7 @@ class TestSaveModel(test.TestCase, parameterized.TestCase):
     path = os.path.join(self.get_temp_dir(), 'model')
     save.save_model(self.model, path, save_format='tf')
     self.assert_saved_model(path)
-    with self.assertRaisesRegexp(ValueError, 'input shapes have not been set'):
+    with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
       save.save_model(self.subclassed_model, path, save_format='tf')
     self.subclassed_model.predict(np.random.random((3, 5)))
     save.save_model(self.subclassed_model, path, save_format='tf')
diff --git a/tensorflow/python/keras/saving/saved_model/saved_model_test.py b/tensorflow/python/keras/saving/saved_model/saved_model_test.py
index 3f55d5f40b5..e76e524f93b 100644
--- a/tensorflow/python/keras/saving/saved_model/saved_model_test.py
+++ b/tensorflow/python/keras/saving/saved_model/saved_model_test.py
@@ -855,8 +855,7 @@ class TestModelSavingAndLoadingV2(keras_parameterized.TestCase):
 
     loaded = keras_load.load(saved_model_dir)
     self.assertAllEqual([[1.0]], self.evaluate(loaded(inp)))
-    with self.assertRaisesRegexp(ValueError,
-                                 'call function was not serialized'):
+    with self.assertRaisesRegex(ValueError, 'call function was not serialized'):
       loaded.layer(inp)
 
 
@@ -1039,8 +1038,8 @@ class MetricTest(test.TestCase, parameterized.TestCase):
 
       self.evaluate([v.initializer for v in metric.variables])
 
-      with self.assertRaisesRegexp(ValueError,
-                                   'Unable to restore custom object'):
+      with self.assertRaisesRegex(ValueError,
+                                  'Unable to restore custom object'):
         self._test_metric_save_and_load(metric, save_dir, num_tensor_args)
       with generic_utils.CustomObjectScope({'CustomMetric': CustomMetric}):
         loaded = self._test_metric_save_and_load(
diff --git a/tensorflow/python/keras/saving/saved_model_experimental_test.py b/tensorflow/python/keras/saving/saved_model_experimental_test.py
index 2f3cf7cf9c9..281a58a1076 100644
--- a/tensorflow/python/keras/saving/saved_model_experimental_test.py
+++ b/tensorflow/python/keras/saving/saved_model_experimental_test.py
@@ -496,12 +496,12 @@ class TestModelSavedModelExport(test.TestCase, parameterized.TestCase):
   def testSaveSequentialModelWithoutInputShapes(self):
     model = sequential_model_without_input_shape(True)
     # A Sequential model that hasn't been built should raise an error.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Weights for sequential model have not yet been created'):
       keras_saved_model.export_saved_model(model, '')
 
     # Even with input_signature, the model's weights has not been created.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'Weights for sequential model have not yet been created'):
       saved_model_dir = self._save_model_dir()
       keras_saved_model.export_saved_model(
diff --git a/tensorflow/python/keras/saving/saving_utils_test.py b/tensorflow/python/keras/saving/saving_utils_test.py
index bc0ea6edf11..574e42a2aff 100644
--- a/tensorflow/python/keras/saving/saving_utils_test.py
+++ b/tensorflow/python/keras/saving/saving_utils_test.py
@@ -70,8 +70,7 @@ class TraceModelCallTest(keras_parameterized.TestCase):
     inputs = array_ops.ones((8, 5))
 
     if input_dim is None:
-      with self.assertRaisesRegexp(ValueError,
-                                   'input shapes have not been set'):
+      with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
         saving_utils.trace_model_call(model)
       model._set_inputs(inputs)
 
@@ -130,8 +129,7 @@ class TraceModelCallTest(keras_parameterized.TestCase):
     input_b_np = np.random.random((10, input_dim)).astype(np.float32)
 
     if testing_utils.get_model_type() == 'subclass':
-      with self.assertRaisesRegexp(ValueError,
-                                   'input shapes have not been set'):
+      with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
         saving_utils.trace_model_call(model)
 
     model.compile(
@@ -182,7 +180,7 @@ class TraceModelCallTest(keras_parameterized.TestCase):
     model = testing_utils.get_small_sequential_mlp(10, 3, None)
     inputs = array_ops.ones((8, 5))
 
-    with self.assertRaisesRegexp(ValueError, 'input shapes have not been set'):
+    with self.assertRaisesRegex(ValueError, 'input shapes have not been set'):
       saving_utils.trace_model_call(model)
 
     fn = saving_utils.trace_model_call(
diff --git a/tensorflow/python/keras/tests/add_loss_correctness_test.py b/tensorflow/python/keras/tests/add_loss_correctness_test.py
index f99b285489d..8494d6e31a0 100644
--- a/tensorflow/python/keras/tests/add_loss_correctness_test.py
+++ b/tensorflow/python/keras/tests/add_loss_correctness_test.py
@@ -435,7 +435,7 @@ class TestAddLossCorrectness(keras_parameterized.TestCase):
       inputs = Input(shape=(1,))
       outputs = testing_utils.Bias()(inputs)
       model = Model(inputs, outputs)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'Expected a symbolic Tensors or a callable for the loss value'):
         model.add_loss(1.)
@@ -446,7 +446,7 @@ class TestAddLossCorrectness(keras_parameterized.TestCase):
       inputs = Input(shape=(1,))
       outputs = testing_utils.Bias()(inputs)
       model = Model(inputs, outputs)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           'Expected a symbolic Tensors or a callable for the loss value'):
         model.add_loss(model.weights[0])
diff --git a/tensorflow/python/keras/tests/model_subclassing_test.py b/tensorflow/python/keras/tests/model_subclassing_test.py
index 8096b0f7586..7985f79fc08 100644
--- a/tensorflow/python/keras/tests/model_subclassing_test.py
+++ b/tensorflow/python/keras/tests/model_subclassing_test.py
@@ -79,7 +79,7 @@ class ModelSubclassingTest(keras_parameterized.TestCase):
         return 1.
 
     m = ModelWithProperty()
-    with self.assertRaisesRegexp(AttributeError, 'read_only'):
+    with self.assertRaisesRegex(AttributeError, 'read_only'):
       m.read_only = 2.
 
   def test_custom_build_with_fit(self):
@@ -140,8 +140,8 @@ class ModelSubclassingTest(keras_parameterized.TestCase):
     self.assertFalse(model.built, 'Model should not have been built')
     self.assertFalse(model.weights, ('Model should have no weights since it '
                                      'has not been built.'))
-    with self.assertRaisesRegexp(
-        ValueError, 'input shape is not one of the valid types'):
+    with self.assertRaisesRegex(ValueError,
+                                'input shape is not one of the valid types'):
       model.build(input_shape=tensor_shape.Dimension(input_dim))
 
   def test_embed_dtype_with_subclass_build(self):
@@ -177,7 +177,7 @@ class ModelSubclassingTest(keras_parameterized.TestCase):
     self.assertFalse(model.built, 'Model should not have been built')
     self.assertFalse(model.weights, ('Model should have no weights since it '
                                      'has not been built.'))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'if your layers do not support float type inputs'):
       model.build(input_shape=(35, 20))
 
@@ -652,8 +652,8 @@ class CustomCallSignatureTests(test.TestCase, parameterized.TestCase):
     self.assertFalse(model.built, 'Model should not have been built')
     self.assertFalse(model.weights, ('Model should have no weights since it '
                                      'has not been built.'))
-    with self.assertRaisesRegexp(
-        ValueError, 'cannot build your model if it has positional'):
+    with self.assertRaisesRegex(ValueError,
+                                'cannot build your model if it has positional'):
       model.build(input_shape=[first_input_shape, second_input_shape])
 
   def test_kwargs_in_signature(self):
@@ -689,20 +689,20 @@ class CustomCallSignatureTests(test.TestCase, parameterized.TestCase):
     y = np.ones((10, 1))
     m = ModelWithPositionalArgs()
     m.compile('sgd', 'mse')
-    with self.assertRaisesRegexp(ValueError, r'Models passed to `fit`'):
+    with self.assertRaisesRegex(ValueError, r'Models passed to `fit`'):
       m.fit(x, y, batch_size=2)
-    with self.assertRaisesRegexp(ValueError, r'Models passed to `evaluate`'):
+    with self.assertRaisesRegex(ValueError, r'Models passed to `evaluate`'):
       m.evaluate(x, y, batch_size=2)
-    with self.assertRaisesRegexp(ValueError, r'Models passed to `predict`'):
+    with self.assertRaisesRegex(ValueError, r'Models passed to `predict`'):
       m.predict(x, batch_size=2)
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Models passed to `train_on_batch`'):
+    with self.assertRaisesRegex(ValueError,
+                                r'Models passed to `train_on_batch`'):
       m.train_on_batch(x, y)
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Models passed to `test_on_batch`'):
+    with self.assertRaisesRegex(ValueError,
+                                r'Models passed to `test_on_batch`'):
       m.test_on_batch(x, y)
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Models passed to `predict_on_batch`'):
+    with self.assertRaisesRegex(ValueError,
+                                r'Models passed to `predict_on_batch`'):
       m.predict_on_batch(x)
 
   def test_deepcopy(self):
diff --git a/tensorflow/python/keras/tests/saver_test.py b/tensorflow/python/keras/tests/saver_test.py
index d3c87cf1d88..28c65961a53 100644
--- a/tensorflow/python/keras/tests/saver_test.py
+++ b/tensorflow/python/keras/tests/saver_test.py
@@ -131,8 +131,8 @@ class TrackableCompatibilityTests(test.TestCase):
       saver.restore(sess=sess, save_path=save_path)
       self.assertEqual(before_second_restore_ops,
                        restore_graph.get_operations())
-      with self.assertRaisesRegexp(errors.NotFoundError,
-                                   "Could not find some variables"):
+      with self.assertRaisesRegex(errors.NotFoundError,
+                                  "Could not find some variables"):
         saver.restore(sess=sess, save_path=second_path)
 
   def testLoadFromObjectBasedEager(self):
diff --git a/tensorflow/python/keras/tests/summary_ops_test.py b/tensorflow/python/keras/tests/summary_ops_test.py
index a62abdccbba..5c9fee91b9c 100644
--- a/tensorflow/python/keras/tests/summary_ops_test.py
+++ b/tensorflow/python/keras/tests/summary_ops_test.py
@@ -90,7 +90,7 @@ class SummaryOpsTest(test_util.TensorFlowTestCase):
     with test.mock.patch.object(logging, 'warn') as mock_log:
       self.assertFalse(
           summary_ops.keras_model(name='my_name', data=model, step=1))
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_log.call_args), 'Model failed to serialize as JSON.')
 
   @test_util.run_v2_only
@@ -102,7 +102,7 @@ class SummaryOpsTest(test_util.TensorFlowTestCase):
         mock_to_json.side_effect = Exception('oops')
         self.assertFalse(
             summary_ops.keras_model(name='my_name', data=model, step=1))
-        self.assertRegexpMatches(
+        self.assertRegex(
             str(mock_log.call_args),
             'Model failed to serialize as JSON. Ignoring... oops')
 
diff --git a/tensorflow/python/keras/tests/tracking_test.py b/tensorflow/python/keras/tests/tracking_test.py
index b5ce6911d92..a05706eec7a 100644
--- a/tensorflow/python/keras/tests/tracking_test.py
+++ b/tensorflow/python/keras/tests/tracking_test.py
@@ -308,7 +308,7 @@ class MappingTests(test.TestCase):
     model = training.Model()
     model.sub = a
     save_path = os.path.join(self.get_temp_dir(), "ckpt")
-    with self.assertRaisesRegexp(ValueError, "non-string key"):
+    with self.assertRaisesRegex(ValueError, "non-string key"):
       model.save_weights(save_path)
 
   def testDictWrapperNoDependency(self):
@@ -361,7 +361,7 @@ class MappingTests(test.TestCase):
     model.d["a"] = []
     model.d.pop("a")
     save_path = os.path.join(self.get_temp_dir(), "ckpt")
-    with self.assertRaisesRegexp(ValueError, "Unable to save"):
+    with self.assertRaisesRegex(ValueError, "Unable to save"):
       model.save_weights(save_path)
 
   def testExternalModificationNoSave(self):
@@ -370,7 +370,7 @@ class MappingTests(test.TestCase):
     model.d = external_reference
     external_reference["a"] = []
     save_path = os.path.join(self.get_temp_dir(), "ckpt")
-    with self.assertRaisesRegexp(ValueError, "modified outside the wrapper"):
+    with self.assertRaisesRegex(ValueError, "modified outside the wrapper"):
       model.save_weights(save_path)
 
   def testOverwriteCanStillSave(self):
@@ -602,7 +602,7 @@ class InterfaceTests(test.TestCase):
     checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
     a.l2 = []
     a.l2.insert(1, module.Module())
-    with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
+    with self.assertRaisesRegex(ValueError, "A list element was replaced"):
       checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
 
 
diff --git a/tensorflow/python/keras/tests/tracking_util_test.py b/tensorflow/python/keras/tests/tracking_util_test.py
index ee5d7428fcc..1c55c366d82 100644
--- a/tensorflow/python/keras/tests/tracking_util_test.py
+++ b/tensorflow/python/keras/tests/tracking_util_test.py
@@ -515,7 +515,7 @@ class CheckpointingTests(parameterized.TestCase, test.TestCase):
     new_root.optimizer = adam.Adam(0.1)
     slot_status.assert_existing_objects_matched()
     if not context.executing_eagerly():
-      with self.assertRaisesRegexp(AssertionError, "Unresolved object"):
+      with self.assertRaisesRegex(AssertionError, "Unresolved object"):
         slot_status.assert_consumed()
     self.assertEqual(12., self.evaluate(new_root.var))
     if context.executing_eagerly():
@@ -847,11 +847,11 @@ class CheckpointCompatibilityTests(test.TestCase):
       else:
         # When graph building, we haven't read any keys, so we don't know
         # whether the restore will be complete.
-        with self.assertRaisesRegexp(AssertionError, "not restored"):
+        with self.assertRaisesRegex(AssertionError, "not restored"):
           status.assert_consumed()
-        with self.assertRaisesRegexp(AssertionError, "not restored"):
+        with self.assertRaisesRegex(AssertionError, "not restored"):
           status.assert_existing_objects_matched()
-        with self.assertRaisesRegexp(AssertionError, "not restored"):
+        with self.assertRaisesRegex(AssertionError, "not restored"):
           status.assert_nontrivial_match()
       status.run_restore_ops()
       self._check_sentinels(root)
diff --git a/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py b/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py
index d38ab320592..1a699803e1a 100644
--- a/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py
+++ b/tensorflow/python/keras/tests/tracking_util_with_v1_optimizers_test.py
@@ -649,11 +649,11 @@ class CheckpointCompatibilityTests(test.TestCase):
       else:
         # When graph building, we haven't read any keys, so we don't know
         # whether the restore will be complete.
-        with self.assertRaisesRegexp(AssertionError, "not restored"):
+        with self.assertRaisesRegex(AssertionError, "not restored"):
           status.assert_consumed()
-        with self.assertRaisesRegexp(AssertionError, "not restored"):
+        with self.assertRaisesRegex(AssertionError, "not restored"):
           status.assert_existing_objects_matched()
-        with self.assertRaisesRegexp(AssertionError, "not restored"):
+        with self.assertRaisesRegex(AssertionError, "not restored"):
           status.assert_nontrivial_match()
       status.run_restore_ops()
       self._check_sentinels(root)
diff --git a/tensorflow/python/keras/utils/version_utils_test.py b/tensorflow/python/keras/utils/version_utils_test.py
index 41370e316af..65eda4d2bbd 100644
--- a/tensorflow/python/keras/utils/version_utils_test.py
+++ b/tensorflow/python/keras/utils/version_utils_test.py
@@ -145,7 +145,7 @@ class SplitUtilsTest(keras_parameterized.TestCase):
       def call(self, inputs):
         return 2 * inputs
 
-    with self.assertRaisesRegexp(TypeError, 'instantiate abstract class'):
+    with self.assertRaisesRegex(TypeError, 'instantiate abstract class'):
       AbstractModel()
 
     model = MyModel()
@@ -181,7 +181,7 @@ class SplitUtilsTest(keras_parameterized.TestCase):
     model.compile('sgd', 'mse')
     x, y = np.ones((10, 10)), np.ones((10, 1))
     with ops.get_default_graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'instance was constructed with eager mode enabled'):
         model.fit(x, y, batch_size=2)
 
diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py
index dbff3a1b2f7..6bf6311aafd 100644
--- a/tensorflow/python/kernel_tests/array_ops_test.py
+++ b/tensorflow/python/kernel_tests/array_ops_test.py
@@ -117,7 +117,7 @@ class BatchMatrixTransposeTest(test_util.TensorFlowTestCase):
 
   def testTensorWithStaticRankLessThanTwoRaisesBecauseNotAMatrix(self):
     vector = [1, 2, 3]
-    with self.assertRaisesRegexp(ValueError, "should be a "):
+    with self.assertRaisesRegex(ValueError, "should be a "):
       array_ops.matrix_transpose(vector)
 
 
@@ -249,28 +249,28 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
     with self.cached_session():
       tensor = array_ops.placeholder(dtypes.int32, shape=[None, 2])
       mask = array_ops.placeholder(dtypes.bool, shape=None)
-      with self.assertRaisesRegexp(ValueError, "dimensions must be specified"):
+      with self.assertRaisesRegex(ValueError, "dimensions must be specified"):
         array_ops.boolean_mask(tensor, mask)
 
   def testMaskHasMoreDimsThanTensorRaises(self):
     mask = [[True, True], [False, False]]
     tensor = [1, 2, 3, 4]
     with self.cached_session():
-      with self.assertRaisesRegexp(ValueError, "incompatible"):
+      with self.assertRaisesRegex(ValueError, "incompatible"):
         array_ops.boolean_mask(tensor, mask).eval()
 
   def testMaskIsScalarRaises(self):
     mask = True
     tensor = 1
     with self.cached_session():
-      with self.assertRaisesRegexp(ValueError, "mask.*scalar"):
+      with self.assertRaisesRegex(ValueError, "mask.*scalar"):
         array_ops.boolean_mask(tensor, mask).eval()
 
   def testMaskShapeDifferentThanFirstPartOfTensorShapeRaises(self):
     mask = [True, True, True]
     tensor = [[1, 2], [3, 4]]
     with self.cached_session():
-      with self.assertRaisesRegexp(ValueError, "incompatible"):
+      with self.assertRaisesRegex(ValueError, "incompatible"):
         array_ops.boolean_mask(tensor, mask).eval()
 
   @test_util.run_deprecated_v1
@@ -331,7 +331,7 @@ class OperatorShapeTest(test_util.TensorFlowTestCase):
     matrix_squeezed = array_ops.squeeze(matrix, [0])
     self.assertEqual(matrix_squeezed.get_shape(), (3))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         Exception, "Can not squeeze dim.1., expected a dimension of 1, got 3"):
       matrix_squeezed = array_ops.squeeze(matrix, [1])
 
@@ -341,8 +341,8 @@ class OperatorShapeTest(test_util.TensorFlowTestCase):
     self.assertEqual(matrix_squeezed.get_shape(), (3))
 
   def testExpandDimsWithNonScalarDim(self):
-    with self.assertRaisesRegexp(Exception,
-                                 "must be a tensor with a single value"):
+    with self.assertRaisesRegex(Exception,
+                                "must be a tensor with a single value"):
       array_ops.expand_dims(1, axis=[0, 1])
 
 
@@ -403,11 +403,11 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testInvalidAxis(self):
     x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
-    with self.assertRaisesRegexp(ValueError, "is out of valid range"):
+    with self.assertRaisesRegex(ValueError, "is out of valid range"):
       array_ops.reverse_v2(x_np, [-30])
-    with self.assertRaisesRegexp(ValueError, "is out of valid range"):
+    with self.assertRaisesRegex(ValueError, "is out of valid range"):
       array_ops.reverse_v2(x_np, [2])
-    with self.assertRaisesRegexp(ValueError, "axis 0 specified more than once"):
+    with self.assertRaisesRegex(ValueError, "axis 0 specified more than once"):
       array_ops.reverse_v2(x_np, [0, -2])
 
   # This is the version of reverse that uses axis indices rather than
@@ -421,13 +421,13 @@ class ReverseV2Test(test_util.TensorFlowTestCase):
     x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
     axis = array_ops.placeholder(dtypes.int32)
     with self.cached_session():
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "is out of.*range"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "is out of.*range"):
         array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [-30]})
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "is out of.*range"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "is out of.*range"):
         array_ops.reverse_v2(x_np, axis).eval(feed_dict={axis: [2]})
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "(axis 0 specified more than once|canonicalized axis 0 was repeated.)"
       ):
@@ -726,7 +726,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
       # ellipsis at middle
       _ = checker[0:1, ..., 0:1]
       # multiple ellipses not allowed
-      with self.assertRaisesRegexp(ValueError, "Multiple ellipses"):
+      with self.assertRaisesRegex(ValueError, "Multiple ellipses"):
         _ = checker[..., :, ...].eval()
 
   @test_util.run_deprecated_v1
@@ -765,17 +765,17 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
     with self.session(use_gpu=True):
       checker = StridedSliceChecker(self, StridedSliceChecker.REF_TENSOR)
       expected = re.escape(array_ops._SLICE_TYPE_ERROR)
-      with self.assertRaisesRegexp(TypeError, expected):
+      with self.assertRaisesRegex(TypeError, expected):
         _ = checker["foo"]
-      with self.assertRaisesRegexp(TypeError, expected):
+      with self.assertRaisesRegex(TypeError, expected):
         _ = checker[constant_op.constant("foo")]
-      with self.assertRaisesRegexp(TypeError, expected):
+      with self.assertRaisesRegex(TypeError, expected):
         _ = checker[0.0]
-      with self.assertRaisesRegexp(TypeError, expected):
+      with self.assertRaisesRegex(TypeError, expected):
         _ = checker[constant_op.constant(0.0)]
-      with self.assertRaisesRegexp(TypeError, expected):
+      with self.assertRaisesRegex(TypeError, expected):
         _ = checker[constant_op.constant([1, 2, 3])]
-      with self.assertRaisesRegexp(TypeError, expected):
+      with self.assertRaisesRegex(TypeError, expected):
         _ = checker[[2.1, -0.7, 1.5]]
 
   @test_util.run_deprecated_v1
@@ -973,9 +973,9 @@ class StridedSliceGradTest(test_util.TensorFlowTestCase):
       _ = grad[3:0:-2, 1:3, 2]
       _ = grad[:, -1, :]
       _ = grad[:, -2, :]
-      with self.assertRaisesRegexp(ValueError, "out of bounds"):
+      with self.assertRaisesRegex(ValueError, "out of bounds"):
         _ = grad[:, -200, :]
-      with self.assertRaisesRegexp(ValueError, "out of bounds"):
+      with self.assertRaisesRegex(ValueError, "out of bounds"):
         _ = grad[:, 200, :]
 
       # Test numpy array type mask
@@ -1046,7 +1046,7 @@ class StridedSliceGradTypeTest(test_util.TensorFlowTestCase):
       begin = constant_op.constant([0, 0, 0], dtype=dtypes.int32)
       end = constant_op.constant([4, 1, 1], dtype=dtypes.int64)
       strides = constant_op.constant([1, 1, 1], dtype=dtypes.int64)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           TypeError, "Input 'begin' of 'StridedSliceGrad' Op has type int32"
           " that does not match type int64 of argument 'shape'"):
         dx = array_ops.strided_slice_grad(original_shape, begin, end, strides,
@@ -1148,7 +1148,7 @@ class SliceAssignTest(test_util.TensorFlowTestCase):
   def testInvalidSlice(self):
     with self.cached_session() as sess:
       foo = constant_op.constant([1, 2, 3])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Sliced assignment"
           " is only supported for variables"):
         bar = foo[:2].assign(constant_op.constant([1, 2]))
@@ -1196,7 +1196,7 @@ class SliceAssignTest(test_util.TensorFlowTestCase):
 
   @test_util.run_v1_only("b/120545219")
   def testUninitialized(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.FailedPreconditionError,
         "Attempting to use uninitialized value Variable"):
       with self.cached_session() as sess:
@@ -1268,7 +1268,7 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
 
   def testExceptions(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(ValueError, "maxlen must be scalar"):
+      with self.assertRaisesRegex(ValueError, "maxlen must be scalar"):
         array_ops.sequence_mask([10, 20], [10, 20])
 
   @test_util.run_deprecated_v1
@@ -1440,8 +1440,8 @@ class UnravelIndexTest(test_util.TensorFlowTestCase):
   def testUnravelIndexZeroDim(self):
     with self.cached_session():
       for dtype in [dtypes.int32, dtypes.int64]:
-        with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                     "index is out of bound as with dims"):
+        with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                    "index is out of bound as with dims"):
           indices = constant_op.constant([2, 5, 7], dtype=dtype)
           dims = constant_op.constant([3, 0], dtype=dtype)
           self.evaluate(array_ops.unravel_index(indices=indices, dims=dims))
diff --git a/tensorflow/python/kernel_tests/barrier_ops_test.py b/tensorflow/python/kernel_tests/barrier_ops_test.py
index 60fe6f0eecd..9aef798bc90 100644
--- a/tensorflow/python/kernel_tests/barrier_ops_test.py
+++ b/tensorflow/python/kernel_tests/barrier_ops_test.py
@@ -78,17 +78,17 @@ class BarrierTest(test.TestCase):
       insert_0_op = b.insert_many(0, keys, [10.0, 20.0, 30.0])
       insert_1_op = b.insert_many(1, keys, [100.0, 200.0, 300.0])
 
-      self.assertEquals(size_t.eval(), [0])
+      self.assertEqual(size_t.eval(), [0])
       insert_0_op.run()
-      self.assertEquals(size_t.eval(), [0])
+      self.assertEqual(size_t.eval(), [0])
       insert_1_op.run()
-      self.assertEquals(size_t.eval(), [3])
+      self.assertEqual(size_t.eval(), [3])
 
   def testInsertManyEmptyTensor(self):
     with self.cached_session():
       error_message = ("Empty tensors are not supported, but received shape "
                        r"\'\(0,\)\' at index 1")
-      with self.assertRaisesRegexp(ValueError, error_message):
+      with self.assertRaisesRegex(ValueError, error_message):
         data_flow_ops.Barrier(
             (dtypes.float32, dtypes.float32), shapes=((1,), (0,)), name="B")
 
@@ -100,7 +100,7 @@ class BarrierTest(test.TestCase):
       self.assertEqual([], size_t.get_shape())
       keys = [b"a", b"b", b"c"]
       insert_0_op = b.insert_many(0, keys, np.array([[], [], []], np.float32))
-      self.assertEquals(size_t.eval(), [0])
+      self.assertEqual(size_t.eval(), [0])
       with self.assertRaisesOpError(
           ".*Tensors with no elements are not supported.*"):
         insert_0_op.run()
@@ -120,7 +120,7 @@ class BarrierTest(test.TestCase):
 
       insert_0_op.run()
       insert_1_op.run()
-      self.assertEquals(size_t.eval(), [3])
+      self.assertEqual(size_t.eval(), [3])
 
       indices_val, keys_val, values_0_val, values_1_val = sess.run(
           [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
@@ -157,8 +157,8 @@ class BarrierTest(test.TestCase):
       close_op.run()
       # Now we have a closed barrier with 2 ready elements. Running take_t
       # should return a reduced batch with 2 elements only.
-      self.assertEquals(size_i.eval(), [2])  # assert that incomplete size = 2
-      self.assertEquals(size_t.eval(), [2])  # assert that ready size = 2
+      self.assertEqual(size_i.eval(), [2])  # assert that incomplete size = 2
+      self.assertEqual(size_t.eval(), [2])  # assert that ready size = 2
       _, keys_val, values_0_val, values_1_val = sess.run(
           [index_t, key_t, value_list_t[0], value_list_t[1]])
       # Check that correct values have been returned.
@@ -170,8 +170,8 @@ class BarrierTest(test.TestCase):
       # The next insert completes the element with key "c". The next take_t
       # should return a batch with just 1 element.
       insert_1_2_op.run()
-      self.assertEquals(size_i.eval(), [1])  # assert that incomplete size = 1
-      self.assertEquals(size_t.eval(), [1])  # assert that ready size = 1
+      self.assertEqual(size_i.eval(), [1])  # assert that incomplete size = 1
+      self.assertEqual(size_t.eval(), [1])  # assert that ready size = 1
       _, keys_val, values_0_val, values_1_val = sess.run(
           [index_t, key_t, value_list_t[0], value_list_t[1]])
       # Check that correct values have been returned.
@@ -212,7 +212,7 @@ class BarrierTest(test.TestCase):
 
       insert_0_op.run()
       insert_1_op.run()
-      self.assertEquals(size_t.eval(), [3])
+      self.assertEqual(size_t.eval(), [3])
 
       indices_val, keys_val, values_0_val, values_1_val = sess.run(
           [take_t[0], take_t[1], take_t[2][0], take_t[2][1]])
@@ -237,7 +237,7 @@ class BarrierTest(test.TestCase):
       take_t = b.take_many(10)
 
       self.evaluate(insert_ops)
-      self.assertEquals(size_t.eval(), [10])
+      self.assertEqual(size_t.eval(), [10])
 
       indices_val, keys_val, values_val = sess.run(
           [take_t[0], take_t[1], take_t[2][0]])
@@ -258,7 +258,7 @@ class BarrierTest(test.TestCase):
       take_t = [b.take_many(1) for _ in keys]
 
       insert_op.run()
-      self.assertEquals(size_t.eval(), [10])
+      self.assertEqual(size_t.eval(), [10])
 
       index_fetches = []
       key_fetches = []
@@ -360,7 +360,7 @@ class BarrierTest(test.TestCase):
       for t in insert_threads:
         t.join()
 
-      self.assertEquals(len(taken), num_iterations)
+      self.assertEqual(len(taken), num_iterations)
       flatten = lambda l: [item for sublist in l for item in sublist]
       all_indices = sorted(flatten([t_i["indices"] for t_i in taken]))
       all_keys = sorted(flatten([t_i["keys"] for t_i in taken]))
@@ -402,11 +402,11 @@ class BarrierTest(test.TestCase):
       take_t = b.take_many(3)
       take_too_many_t = b.take_many(4)
 
-      self.assertEquals(size_t.eval(), [0])
-      self.assertEquals(incomplete_t.eval(), [0])
+      self.assertEqual(size_t.eval(), [0])
+      self.assertEqual(incomplete_t.eval(), [0])
       insert_0_op.run()
-      self.assertEquals(size_t.eval(), [0])
-      self.assertEquals(incomplete_t.eval(), [3])
+      self.assertEqual(size_t.eval(), [0])
+      self.assertEqual(incomplete_t.eval(), [3])
       close_op.run()
 
       # This op should fail because the barrier is closed.
@@ -416,8 +416,8 @@ class BarrierTest(test.TestCase):
       # This op should succeed because the barrier has not canceled
       # pending enqueues
       insert_1_op.run()
-      self.assertEquals(size_t.eval(), [3])
-      self.assertEquals(incomplete_t.eval(), [0])
+      self.assertEqual(size_t.eval(), [3])
+      self.assertEqual(incomplete_t.eval(), [0])
 
       # This op should fail because the barrier is closed.
       with self.assertRaisesOpError("is closed"):
@@ -462,11 +462,11 @@ class BarrierTest(test.TestCase):
       take_t = b.take_many(2)
       take_too_many_t = b.take_many(3)
 
-      self.assertEquals(size_t.eval(), [0])
+      self.assertEqual(size_t.eval(), [0])
       insert_0_op.run()
       insert_1_op.run()
-      self.assertEquals(size_t.eval(), [2])
-      self.assertEquals(incomplete_t.eval(), [1])
+      self.assertEqual(size_t.eval(), [2])
+      self.assertEqual(incomplete_t.eval(), [1])
       cancel_op.run()
 
       # This op should fail because the queue is closed.
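
The remaining files below apply the same three renames. Purely as an illustration -- an assumption about how such a mechanical sweep could be reproduced, not a description of the tooling actually used for this change -- a word-boundary regex rewrite over the test sources is enough:

import pathlib
import re

# Hypothetical helper, not part of this patch: performs the same
# deprecated-alias renames over a source tree.
RENAMES = {
    "assertEquals": "assertEqual",
    "assertRaisesRegexp": "assertRaisesRegex",
    "assertRegexpMatches": "assertRegex",
}
PATTERN = re.compile(r"\b(%s)\b" % "|".join(RENAMES))


def rewrite(path):
  source = path.read_text()
  updated = PATTERN.sub(lambda m: RENAMES[m.group(1)], source)
  if updated != source:
    path.write_text(updated)


if __name__ == "__main__":
  # Example invocation; the directory and glob are illustrative.
  for test_file in pathlib.Path("tensorflow").rglob("*_test.py"):
    rewrite(test_file)
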
diff --git a/tensorflow/python/kernel_tests/base64_ops_test.py b/tensorflow/python/kernel_tests/base64_ops_test.py
index d5a5dc8c013..52a086de1eb 100644
--- a/tensorflow/python/kernel_tests/base64_ops_test.py
+++ b/tensorflow/python/kernel_tests/base64_ops_test.py
@@ -107,7 +107,7 @@ class Base64OpsTest(test_util.TensorFlowTestCase):
       # Invalid length.
       msg = np.random.bytes(99)
       enc = base64.urlsafe_b64encode(msg)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, "1 modulo 4"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, "1 modulo 4"):
         try_decode(enc + b"a")
 
       # Invalid char used in encoding.
diff --git a/tensorflow/python/kernel_tests/benchmark_test.py b/tensorflow/python/kernel_tests/benchmark_test.py
index 3fa2054847d..f4548baddaa 100644
--- a/tensorflow/python/kernel_tests/benchmark_test.py
+++ b/tensorflow/python/kernel_tests/benchmark_test.py
@@ -184,7 +184,7 @@ class BenchmarkTest(test.TestCase):
       def read_benchmark_entry(f):
         s = gfile.GFile(f, "rb").read()
         entries = test_log_pb2.BenchmarkEntries.FromString(s)
-        self.assertEquals(1, len(entries.entry))
+        self.assertEqual(1, len(entries.entry))
         return entries.entry[0]
 
       read_benchmark_1 = read_benchmark_entry(expected_output_file)
@@ -194,8 +194,8 @@ class BenchmarkTest(test.TestCase):
       self.assertProtoEquals(expected_2, read_benchmark_2)
 
       read_benchmark_3 = read_benchmark_entry(expected_output_file_3)
-      self.assertEquals(expected_3.name, read_benchmark_3.name)
-      self.assertEquals(expected_3.iters, read_benchmark_3.iters)
+      self.assertEqual(expected_3.name, read_benchmark_3.name)
+      self.assertEqual(expected_3.iters, read_benchmark_3.iters)
       self.assertGreater(read_benchmark_3.wall_time, 0)
 
       # Trace is not stored in benchmark entry. Instead we get it from
diff --git a/tensorflow/python/kernel_tests/betainc_op_test.py b/tensorflow/python/kernel_tests/betainc_op_test.py
index c564c822918..727e15b1661 100644
--- a/tensorflow/python/kernel_tests/betainc_op_test.py
+++ b/tensorflow/python/kernel_tests/betainc_op_test.py
@@ -97,7 +97,7 @@ class BetaincTest(test.TestCase):
             rtol=rtol,
             atol=atol)
 
-      with self.assertRaisesRegexp(ValueError, "must be equal"):
+      with self.assertRaisesRegex(ValueError, "must be equal"):
         math_ops.betainc(0.5, [0.5], [[0.5]])
 
       with self.cached_session():
diff --git a/tensorflow/python/kernel_tests/bincount_op_test.py b/tensorflow/python/kernel_tests/bincount_op_test.py
index 22ac9f8e99d..efa68fd6521 100644
--- a/tensorflow/python/kernel_tests/bincount_op_test.py
+++ b/tensorflow/python/kernel_tests/bincount_op_test.py
@@ -122,11 +122,11 @@ class BincountTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def test_shape_function(self):
     # size must be scalar.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Shape must be rank 0 but is rank 1 for .*Bincount"):
       gen_math_ops.bincount([1, 2, 3, -1, 6, 8], [1], [])
     # size must be positive.
-    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+    with self.assertRaisesRegex(ValueError, "must be non-negative"):
       gen_math_ops.bincount([1, 2, 3, -1, 6, 8], -5, [])
     # if size is a constant then the shape is known.
     v1 = gen_math_ops.bincount([1, 2, 3, -1, 6, 8], 5, [])
@@ -324,7 +324,7 @@ class BincountOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   @test_util.run_deprecated_v1
   def test_invalid_rank(self):
-    with self.assertRaisesRegexp(ValueError, "at most rank 2"):
+    with self.assertRaisesRegex(ValueError, "at most rank 2"):
       with test_util.use_gpu():
         self.evaluate(
             gen_math_ops.dense_bincount(
diff --git a/tensorflow/python/kernel_tests/bitcast_op_test.py b/tensorflow/python/kernel_tests/bitcast_op_test.py
index b4f9a21a899..60ed92d2173 100644
--- a/tensorflow/python/kernel_tests/bitcast_op_test.py
+++ b/tensorflow/python/kernel_tests/bitcast_op_test.py
@@ -64,7 +64,7 @@ class BitcastTest(test.TestCase):
   def testErrors(self):
     x = np.zeros([1, 1], np.int8)
     datatype = dtypes.int32
-    with self.assertRaisesRegexp(ValueError, "Cannot bitcast due to shape"):
+    with self.assertRaisesRegex(ValueError, "Cannot bitcast due to shape"):
       array_ops.bitcast(x, datatype, None)
 
   def testEmpty(self):
diff --git a/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py b/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py
index c5f58f1f6b2..73098ed3084 100644
--- a/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py
+++ b/tensorflow/python/kernel_tests/boosted_trees/stats_ops_test.py
@@ -214,7 +214,7 @@ class StatsOpsTest(test_util.TensorFlowTestCase):
     stats_summaries = self._get_stats_summary_for_split()
     stats_summaries = self.add_f_dim_and_append_zeros(stats_summaries)
 
-    with self.assertRaisesRegexp(Exception, 'Incorrect split type'):
+    with self.assertRaisesRegex(Exception, 'Incorrect split type'):
       self.evaluate(
           boosted_trees_ops.calculate_best_feature_split_v2(
               node_id_range,
diff --git a/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py b/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py
index fbac51ea1fb..c802c5284f5 100644
--- a/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py
+++ b/tensorflow/python/kernel_tests/boosted_trees/training_ops_test.py
@@ -4117,8 +4117,8 @@ class UpdateTreeEnsembleOpTest(test_util.TensorFlowTestCase):
                                      dtype=np.float32)
       split_types = np.array(
           [_INEQUALITY_DEFAULT_LEFT, _INEQUALITY_DEFAULT_LEFT])
-      with self.assertRaisesRegexp(Exception,
-                                   r'Dimension 0 in both shapes must be equal'):
+      with self.assertRaisesRegex(Exception,
+                                  r'Dimension 0 in both shapes must be equal'):
         grow_op = boosted_trees_ops.update_ensemble_v2(
             tree_ensemble_handle,
             learning_rate=1.0,
diff --git a/tensorflow/python/kernel_tests/broadcast_to_ops_test.py b/tensorflow/python/kernel_tests/broadcast_to_ops_test.py
index f478ee9f643..742545ebe91 100644
--- a/tensorflow/python/kernel_tests/broadcast_to_ops_test.py
+++ b/tensorflow/python/kernel_tests/broadcast_to_ops_test.py
@@ -131,8 +131,8 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
 
   def testBroadcastToBadOutputShape(self):
     with context.eager_mode():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Unable to broadcast tensor of shape"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Unable to broadcast tensor of shape"):
         self.evaluate(
             array_ops.broadcast_to(
                 constant_op.constant([0, 1]), constant_op.constant([2, 1])))
diff --git a/tensorflow/python/kernel_tests/bucketize_op_test.py b/tensorflow/python/kernel_tests/bucketize_op_test.py
index 128cc17db15..59c30d8f2df 100644
--- a/tensorflow/python/kernel_tests/bucketize_op_test.py
+++ b/tensorflow/python/kernel_tests/bucketize_op_test.py
@@ -67,13 +67,12 @@ class BucketizationOpTest(test.TestCase):
     op = math_ops._bucketize(
         constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
     with self.session(use_gpu=True) as sess:
-      with self.assertRaisesRegexp(
-          errors_impl.InvalidArgumentError, "Expected sorted boundaries"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Expected sorted boundaries"):
         self.evaluate(op)
 
   def testBoundariesNotList(self):
-    with self.assertRaisesRegexp(
-        TypeError, "Expected list.*"):
+    with self.assertRaisesRegex(TypeError, "Expected list.*"):
       math_ops._bucketize(constant_op.constant([-5, 0]), boundaries=0)
 
 
diff --git a/tensorflow/python/kernel_tests/check_ops_test.py b/tensorflow/python/kernel_tests/check_ops_test.py
index 9bade548849..376b0058927 100644
--- a/tensorflow/python/kernel_tests/check_ops_test.py
+++ b/tensorflow/python/kernel_tests/check_ops_test.py
@@ -99,7 +99,7 @@ class AssertV2Asserts(test.TestCase):
         def failing_fn():
           fn(*failing_args, message="fail")  # pylint: disable=cell-var-from-loop
 
-        with self.assertRaisesRegexp(error, "fail"):
+        with self.assertRaisesRegex(error, "fail"):
           failing_fn()
 
         del failing_fn
@@ -110,32 +110,32 @@ class AssertProperIterableTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_single_tensor_raises(self):
     tensor = constant_op.constant(1)
-    with self.assertRaisesRegexp(TypeError, "proper"):
+    with self.assertRaisesRegex(TypeError, "proper"):
       check_ops.assert_proper_iterable(tensor)
 
   @test_util.run_in_graph_and_eager_modes
   def test_single_sparse_tensor_raises(self):
     ten = sparse_tensor.SparseTensor(
         indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
-    with self.assertRaisesRegexp(TypeError, "proper"):
+    with self.assertRaisesRegex(TypeError, "proper"):
       check_ops.assert_proper_iterable(ten)
 
   @test_util.run_in_graph_and_eager_modes
   def test_single_ndarray_raises(self):
     array = np.array([1, 2, 3])
-    with self.assertRaisesRegexp(TypeError, "proper"):
+    with self.assertRaisesRegex(TypeError, "proper"):
       check_ops.assert_proper_iterable(array)
 
   @test_util.run_in_graph_and_eager_modes
   def test_single_string_raises(self):
     mystr = "hello"
-    with self.assertRaisesRegexp(TypeError, "proper"):
+    with self.assertRaisesRegex(TypeError, "proper"):
       check_ops.assert_proper_iterable(mystr)
 
   @test_util.run_in_graph_and_eager_modes
   def test_non_iterable_object_raises(self):
     non_iterable = 1234
-    with self.assertRaisesRegexp(TypeError, "to be iterable"):
+    with self.assertRaisesRegex(TypeError, "to be iterable"):
       check_ops.assert_proper_iterable(non_iterable)
 
   @test_util.run_in_graph_and_eager_modes
@@ -165,7 +165,7 @@ class AssertEqualTest(test.TestCase):
   def test_scalar_comparison(self):
     const_true = constant_op.constant(True, name="true")
     const_false = constant_op.constant(False, name="false")
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
       check_ops.assert_equal(const_true, const_false, message="fail")
 
   def test_returns_none_with_eager(self):
@@ -180,7 +180,7 @@ class AssertEqualTest(test.TestCase):
     # Static check
     static_small = constant_op.constant([1, 2], name="small")
     static_big = constant_op.constant([3, 4], name="big")
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
       check_ops.assert_equal(static_big, static_small, message="fail")
 
   @test_util.run_deprecated_v1
@@ -239,15 +239,15 @@ First 2 elements of y:
     with context.eager_mode():
       big = constant_op.constant([[2, 2], [3, 3], [6, 6]])
       small = constant_op.constant([[20, 2], [3, 30], [60, 6]])
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   expected_error_msg_full):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  expected_error_msg_full):
         check_ops.assert_equal(big, small, message="big does not equal small",
                                summarize=10)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   expected_error_msg_default):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  expected_error_msg_default):
         check_ops.assert_equal(big, small, message="big does not equal small")
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   expected_error_msg_short):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  expected_error_msg_short):
         check_ops.assert_equal(big, small, message="big does not equal small",
                                summarize=2)
 
@@ -257,7 +257,7 @@ First 2 elements of y:
     # Static check
     static_small = constant_op.constant([3, 1], name="small")
     static_big = constant_op.constant([4, 2], name="big")
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
       check_ops.assert_equal(static_big, static_small, message="fail")
 
   @test_util.run_deprecated_v1
@@ -285,10 +285,9 @@ First 2 elements of y:
     # The exception in eager and non-eager mode is different because
     # eager mode relies on shape check done as part of the C++ op, while
     # graph mode does shape checks when creating the `Operation` instance.
-    with self.assertRaisesRegexp(
-        (errors.InvalidArgumentError, ValueError),
-        (r"Incompatible shapes: \[3\] vs. \[2\]|"
-         r"Dimensions must be equal, but are 3 and 2")):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                (r"Incompatible shapes: \[3\] vs. \[2\]|"
+                                 r"Dimensions must be equal, but are 3 and 2")):
       with ops.control_dependencies([check_ops.assert_equal(small, small_2)]):
         out = array_ops.identity(small)
       self.evaluate(out)
@@ -296,7 +295,7 @@ First 2 elements of y:
   @test_util.run_in_graph_and_eager_modes
   def test_raises_when_not_equal_and_broadcastable_shapes(self):
     cond = constant_op.constant([True, False], name="small")
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, "fail"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, "fail"):
       check_ops.assert_equal(cond, False, message="fail")
 
   @test_util.run_in_graph_and_eager_modes
@@ -354,10 +353,9 @@ class AssertNoneEqualTest(test.TestCase):
     # The exception in eager and non-eager mode is different because
     # eager mode relies on shape check done as part of the C++ op, while
     # graph mode does shape checks when creating the `Operation` instance.
-    with self.assertRaisesRegexp(
-        (ValueError, errors.InvalidArgumentError),
-        (r"Incompatible shapes: \[3\] vs. \[2\]|"
-         r"Dimensions must be equal, but are 3 and 2")):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                (r"Incompatible shapes: \[3\] vs. \[2\]|"
+                                 r"Dimensions must be equal, but are 3 and 2")):
       with ops.control_dependencies(
           [check_ops.assert_none_equal(small, big)]):
         out = array_ops.identity(small)
@@ -381,9 +379,8 @@ class AssertNoneEqualTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          "Custom error message"):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, "Custom error message"):
         check_ops.assert_none_equal(1, 1, message="Custom error message")
 
   def test_error_message_eager(self):
@@ -394,23 +391,19 @@ class AssertNoneEqualTest(test.TestCase):
     with context.eager_mode():
       t = constant_op.constant(
           np.array(range(6)), shape=[2, 3], dtype=np.float32)
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          expected_error_msg_full):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, expected_error_msg_full):
         check_ops.assert_none_equal(
             t, t, message="This is the error message.", summarize=10)
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          expected_error_msg_full):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, expected_error_msg_full):
         check_ops.assert_none_equal(
             t, t, message="This is the error message.", summarize=-1)
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          expected_error_msg_default):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, expected_error_msg_default):
         check_ops.assert_none_equal(t, t, message="This is the error message.")
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          expected_error_msg_short):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, expected_error_msg_short):
         check_ops.assert_none_equal(
             t, t, message="This is the error message.", summarize=2)
 
@@ -588,7 +581,7 @@ class AssertLessTest(test.TestCase):
     # The exception in eager and non-eager mode is different because
     # eager mode relies on shape check done as part of the C++ op, while
     # graph mode does shape checks when creating the `Operation` instance.
-    with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
+    with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
         (ValueError, errors.InvalidArgumentError),
         (r"Incompatible shapes: \[3\] vs. \[2\]|"
          "Dimensions must be equal, but are 3 and 2")):
@@ -613,9 +606,8 @@ class AssertLessTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          "Custom error message"):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, "Custom error message"):
         check_ops.assert_less(1, 1, message="Custom error message")
 
 
@@ -665,7 +657,7 @@ class AssertLessEqualTest(test.TestCase):
     # The exception in eager and non-eager mode is different because
     # eager mode relies on shape check done as part of the C++ op, while
     # graph mode does shape checks when creating the `Operation` instance.
-    with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
+    with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
         (errors.InvalidArgumentError, ValueError),
         (r"Incompatible shapes: \[2\] vs. \[3\]|"
          r"Dimensions must be equal, but are 2 and 3")):
@@ -685,9 +677,8 @@ class AssertLessEqualTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          "Custom error message"):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, "Custom error message"):
         check_ops.assert_less_equal(1, 0, message="Custom error message")
 
 
@@ -739,7 +730,7 @@ class AssertGreaterTest(test.TestCase):
     # The exception in eager and non-eager mode is different because
     # eager mode relies on shape check done as part of the C++ op, while
     # graph mode does shape checks when creating the `Operation` instance.
-    with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
+    with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
         (errors.InvalidArgumentError, ValueError),
         (r"Incompatible shapes: \[2\] vs. \[3\]|"
          r"Dimensions must be equal, but are 2 and 3")):
@@ -757,9 +748,8 @@ class AssertGreaterTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          "Custom error message"):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, "Custom error message"):
         check_ops.assert_greater(0, 1, message="Custom error message")
 
 
@@ -811,7 +801,7 @@ class AssertGreaterEqualTest(test.TestCase):
     # The exception in eager and non-eager mode is different because
     # eager mode relies on shape check done as part of the C++ op, while
     # graph mode does shape checks when creating the `Operation` instance.
-    with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
+    with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
         (errors.InvalidArgumentError, ValueError),
         (r"Incompatible shapes: \[2\] vs. \[3\]|"
          r"Dimensions must be equal, but are 2 and 3")):
@@ -831,9 +821,8 @@ class AssertGreaterEqualTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(  # pylint:disable=g-error-prone-assert-raises
-          errors.InvalidArgumentError,
-          "Custom error message"):
+      with self.assertRaisesRegex(  # pylint:disable=g-error-prone-assert-raises
+          errors.InvalidArgumentError, "Custom error message"):
         check_ops.assert_greater_equal(0, 1, message="Custom error message")
 
 
@@ -881,8 +870,8 @@ class AssertNegativeTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Custom error message"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Custom error message"):
         check_ops.assert_negative(1, message="Custom error message")
 
 
@@ -929,8 +918,8 @@ class AssertPositiveTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Custom error message"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Custom error message"):
         check_ops.assert_positive(-1, message="Custom error message")
 
 
@@ -1106,8 +1095,7 @@ class AssertRankTest(test.TestCase):
   def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
     tensor = constant_op.constant(1, name="my_tensor")
     desired_rank = 1
-    with self.assertRaisesRegexp(ValueError,
-                                 "fail.*must have rank 1"):
+    with self.assertRaisesRegex(ValueError, "fail.*must have rank 1"):
       with ops.control_dependencies(
           [check_ops.assert_rank(
               tensor, desired_rank, message="fail")]):
@@ -1145,7 +1133,7 @@ class AssertRankTest(test.TestCase):
   def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
     tensor = constant_op.constant([1, 2], name="my_tensor")
     desired_rank = 0
-    with self.assertRaisesRegexp(ValueError, "rank"):
+    with self.assertRaisesRegex(ValueError, "rank"):
       with ops.control_dependencies(
           [check_ops.assert_rank(tensor, desired_rank)]):
         self.evaluate(array_ops.identity(tensor))
@@ -1181,7 +1169,7 @@ class AssertRankTest(test.TestCase):
   def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
     tensor = constant_op.constant([1, 2], name="my_tensor")
     desired_rank = 2
-    with self.assertRaisesRegexp(ValueError, "rank"):
+    with self.assertRaisesRegex(ValueError, "rank"):
       with ops.control_dependencies(
           [check_ops.assert_rank(tensor, desired_rank)]):
         self.evaluate(array_ops.identity(tensor))
@@ -1199,7 +1187,7 @@ class AssertRankTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_raises_if_rank_is_not_scalar_static(self):
     tensor = constant_op.constant([1, 2], name="my_tensor")
-    with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
+    with self.assertRaisesRegex(ValueError, "Rank must be a scalar"):
       check_ops.assert_rank(tensor, np.array([], dtype=np.int32))
 
   @test_util.run_deprecated_v1
@@ -1216,8 +1204,7 @@ class AssertRankTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_raises_if_rank_is_not_integer_static(self):
     tensor = constant_op.constant([1, 2], name="my_tensor")
-    with self.assertRaisesRegexp(TypeError,
-                                 "must be of type <dtype: 'int32'>"):
+    with self.assertRaisesRegex(TypeError, "must be of type <dtype: 'int32'>"):
       check_ops.assert_rank(tensor, .5)
 
   @test_util.run_deprecated_v1
@@ -1226,8 +1213,8 @@ class AssertRankTest(test.TestCase):
       tensor = constant_op.constant(
           [1, 2], dtype=dtypes.float32, name="my_tensor")
       rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
-      with self.assertRaisesRegexp(TypeError,
-                                   "must be of type <dtype: 'int32'>"):
+      with self.assertRaisesRegex(TypeError,
+                                  "must be of type <dtype: 'int32'>"):
         with ops.control_dependencies(
             [check_ops.assert_rank(tensor, rank_tensor)]):
           array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
@@ -1238,8 +1225,7 @@ class AssertRankInTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_rank_zero_tensor_raises_if_rank_mismatch_static_rank(self):
     tensor_rank0 = constant_op.constant(42, name="my_tensor")
-    with self.assertRaisesRegexp(
-        ValueError, "fail.*must have rank.*in.*1.*2"):
+    with self.assertRaisesRegex(ValueError, "fail.*must have rank.*in.*1.*2"):
       with ops.control_dependencies([
           check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
         self.evaluate(array_ops.identity(tensor_rank0))
@@ -1292,7 +1278,7 @@ class AssertRankInTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_rank_one_tensor_raises_if_rank_mismatches_static_rank(self):
     tensor_rank1 = constant_op.constant((42, 43), name="my_tensor")
-    with self.assertRaisesRegexp(ValueError, "rank"):
+    with self.assertRaisesRegex(ValueError, "rank"):
       with ops.control_dependencies([
           check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
         self.evaluate(array_ops.identity(tensor_rank1))
@@ -1314,7 +1300,7 @@ class AssertRankInTest(test.TestCase):
     desired_ranks = (
         np.array(1, dtype=np.int32),
         np.array((2, 1), dtype=np.int32))
-    with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
+    with self.assertRaisesRegex(ValueError, "Rank must be a scalar"):
       check_ops.assert_rank_in(tensor, desired_ranks)
 
   @test_util.run_deprecated_v1
@@ -1336,8 +1322,7 @@ class AssertRankInTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_raises_if_rank_is_not_integer_static(self):
     tensor = constant_op.constant((42, 43), name="my_tensor")
-    with self.assertRaisesRegexp(TypeError,
-                                 "must be of type <dtype: 'int32'>"):
+    with self.assertRaisesRegex(TypeError, "must be of type <dtype: 'int32'>"):
       check_ops.assert_rank_in(tensor, (1, .5,))
 
   @test_util.run_deprecated_v1
@@ -1346,8 +1331,8 @@ class AssertRankInTest(test.TestCase):
       tensor = constant_op.constant(
           (42, 43), dtype=dtypes.float32, name="my_tensor")
       rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
-      with self.assertRaisesRegexp(TypeError,
-                                   "must be of type <dtype: 'int32'>"):
+      with self.assertRaisesRegex(TypeError,
+                                  "must be of type <dtype: 'int32'>"):
         with ops.control_dependencies(
             [check_ops.assert_rank_in(tensor, (1, rank_tensor))]):
           array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
@@ -1359,7 +1344,7 @@ class AssertRankAtLeastTest(test.TestCase):
   def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
     tensor = constant_op.constant(1, name="my_tensor")
     desired_rank = 1
-    with self.assertRaisesRegexp(ValueError, "rank at least 1"):
+    with self.assertRaisesRegex(ValueError, "rank at least 1"):
       with ops.control_dependencies(
           [check_ops.assert_rank_at_least(tensor, desired_rank)]):
         self.evaluate(array_ops.identity(tensor))
@@ -1429,7 +1414,7 @@ class AssertRankAtLeastTest(test.TestCase):
   def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
     tensor = constant_op.constant([1, 2], name="my_tensor")
     desired_rank = 2
-    with self.assertRaisesRegexp(ValueError, "rank at least 2"):
+    with self.assertRaisesRegex(ValueError, "rank at least 2"):
       with ops.control_dependencies(
           [check_ops.assert_rank_at_least(tensor, desired_rank)]):
         self.evaluate(array_ops.identity(tensor))
@@ -1476,8 +1461,8 @@ class AssertNonNegativeTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Custom error message"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Custom error message"):
         check_ops.assert_non_negative(-1, message="Custom error message")
 
 
@@ -1512,8 +1497,8 @@ class AssertNonPositiveTest(test.TestCase):
 
   def test_static_check_in_graph_mode(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Custom error message"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Custom error message"):
         check_ops.assert_non_positive(1, message="Custom error message")
 
 
@@ -1529,7 +1514,7 @@ class AssertIntegerTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_raises_when_float(self):
     floats = constant_op.constant([1.0, 2.0], name="floats")
-    with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
+    with self.assertRaisesRegex(TypeError, "Expected.*integer"):
       check_ops.assert_integer(floats)
 
 
@@ -1546,7 +1531,7 @@ class AssertTypeTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def test_raises_when_wrong_type(self):
     floats = constant_op.constant([1.0, 2.0], dtype=dtypes.float16)
-    with self.assertRaisesRegexp(TypeError, "must be of type.*float32"):
+    with self.assertRaisesRegex(TypeError, "must be of type.*float32"):
       check_ops.assert_type(floats, dtypes.float32)
 
 
@@ -1883,12 +1868,12 @@ class AssertShapesTest(test.TestCase):
     self.evaluate(out)
 
   def raises_static_error(self, shapes, regex):
-    with self.assertRaisesRegexp(ValueError, regex):
+    with self.assertRaisesRegex(ValueError, regex):
       check_ops.assert_shapes(shapes)
 
   def raises_dynamic_error(self, shapes, regex, feed_dict):
     with self.session() as sess:
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, regex):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, regex):
         assertion = check_ops.assert_shapes(shapes)
         with ops.control_dependencies([assertion]):
           out = array_ops.identity(0)
@@ -2024,7 +2009,7 @@ class AssertScalarTest(test.TestCase):
     check_ops.assert_scalar(constant_op.constant("foo"))
     check_ops.assert_scalar(3)
     check_ops.assert_scalar("foo")
-    with self.assertRaisesRegexp(ValueError, "Expected scalar"):
+    with self.assertRaisesRegex(ValueError, "Expected scalar"):
       check_ops.assert_scalar(constant_op.constant([3, 4]))
 
 
diff --git a/tensorflow/python/kernel_tests/cholesky_op_test.py b/tensorflow/python/kernel_tests/cholesky_op_test.py
index 5dc334c897b..b748a8ec864 100644
--- a/tensorflow/python/kernel_tests/cholesky_op_test.py
+++ b/tensorflow/python/kernel_tests/cholesky_op_test.py
@@ -167,7 +167,7 @@ class CholeskyOpTest(test.TestCase):
   def testNotInvertibleCPU(self):
     # The input should be invertible.
     with self.session(use_gpu=True):
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Cholesky decomposition was not successful. The"
           " input might not be valid."):
diff --git a/tensorflow/python/kernel_tests/concat_op_test.py b/tensorflow/python/kernel_tests/concat_op_test.py
index a83bfbab1c1..ba2d1abbd10 100644
--- a/tensorflow/python/kernel_tests/concat_op_test.py
+++ b/tensorflow/python/kernel_tests/concat_op_test.py
@@ -502,7 +502,7 @@ class ConcatOpTest(test.TestCase):
   def testConcatNoScalars(self):
     scalar = constant_op.constant(7)
     dim = array_ops.placeholder(dtypes.int32)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
       array_ops.concat([scalar, scalar, scalar], dim)
 
@@ -660,8 +660,8 @@ class ConcatOffsetTest(test.TestCase):
     s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
     s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
     off = gen_array_ops.concat_offset(cdim, [s0, s1])
-    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                 r"should be a vector"):
+    with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                r"should be a vector"):
       self.evaluate(off)
 
   @test_util.run_deprecated_v1
@@ -670,8 +670,8 @@ class ConcatOffsetTest(test.TestCase):
     s0 = constant_op.constant([2, 3, 5], dtypes.int32)
     s1 = constant_op.constant([2, 7, 5], dtypes.int32)
     off = gen_array_ops.concat_offset(cdim, [s0, s1])
-    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                 r"Concat dim is out of range: 4 vs. 3"):
+    with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                r"Concat dim is out of range: 4 vs. 3"):
       self.evaluate(off)
 
   @test_util.run_deprecated_v1
@@ -680,8 +680,8 @@ class ConcatOffsetTest(test.TestCase):
     s0 = constant_op.constant([2, 3, 5], dtypes.int32)
     s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
     off = gen_array_ops.concat_offset(cdim, [s0, s1])
-    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                 r"should contain 3 elem"):
+    with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                r"should contain 3 elem"):
       self.evaluate(off)
 
   @test_util.run_deprecated_v1
@@ -691,7 +691,7 @@ class ConcatOffsetTest(test.TestCase):
     s0 = constant_op.constant([2, 3, 5], dtypes.int32)
     s1 = constant_op.constant([2, 7, 10], dtypes.int32)
     off = gen_array_ops.concat_offset(cdim, [s0, s1])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors_impl.InvalidArgumentError,
         r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
         r"and doesn't match input 0 with shape \[2 3 5\]."):
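
Because the expected-message argument is a regular expression matched with re.search against str(exception), the hunks above escape literal brackets, parentheses, and dots (e.g. r"\[2 7 10\]"). A minimal sketch of why the escaping matters (the message below is hypothetical):

    import unittest

    class EscapedPatternSketch(unittest.TestCase):

      def test_escaped_brackets(self):
        # r"\[2 3 5\]" matches the literal text "[2 3 5]"; unescaped,
        # "[2 3 5]" would be a character class matching a single
        # character out of "2", "3", "5", or a space.
        with self.assertRaisesRegex(ValueError, r"shape \[2 3 5\]"):
          raise ValueError("got shape [2 3 5]")
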
diff --git a/tensorflow/python/kernel_tests/cond_v2_test.py b/tensorflow/python/kernel_tests/cond_v2_test.py
index 1682f2275c1..52bd240019b 100644
--- a/tensorflow/python/kernel_tests/cond_v2_test.py
+++ b/tensorflow/python/kernel_tests/cond_v2_test.py
@@ -239,25 +239,23 @@ class CondV2Test(test.TestCase):
     with ops.Graph().as_default():
       _, cond_op = self._createCond(None)
       self.assertEqual(cond_op.name, "cond")
-      self.assertRegexpMatches(
-          cond_op.get_attr("then_branch").name, r"cond_true_\d*")
-      self.assertRegexpMatches(
-          cond_op.get_attr("else_branch").name, r"cond_false_\d*")
+      self.assertRegex(cond_op.get_attr("then_branch").name, r"cond_true_\d*")
+      self.assertRegex(cond_op.get_attr("else_branch").name, r"cond_false_\d*")
 
     with ops.Graph().as_default():
       with ops.name_scope("foo"):
         _, cond1_op = self._createCond("")
         self.assertEqual(cond1_op.name, "foo/cond")
-        self.assertRegexpMatches(
+        self.assertRegex(
             cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
-        self.assertRegexpMatches(
+        self.assertRegex(
             cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
 
         _, cond2_op = self._createCond(None)
         self.assertEqual(cond2_op.name, "foo/cond_1")
-        self.assertRegexpMatches(
+        self.assertRegex(
             cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
-        self.assertRegexpMatches(
+        self.assertRegex(
             cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
 
   @test_util.run_v2_only
@@ -1135,7 +1133,7 @@ class CondV2Test(test.TestCase):
     def false_fn():
       return ((x,), y * 3.0)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, "true_fn and false_fn arguments to tf.cond must have the "
         "same number, type, and overall structure of return values."):
       control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@@ -1254,7 +1252,7 @@ class CondV2CollectionTest(test.TestCase):
           return math_ops.add(x_const, y_const)
 
         cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
-        self.assertEquals(cnd.eval(), 7)
+        self.assertEqual(cnd.eval(), 7)
 
   def testCollectionTensorValueAccessInCond(self):
     """Read tensors from collections inside of cond_v2 & use them."""
@@ -1271,7 +1269,7 @@ class CondV2CollectionTest(test.TestCase):
           return math_ops.add(x_read, y_read)
 
         cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
-        self.assertEquals(cnd.eval(), 7)
+        self.assertEqual(cnd.eval(), 7)
 
   def testCollectionIntValueWriteInCond(self):
     """Make sure Int writes to collections work inside of cond_v2."""
@@ -1289,10 +1287,10 @@ class CondV2CollectionTest(test.TestCase):
           return math_ops.mul(x, z)
 
         cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
-        self.assertEquals(cnd.eval(), 14)
+        self.assertEqual(cnd.eval(), 14)
 
         read_z_collection = ops.get_collection("z")
-        self.assertEquals(read_z_collection, [7])
+        self.assertEqual(read_z_collection, [7])
 
 
 class CondV2ContainerTest(test.TestCase):
@@ -1363,11 +1361,11 @@ class CondV2ContainerTest(test.TestCase):
         with ops.container("l1"):
           cnd_true = cond_v2.cond_v2(
               constant_op.constant(True), true_fn, false_fn)
-          self.assertEquals(cnd_true.eval(), 2)
+          self.assertEqual(cnd_true.eval(), 2)
 
           cnd_false = cond_v2.cond_v2(
               constant_op.constant(False), true_fn, false_fn)
-          self.assertEquals(cnd_false.eval(), 6)
+          self.assertEqual(cnd_false.eval(), 6)
 
           v4 = variables.Variable([3])
           q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
@@ -1395,7 +1393,7 @@ class CondV2ColocationGroupAndDeviceTest(test.TestCase):
           return c
 
         with ops.colocate_with(a.op):
-          self.assertEquals(
+          self.assertEqual(
               cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)
 
         def fn2():
@@ -1405,7 +1403,7 @@ class CondV2ColocationGroupAndDeviceTest(test.TestCase):
 
         with ops.colocate_with(a.op):
           with ops.colocate_with(b.op):
-            self.assertEquals(
+            self.assertEqual(
                 cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
 
   def testColocateWithInAndOutOfCond(self):
@@ -1422,7 +1420,7 @@ class CondV2ColocationGroupAndDeviceTest(test.TestCase):
             return c
 
         with ops.colocate_with(a.op):
-          self.assertEquals(
+          self.assertEqual(
               cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
 
           d = constant_op.constant([2.0], name="d")
@@ -1495,7 +1493,7 @@ class CondV2ColocationGroupAndDeviceTest(test.TestCase):
             return c
 
         with ops.device("/device:CPU:0"):
-          self.assertEquals(
+          self.assertEqual(
               cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
 
           d = constant_op.constant(4.0)
diff --git a/tensorflow/python/kernel_tests/confusion_matrix_test.py b/tensorflow/python/kernel_tests/confusion_matrix_test.py
index 04de4747a69..f7963b7fd1d 100644
--- a/tensorflow/python/kernel_tests/confusion_matrix_test.py
+++ b/tensorflow/python/kernel_tests/confusion_matrix_test.py
@@ -221,9 +221,9 @@ class ConfusionMatrixTest(test.TestCase):
   def testInputDifferentSize(self):
     labels = np.asarray([1, 2])
     predictions = np.asarray([1, 2, 3])
-    self.assertRaisesRegexp(ValueError, "must be equal",
-                            confusion_matrix.confusion_matrix, predictions,
-                            labels)
+    self.assertRaisesRegex(ValueError, "must be equal",
+                           confusion_matrix.confusion_matrix, predictions,
+                           labels)
 
   def testOutputIsInt32(self):
     labels = np.arange(2)
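
The confusion_matrix hunk above uses assertRaisesRegex in its callable form, where the callable and its arguments follow the pattern instead of entering a context manager; both forms make the same assertion. A minimal sketch, assuming a hypothetical validation helper:

    import unittest

    def _check_positive(x):
      if x <= 0:
        raise ValueError("x must be positive")
      return x

    class CallableFormSketch(unittest.TestCase):

      def test_callable_and_context_manager_forms(self):
        # Callable form: exception class, pattern, callable, then *args.
        self.assertRaisesRegex(ValueError, "must be positive",
                               _check_positive, -1)
        # Equivalent context-manager form.
        with self.assertRaisesRegex(ValueError, "must be positive"):
          _check_positive(-1)
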
diff --git a/tensorflow/python/kernel_tests/constant_op_eager_test.py b/tensorflow/python/kernel_tests/constant_op_eager_test.py
index cc788219ef3..81f26f2f791 100644
--- a/tensorflow/python/kernel_tests/constant_op_eager_test.py
+++ b/tensorflow/python/kernel_tests/constant_op_eager_test.py
@@ -106,7 +106,7 @@ class ConstantTest(test.TestCase):
 
     # This integer is larger than all non-infinite numbers representable
     # by a double, so it raises an exception.
-    with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
+    with self.assertRaisesRegex(ValueError, "out-of-range integer"):
       constant_op.constant(10**310, dtypes_lib.float64)
 
   def testInt32(self):
@@ -128,7 +128,7 @@ class ConstantTest(test.TestCase):
     self.assertAllClose(np.array(orig), tf_ans.numpy())
 
     # Out of range for an int64
-    with self.assertRaisesRegexp(ValueError, "out-of-range integer"):
+    with self.assertRaisesRegex(ValueError, "out-of-range integer"):
       constant_op.constant([2**72])
 
   def testComplex64(self):
@@ -216,7 +216,7 @@ class ConstantTest(test.TestCase):
       constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
 
   def testShapeWrong(self):
-    with self.assertRaisesRegexp(TypeError, None):
+    with self.assertRaisesRegex(TypeError, None):
       constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
 
   def testShape(self):
@@ -250,17 +250,17 @@ class ConstantTest(test.TestCase):
       def __len__(self):
         return -1
 
-    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
+    with self.assertRaisesRegex(ValueError, "should return >= 0"):
       constant_op.constant([BadList()])
-    with self.assertRaisesRegexp(ValueError, "mixed types"):
+    with self.assertRaisesRegex(ValueError, "mixed types"):
       constant_op.constant([1, 2, BadList()])
-    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
+    with self.assertRaisesRegex(ValueError, "should return >= 0"):
       constant_op.constant(BadList())
-    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
+    with self.assertRaisesRegex(ValueError, "should return >= 0"):
       constant_op.constant([[BadList(), 2], 3])
-    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
+    with self.assertRaisesRegex(ValueError, "should return >= 0"):
       constant_op.constant([BadList(), [1, 2, 3]])
-    with self.assertRaisesRegexp(ValueError, "should return >= 0"):
+    with self.assertRaisesRegex(ValueError, "should return >= 0"):
       constant_op.constant([BadList(), []])
 
     # TODO(allenl, josh11b): These cases should return exceptions rather than
@@ -268,19 +268,19 @@ class ConstantTest(test.TestCase):
     # sequence recursively). Maybe the first one is fine, but the second one
     # silently truncating is rather bad.
 
-    # with self.assertRaisesRegexp(ValueError, "should return >= 0"):
+    # with self.assertRaisesRegex(ValueError, "should return >= 0"):
     #   constant_op.constant([[3, 2, 1], BadList()])
-    # with self.assertRaisesRegexp(ValueError, "should return >= 0"):
+    # with self.assertRaisesRegex(ValueError, "should return >= 0"):
     #   constant_op.constant([[], BadList()])
 
   def testSparseValuesRaiseErrors(self):
-    with self.assertRaisesRegexp(ValueError, "non-rectangular Python sequence"):
+    with self.assertRaisesRegex(ValueError, "non-rectangular Python sequence"):
       constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
 
-    with self.assertRaisesRegexp(ValueError, None):
+    with self.assertRaisesRegex(ValueError, None):
       constant_op.constant([[1, 2], [3]])
 
-    with self.assertRaisesRegexp(ValueError, None):
+    with self.assertRaisesRegex(ValueError, None):
       constant_op.constant([[1, 2], [3], [4, 5]])
 
   # TODO(ashankar): This test fails with graph construction since
diff --git a/tensorflow/python/kernel_tests/constant_op_test.py b/tensorflow/python/kernel_tests/constant_op_test.py
index 6780011b0b8..99d5278de0f 100644
--- a/tensorflow/python/kernel_tests/constant_op_test.py
+++ b/tensorflow/python/kernel_tests/constant_op_test.py
@@ -234,8 +234,7 @@ class ConstantTest(test.TestCase):
     self.assertEqual(c.get_shape(), [10])
 
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
-          TypeError, "Expected Tensor's shape"):
+      with self.assertRaisesRegex(TypeError, "Expected Tensor's shape"):
         c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10])
 
   def testPromotionShapes(self):
@@ -249,9 +248,9 @@ class ConstantTest(test.TestCase):
   # pylint: disable=g-long-lambda
   def testShapeWrong(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError, "Too many elements provided."):
+      with self.assertRaisesRegex(ValueError, "Too many elements provided."):
         constant_op.constant_v1([1, 2, 3, 4, 5, 6, 7], shape=[5])
-      with self.assertRaisesRegexp(TypeError, "Expected Tensor's shape"):
+      with self.assertRaisesRegex(TypeError, "Expected Tensor's shape"):
         constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5])
 
   # pylint: enable=g-long-lambda
@@ -260,7 +259,7 @@ class ConstantTest(test.TestCase):
   def _testTooLargeConstant(self):
     with ops.Graph().as_default():
       large_array = np.zeros((512, 1024, 1024), dtype=np.float32)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "Cannot create a tensor proto whose content is larger than 2GB."):
         c = constant_op.constant(large_array)
@@ -272,20 +271,20 @@ class ConstantTest(test.TestCase):
       large_array = np.zeros((256, 1024, 1024), dtype=np.float32)
       c = constant_op.constant(large_array)
       d = constant_op.constant(large_array)
-      with self.assertRaisesRegexp(ValueError,
-                                   "GraphDef cannot be larger than 2GB."):
+      with self.assertRaisesRegex(ValueError,
+                                  "GraphDef cannot be larger than 2GB."):
         g.as_graph_def()
 
   @test_util.run_deprecated_v1
   def testSparseValuesRaiseErrors(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 "setting an array element with a sequence"):
+    with self.assertRaisesRegex(ValueError,
+                                "setting an array element with a sequence"):
       c = constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32)
 
-    with self.assertRaisesRegexp(ValueError, "must be a dense"):
+    with self.assertRaisesRegex(ValueError, "must be a dense"):
       c = constant_op.constant([[1, 2], [3]])
 
-    with self.assertRaisesRegexp(ValueError, "must be a dense"):
+    with self.assertRaisesRegex(ValueError, "must be a dense"):
       c = constant_op.constant([[1, 2], [3], [4, 5]])
 
 
@@ -330,8 +329,8 @@ class AsTensorTest(test.TestCase):
       self.assertEqual(dtypes_lib.int64, x.dtype)
       self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
 
-      with self.assertRaisesRegexp(
-          ValueError, "a dimension is too large .2147483648."):
+      with self.assertRaisesRegex(ValueError,
+                                  "a dimension is too large .2147483648."):
         x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
                                   dtype=dtypes_lib.int32)
 
@@ -344,10 +343,10 @@ class AsTensorTest(test.TestCase):
           array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
       self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], self.evaluate(x))
 
-    with self.assertRaisesRegexp(ValueError, "partially known"):
+    with self.assertRaisesRegex(ValueError, "partially known"):
       ops.convert_to_tensor(tensor_shape.TensorShape(None))
 
-    with self.assertRaisesRegexp(ValueError, "partially known"):
+    with self.assertRaisesRegex(ValueError, "partially known"):
       ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64]))
 
     with self.assertRaises(TypeError):
@@ -368,14 +367,14 @@ class AsTensorTest(test.TestCase):
 
     shape = tensor_shape.TensorShape(None)
     if shape._v2_behavior:
-      with self.assertRaisesRegexp(ValueError, "None values not supported"):
+      with self.assertRaisesRegex(ValueError, "None values not supported"):
         ops.convert_to_tensor(shape[1])
-      with self.assertRaisesRegexp(ValueError, "None values not supported"):
+      with self.assertRaisesRegex(ValueError, "None values not supported"):
         ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
     else:
-      with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
+      with self.assertRaisesRegex(ValueError, "unknown Dimension"):
         ops.convert_to_tensor(shape[1])
-      with self.assertRaisesRegexp(ValueError, "unknown Dimension"):
+      with self.assertRaisesRegex(ValueError, "unknown Dimension"):
         ops.convert_to_tensor(tensor_shape.TensorShape([1, None, 64])[1])
 
 
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
index eec7165d148..b03020c3bf5 100644
--- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -384,7 +384,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     values = constant_op.constant(10)
     fn1 = lambda: math_ops.add(values, 1)
     fn2 = lambda: math_ops.subtract(values, 1)
-    with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
+    with self.assertRaisesRegex(TypeError, "must not be a Python bool"):
       _ = control_flow_ops.cond(False, fn1, fn2)
 
   @test_util.run_deprecated_v1
@@ -416,8 +416,8 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
           if graph.is_fetchable(t.op):
             sess.run(t, feed_dict={x: 3})
           else:
-            with self.assertRaisesRegexp(ValueError,
-                                         "has been marked as not fetchable"):
+            with self.assertRaisesRegex(ValueError,
+                                        "has been marked as not fetchable"):
               sess.run(t, feed_dict={x: 3})
 
   @test_util.disable_control_flow_v2("Not relevant")
@@ -436,7 +436,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
       for op in graph.get_operations():
         for t in op.inputs:
           if t not in feedable_tensors and t.dtype is dtypes.int32:
-            with self.assertRaisesRegexp(ValueError, "may not be fed"):
+            with self.assertRaisesRegex(ValueError, "may not be fed"):
               sess.run(r, feed_dict={t: 3})
 
   @test_util.run_v1_only("b/120545219")
@@ -461,8 +461,8 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
       values = constant_op.constant([10])
       indices = constant_op.constant([0])
       x = ops.IndexedSlices(values, indices)
-      with self.assertRaisesRegexp(
-          TypeError, "Cannot reconcile tf.cond 0-th outputs"):
+      with self.assertRaisesRegex(TypeError,
+                                  "Cannot reconcile tf.cond 0-th outputs"):
         control_flow_ops.cond(
             constant_op.constant(True),
             lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices),
@@ -813,7 +813,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     # rely on variable names.
     prefix = "cond/" if context.executing_eagerly() else ""
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "Tensor %strue_branch:0 in true_fn is accessed from false_fn." %
         prefix):
@@ -844,7 +844,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     # This was needed for backwards compatibility with TF2 Estimators which
     # rely on variable names.
     prefix = "switch_case/indexed_case/" if context.executing_eagerly() else ""
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Tensor %sbr1_identity:0 in branch 1 is "
         "accessed from branch 4." % prefix):
       f()
@@ -929,7 +929,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
       v1_msg = "The two structures don't have the same nested structure"
       v2_msg = ("true_fn and false_fn arguments to tf.cond must have the same "
                 "number, type, and overall structure of return values.")
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           TypeError if control_flow_util.ENABLE_CONTROL_FLOW_V2 else ValueError,
           v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
         control_flow_ops.cond(pred, fn1, fn2)
@@ -1092,7 +1092,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
 
       self.assertAllEqual(r, 10000.)
       grad = gradients_impl.gradients(r, [x])[0]
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           r"Connecting to invalid output 1 of source node cond which has 1 "
           r"outputs. Try using "
@@ -1667,14 +1667,14 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     loop_with_maxiter = create_while_loop(maximum_iterations=2)
     xla_context.Exit()
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"Cannot create a gradient accumulator for tensor '.+' inside "
         r"XLA while_loop because maximum_iterations was not passed to "
         r"the tf.while_loop call \('.+'\)."):
       _ = gradients_impl.gradients(loop_no_maxiter, v)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
         r"while_loop. maximum_iterations tensor '.+' for while_loop context "
@@ -1705,8 +1705,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     if control_flow_util.ENABLE_CONTROL_FLOW_V2:
       xla_context = control_flow_ops.XLAControlFlowContext()
       xla_context.Enter()
-      with self.assertRaisesRegexp(ValueError,
-                                   r"must be from the same graph.*"):
+      with self.assertRaisesRegex(ValueError, r"must be from the same graph.*"):
         loop = create_while_loop()
       xla_context.Exit()
     else:
@@ -1714,7 +1713,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
       xla_context.Enter()
       loop = create_while_loop()
       xla_context.Exit()
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
           r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
@@ -1976,7 +1975,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     i = constant_op.constant(0)
     c = lambda i, _: math_ops.less(i, 10)
     b = lambda i, x: [i + 1, x + 1]
-    with self.assertRaisesRegexp(ValueError, "is not compatible with"):
+    with self.assertRaisesRegex(ValueError, "is not compatible with"):
       # Shape of x is [2], but we specify a shape of [5].
       control_flow_ops.while_loop(
           c, b, [i, x], [i.shape, tensor_shape.TensorShape([5])])
@@ -1990,9 +1989,8 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     # body accepts N values and returns N+1 values.
     b = lambda i, *x: (i, i) + x
 
-    with self.assertRaisesRegexp(
-        ValueError,
-        "The two structures don't have the same nested structure."):
+    with self.assertRaisesRegex(
+        ValueError, "The two structures don't have the same nested structure."):
       control_flow_ops.while_loop(c, b, [i, x])
 
   @test_util.run_deprecated_v1
@@ -2035,7 +2033,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
       m = array_ops.ones([2, 2])
       c = lambda i, j: math_ops.less(i, 2)
       b = lambda i, j: [i + 1, array_ops.concat([j, j], 0)]
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
           r"shape \(4, 2\) after one iteration. To allow the shape to vary "
@@ -2138,7 +2136,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
       ]
 
     # Explicit shape invariant, with a specific (incompatible) rank.
-    with self.assertRaisesRegexp(ValueError, "is not compatible with"):
+    with self.assertRaisesRegex(ValueError, "is not compatible with"):
       control_flow_ops.while_loop(
           c, b1, [i, x],
           [i.get_shape(), tensor_shape.TensorShape([5])])
@@ -2188,7 +2186,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     b = lambda i, x: [i+1, x]
 
     # Explicit shape invariant, with a specific (incompatible) rank.
-    with self.assertRaisesRegexp(ValueError, "is not compatible with"):
+    with self.assertRaisesRegex(ValueError, "is not compatible with"):
       control_flow_ops.while_loop(
           c, b, [i, x],
           [i.get_shape(), tensor_shape.TensorShape([5])])
@@ -3305,7 +3303,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
         z = v * 2
         return i + 1, gradients_impl.gradients(z, x)[0]
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "Cannot compute gradient inside while loop with respect to op 'x'. "
           "We do not support taking the gradient wrt or through the initial "
@@ -3451,7 +3449,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
       def b(lv0, lv1, _):
         return [lv0, lv1]
 
-      with self.assertRaisesRegexp(ValueError, "the same number of elements"):
+      with self.assertRaisesRegex(ValueError, "the same number of elements"):
         control_flow_ops.while_loop(c, b, loop_vars)
 
   @test_util.run_v1_only("b/120545219")
@@ -4013,7 +4011,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
     result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
     grad_theta = gradients_impl.gradients(result, theta)
     if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
-      with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
+      with self.assertRaisesRegex(TypeError, "Second-order gradient"):
         gradients_impl.gradients(grad_theta, theta)
     grad_theta_stopped = array_ops.stop_gradient(grad_theta)
     gradients_impl.gradients(grad_theta_stopped, theta)
@@ -4676,7 +4674,7 @@ class ControlFlowContextCheckTest(test.TestCase):
   def testInvalidContext(self):
     # Accessing a while loop tensor outside of control flow is illegal.
     while_tensor = self._getWhileTensor()
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
         "is in a while loop. See info log for more details."):
@@ -4686,7 +4684,7 @@ class ControlFlowContextCheckTest(test.TestCase):
   def testInvalidContextInCond(self):
     # Accessing a while loop tensor in cond is illegal.
     while_tensor = self._getWhileTensor()
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
         "'while/Const_1' is in a while loop. See info log for more details."):
       # TODO(skyewm): this passes if we return while_tensor directly instead
@@ -4699,14 +4697,14 @@ class ControlFlowContextCheckTest(test.TestCase):
   def testInvalidContextInWhile(self):
     # Accessing a while loop tensor in a different while loop is illegal.
     while_tensor = self._getWhileTensor()
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "Cannot use 'while/Const_1' as input to 'while_1/Add' because they are "
         "in different while loops. See info log for more details."):
       control_flow_ops.while_loop(lambda i: i < 10,
                                   lambda x: math_ops.add(1, while_tensor), [0])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "Cannot use 'while/Const_1' as input to 'while_2/NextIteration' "
         "because they are in different while loops. See info log for more "
@@ -4763,7 +4761,7 @@ class ControlFlowContextCheckTest(test.TestCase):
       return control_flow_ops.while_loop(lambda i: i < 3,
                                          lambda i: i + while_tensor, [0])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "Cannot use 'cond/while/Const_1' as input to 'cond/while_1/add' because"
         " they are in different while loops. See info log for more details."):
@@ -4855,7 +4853,7 @@ class TupleTest(test.TestCase):
       # Should trigger the assign.
       self.evaluate(t)
 
-      self.assertEquals(1, self.evaluate(var))
+      self.assertEqual(1, self.evaluate(var))
 
 
 class AssertTest(test.TestCase):
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
index e01abc8133d..73804e6731a 100644
--- a/tensorflow/python/kernel_tests/conv_ops_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -2526,16 +2526,16 @@ class Conv2DTest(test.TestCase):
   def testOpEdgeCases(self):
     with self.cached_session() as sess:
       # Illegal strides.
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "strides in the batch and depth"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "strides in the batch and depth"):
         sess.run(
             nn_ops.conv2d(
                 array_ops.placeholder(dtypes.float32),
                 array_ops.placeholder(dtypes.float32),
                 strides=[2, 1, 1, 1],
                 padding="SAME"))
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "strides in the batch and depth"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "strides in the batch and depth"):
         sess.run(
             nn_ops.conv2d(
                 array_ops.placeholder(dtypes.float32),
@@ -2544,7 +2544,7 @@ class Conv2DTest(test.TestCase):
                 padding="SAME"))
 
       # Filter larger than input.
-      with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
+      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
         sess.run(
             nn_ops.conv2d(
                 array_ops.placeholder(
@@ -2553,7 +2553,7 @@ class Conv2DTest(test.TestCase):
                     dtypes.float32, shape=[20, 21, 3, 2]),
                 strides=[1, 1, 1, 1],
                 padding="VALID"))
-      with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
+      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
         sess.run(
             nn_ops.conv2d(
                 array_ops.placeholder(
@@ -2564,7 +2564,7 @@ class Conv2DTest(test.TestCase):
                 padding="VALID"))
 
       # Filter larger than input + padding.
-      with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
+      with self.assertRaisesRegex(ValueError, "Negative dimension size"):
         sess.run(
             nn_ops.conv2d(
                 array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
@@ -2573,8 +2573,8 @@ class Conv2DTest(test.TestCase):
                 padding=[[0, 0], [2, 2], [2, 2], [0, 0]]))
 
       # Negative padding during backprop.
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "nonnegative"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "nonnegative"):
         sess.run(
             nn_ops.conv2d_backprop_input([32, 20, 20, 3],
                                          array_ops.placeholder(
@@ -2586,8 +2586,8 @@ class Conv2DTest(test.TestCase):
                                          strides=[1, 1, 1, 1],
                                          padding=[[0, 0], [-1, 0], [0, 0],
                                                   [0, 0]]))
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "nonnegative"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "nonnegative"):
         sess.run(
             nn_ops.conv2d_backprop_filter(
                 array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
diff --git a/tensorflow/python/kernel_tests/critical_section_test.py b/tensorflow/python/kernel_tests/critical_section_test.py
index 55c1219580a..ab1906f939d 100644
--- a/tensorflow/python/kernel_tests/critical_section_test.py
+++ b/tensorflow/python/kernel_tests/critical_section_test.py
@@ -179,7 +179,7 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
     def fn(x):
       return cs.execute(lambda: add(x))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r"Attempting to lock a CriticalSection in which we are"):
       cs.execute(lambda: fn(1.0))
 
@@ -221,10 +221,10 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
       ex_3 = cs.execute(lambda: fn_captures_dependency(1.0))
 
     # Ensure there's no actual deadlock on to_execute.
-    self.assertEquals(2.0, self.evaluate(ex_0))
-    self.assertEquals(2.0, self.evaluate(ex_1))
-    self.assertEquals(2.0, self.evaluate(ex_2))
-    self.assertEquals(2.0, self.evaluate(ex_3))
+    self.assertEqual(2.0, self.evaluate(ex_0))
+    self.assertEqual(2.0, self.evaluate(ex_1))
+    self.assertEqual(2.0, self.evaluate(ex_2))
+    self.assertEqual(2.0, self.evaluate(ex_3))
 
   def testRecursiveCriticalSectionAccessWithinLoopIsProtected(self):
     cs = critical_section_ops.CriticalSection(shared_name="cs")
@@ -251,7 +251,7 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
         "'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
         "body_implicit_capture'\n"
         "==============\n")
-    self.assertEquals((1000, 1000), self.evaluate((i_n, j_n)))
+    self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
     logging.warn(
         "\n==============\nSuccessfully finished running "
         "'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
@@ -278,7 +278,7 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
         "'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
         "body_implicit_capture_protected'\n"
         "==============\n")
-    self.assertEquals((1000, 1000), self.evaluate((i_n, j_n)))
+    self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
     logging.warn(
         "\n==============\nSuccessfully finished running "
         "'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
@@ -303,7 +303,7 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
         "'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
         "body_args_capture'\n"
         "==============\n")
-    self.assertEquals((1000, 1000), self.evaluate((i_n, j_n)))
+    self.assertEqual((1000, 1000), self.evaluate((i_n, j_n)))
     logging.warn(
         "\n==============\nSuccessfully finished running "
         "'testRecursiveCriticalSectionAccessWithinLoopDoesNotDeadlock "
@@ -319,7 +319,8 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
     add = lambda x: x + 1
     def fn(x):
       return cs_same.execute(lambda: add(x))
-    with self.assertRaisesRegexp(
+
+    with self.assertRaisesRegex(
         ValueError, r"Attempting to lock a CriticalSection in which we are"):
       cs.execute(lambda: fn(1.0))
 
@@ -334,12 +335,12 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
     cs0.execute(lambda: v - 1)
     # It's *not* OK for a different CriticalSection to access it by
     # default.
-    with self.assertRaisesRegexp(
-        ValueError, "requested exclusive resource access"):
+    with self.assertRaisesRegex(ValueError,
+                                "requested exclusive resource access"):
       cs1.execute(lambda: v + 1)
     # It's not even OK if the second call doesn't request exclusive access.
-    with self.assertRaisesRegexp(
-        ValueError, "requested exclusive resource access"):
+    with self.assertRaisesRegex(ValueError,
+                                "requested exclusive resource access"):
       cs1.execute(lambda: v + 1, exclusive_resource_access=False)
 
     v2 = resource_variable_ops.ResourceVariable(0.0, name="v2")
@@ -349,8 +350,8 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase):
 
     # It's not OK if the second request requires exclusive resource
     # access.
-    with self.assertRaisesRegexp(
-        ValueError, "requested exclusive resource access"):
+    with self.assertRaisesRegex(ValueError,
+                                "requested exclusive resource access"):
       cs1.execute(lambda: v2 + 1)
 
   def testControlDependencyFromOutsideWhileLoopMixedWithInsideLoop(self):
diff --git a/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py b/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
index 0d86d13c715..d31a663e1ad 100644
--- a/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
+++ b/tensorflow/python/kernel_tests/ctc_decoder_ops_test.py
@@ -236,9 +236,9 @@ class CTCGreedyDecoderTest(test.TestCase):
         top_paths=2)
 
     # Requesting more paths than the beam width allows.
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 (".*requested more paths than the beam "
-                                  "width.*")):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                (".*requested more paths than the beam "
+                                 "width.*")):
       self._testCTCDecoder(
           ctc_ops.ctc_beam_search_decoder,
           inputs,
diff --git a/tensorflow/python/kernel_tests/ctc_loss_op_test.py b/tensorflow/python/kernel_tests/ctc_loss_op_test.py
index 9b94536de0a..ca8f171e700 100644
--- a/tensorflow/python/kernel_tests/ctc_loss_op_test.py
+++ b/tensorflow/python/kernel_tests/ctc_loss_op_test.py
@@ -90,7 +90,7 @@ class CTCLossTest(test.TestCase):
                    loss_truth,
                    grad_truth,
                    expected_err_re=None):
-    self.assertEquals(len(inputs), len(grad_truth))
+    self.assertEqual(len(inputs), len(grad_truth))
 
     inputs_t = constant_op.constant(inputs)
 
@@ -288,8 +288,7 @@ class CTCLossTest(test.TestCase):
           inputs=inputs_t, labels=labels, sequence_length=seq_lens)
       # Taking the second gradient should fail, since it is not
       # yet supported.
-      with self.assertRaisesRegexp(LookupError,
-                                   "explicitly disabled"):
+      with self.assertRaisesRegex(LookupError, "explicitly disabled"):
         _ = gradients_impl._hessian_vector_product(loss, [inputs_t], v)
 
   @test_util.run_v1_only("b/120545219")
@@ -302,8 +301,8 @@ class CTCLossTest(test.TestCase):
         dense_shape=[5, 5])
 
     with self.session(use_gpu=False) as sess:
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "batch_size must not be 0"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "batch_size must not be 0"):
         sess.run(_ctc_loss_v2(labels, inputs, sequence_lengths))
 
 
diff --git a/tensorflow/python/kernel_tests/cwise_ops_binary_test.py b/tensorflow/python/kernel_tests/cwise_ops_binary_test.py
index 4c6a41bf205..50e6c0ad91f 100644
--- a/tensorflow/python/kernel_tests/cwise_ops_binary_test.py
+++ b/tensorflow/python/kernel_tests/cwise_ops_binary_test.py
@@ -796,7 +796,7 @@ class BinaryOpTest(test.TestCase):
   def testPowNegativeExponent(self):
     for dtype in [np.int32, np.int64]:
       with test_util.force_cpu():
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             errors_impl.InvalidArgumentError,
             "Integers to negative integer powers are not allowed"):
           x = np.array([5, 2]).astype(dtype)
@@ -804,7 +804,7 @@ class BinaryOpTest(test.TestCase):
           self.evaluate(math_ops.pow(x, y))
 
       with test_util.force_cpu():
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             errors_impl.InvalidArgumentError,
             "Integers to negative integer powers are not allowed"):
           x = np.array([5, 2]).astype(dtype)
@@ -812,7 +812,7 @@ class BinaryOpTest(test.TestCase):
           self.evaluate(math_ops.pow(x, y))
 
       with test_util.force_cpu():
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             errors_impl.InvalidArgumentError,
             "Integers to negative integer powers are not allowed"):
           x = np.array([5, 2]).astype(dtype)
@@ -948,7 +948,7 @@ class ComparisonOpTest(test.TestCase):
     y = np.arange(0, 10).reshape([5, 2])
     for t in dtypes:
       for f in funcs:
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             (ValueError, errors.InvalidArgumentError),
             "Incompatible shapes|Dimensions must be equal"):
           f(x.astype(t), y.astype(t))
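
As the comparison-op hunk above shows, assertRaisesRegex also accepts a tuple of exception classes, and the pattern may use alternation when the message differs between execution modes. A minimal sketch (the exception and messages below are hypothetical):

    import unittest

    class TupleExceptionSketch(unittest.TestCase):

      def test_either_exception_and_message(self):
        # Passes if either listed exception is raised and re.search
        # finds either alternative of the pattern in its message.
        with self.assertRaisesRegex((ValueError, TypeError),
                                    "Incompatible shapes|Dimensions must be equal"):
          raise ValueError("Incompatible shapes: [2] vs. [3]")
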
diff --git a/tensorflow/python/kernel_tests/cwise_ops_test.py b/tensorflow/python/kernel_tests/cwise_ops_test.py
index 8c84bde1431..78d3af17990 100644
--- a/tensorflow/python/kernel_tests/cwise_ops_test.py
+++ b/tensorflow/python/kernel_tests/cwise_ops_test.py
@@ -217,7 +217,7 @@ class ComparisonOpTest(test.TestCase):
     for t in dtypes:
       for f in funcs:
         with self.subTest(t=t, f=f):
-          with self.assertRaisesRegexp(
+          with self.assertRaisesRegex(
               (ValueError, errors.InvalidArgumentError),
               "Incompatible shapes|Dimensions must be equal"):
             f(x.astype(t), y.astype(t))
@@ -1158,8 +1158,8 @@ class ComplexMakeRealImagTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testConjString(self):
     x = array_ops.placeholder(dtypes_lib.string)
-    with self.assertRaisesRegexp(TypeError,
-                                 r"Expected numeric or variant tensor"):
+    with self.assertRaisesRegex(TypeError,
+                                r"Expected numeric or variant tensor"):
       math_ops.conj(x)
 
   def _compareGradient(self, x):
@@ -1281,7 +1281,7 @@ class PolyvalTest(test.TestCase):
   def test_coeffs_raise(self):
     x = np.random.rand(2, 2).astype(np.float32)
     coeffs = {}
-    with self.assertRaisesRegexp(ValueError, "Argument coeffs must be list"):
+    with self.assertRaisesRegex(ValueError, "Argument coeffs must be list"):
       math_ops.polyval(coeffs, x)
 
 
diff --git a/tensorflow/python/kernel_tests/depthtospace_op_test.py b/tensorflow/python/kernel_tests/depthtospace_op_test.py
index b64b8cd09f1..17fb579ca52 100644
--- a/tensorflow/python/kernel_tests/depthtospace_op_test.py
+++ b/tensorflow/python/kernel_tests/depthtospace_op_test.py
@@ -50,7 +50,7 @@ class DepthToSpaceTest(test.TestCase):
         output_nchw = array_ops.depth_to_space(
             input_nchw, block_size, data_format="NCHW")
         output_nhwc = test_util.NCHWToNHWC(output_nchw)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             errors_impl.InvalidArgumentError,
             "No OpKernel was registered to support Op 'DepthToSpace'"):
           output_nhwc.eval()
diff --git a/tensorflow/python/kernel_tests/diag_op_test.py b/tensorflow/python/kernel_tests/diag_op_test.py
index f41c4375d07..9ee39842b2f 100644
--- a/tensorflow/python/kernel_tests/diag_op_test.py
+++ b/tensorflow/python/kernel_tests/diag_op_test.py
@@ -537,7 +537,7 @@ class MatrixDiagTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testInvalidShape(self):
-    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
+    with self.assertRaisesRegex(ValueError, "must be at least rank 1"):
       array_ops.matrix_diag(0)
 
   @test_util.run_deprecated_v1
@@ -695,9 +695,9 @@ class MatrixSetDiagTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testInvalidShape(self):
-    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
+    with self.assertRaisesRegex(ValueError, "must be at least rank 2"):
       array_ops.matrix_set_diag(0, [0])
-    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
+    with self.assertRaisesRegex(ValueError, "must be at least rank 1"):
       array_ops.matrix_set_diag([[0]], 0)
 
   @test_util.run_deprecated_v1
@@ -887,7 +887,7 @@ class MatrixDiagPartTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testInvalidShape(self):
-    with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
+    with self.assertRaisesRegex(ValueError, "must be at least rank 2"):
       array_ops.matrix_diag_part(0)
 
   @test_util.run_deprecated_v1
@@ -1068,7 +1068,7 @@ class DiagTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testInvalidRank(self):
-    with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
+    with self.assertRaisesRegex(ValueError, "must be at least rank 1"):
       array_ops.diag(0.0)
 
 
diff --git a/tensorflow/python/kernel_tests/distributions/bijector_test.py b/tensorflow/python/kernel_tests/distributions/bijector_test.py
index 49f24a57420..55dc4cbbfa8 100644
--- a/tensorflow/python/kernel_tests/distributions/bijector_test.py
+++ b/tensorflow/python/kernel_tests/distributions/bijector_test.py
@@ -36,9 +36,9 @@ class BaseBijectorTest(test.TestCase):
   """Tests properties of the Bijector base-class."""
 
   def testIsAbstract(self):
-    with self.assertRaisesRegexp(TypeError,
-                                 ("Can't instantiate abstract class Bijector "
-                                  "with abstract methods __init__")):
+    with self.assertRaisesRegex(TypeError,
+                                ("Can't instantiate abstract class Bijector "
+                                 "with abstract methods __init__")):
       bijector.Bijector()  # pylint: disable=abstract-class-instantiated
 
   def testDefaults(self):
@@ -65,20 +65,18 @@ class BaseBijectorTest(test.TestCase):
       self.assertAllEqual(shape, inverse_event_shape_)
       self.assertAllEqual(shape, bij.inverse_event_shape(shape))
 
-    with self.assertRaisesRegexp(
-        NotImplementedError, "inverse not implemented"):
+    with self.assertRaisesRegex(NotImplementedError, "inverse not implemented"):
       bij.inverse(0)
 
-    with self.assertRaisesRegexp(
-        NotImplementedError, "forward not implemented"):
+    with self.assertRaisesRegex(NotImplementedError, "forward not implemented"):
       bij.forward(0)
 
-    with self.assertRaisesRegexp(
-        NotImplementedError, "inverse_log_det_jacobian not implemented"):
+    with self.assertRaisesRegex(NotImplementedError,
+                                "inverse_log_det_jacobian not implemented"):
       bij.inverse_log_det_jacobian(0, event_ndims=0)
 
-    with self.assertRaisesRegexp(
-        NotImplementedError, "forward_log_det_jacobian not implemented"):
+    with self.assertRaisesRegex(NotImplementedError,
+                                "forward_log_det_jacobian not implemented"):
       bij.forward_log_det_jacobian(0, event_ndims=0)
 
 
@@ -121,16 +119,16 @@ class BijectorTestEventNdims(test.TestCase):
 
   def testBijectorNonIntegerEventNdims(self):
     bij = BrokenBijector()
-    with self.assertRaisesRegexp(ValueError, "Expected integer"):
+    with self.assertRaisesRegex(ValueError, "Expected integer"):
       bij.forward_log_det_jacobian(1., event_ndims=1.5)
-    with self.assertRaisesRegexp(ValueError, "Expected integer"):
+    with self.assertRaisesRegex(ValueError, "Expected integer"):
       bij.inverse_log_det_jacobian(1., event_ndims=1.5)
 
   def testBijectorArrayEventNdims(self):
     bij = BrokenBijector()
-    with self.assertRaisesRegexp(ValueError, "Expected scalar"):
+    with self.assertRaisesRegex(ValueError, "Expected scalar"):
       bij.forward_log_det_jacobian(1., event_ndims=(1, 2))
-    with self.assertRaisesRegexp(ValueError, "Expected scalar"):
+    with self.assertRaisesRegex(ValueError, "Expected scalar"):
       bij.inverse_log_det_jacobian(1., event_ndims=(1, 2))
 
   @test_util.run_deprecated_v1
@@ -248,7 +246,7 @@ class BijectorReduceEventDimsTest(test.TestCase):
   def testReduceEventNdimsForwardRaiseError(self):
     x = [[[1., 2.], [3., 4.]]]
     bij = ExpOnlyJacobian(forward_min_event_ndims=1)
-    with self.assertRaisesRegexp(ValueError, "must be larger than"):
+    with self.assertRaisesRegex(ValueError, "must be larger than"):
       bij.forward_log_det_jacobian(x, event_ndims=0)
 
   def testReduceEventNdimsInverse(self):
@@ -267,7 +265,7 @@ class BijectorReduceEventDimsTest(test.TestCase):
   def testReduceEventNdimsInverseRaiseError(self):
     x = [[[1., 2.], [3., 4.]]]
     bij = ExpOnlyJacobian(forward_min_event_ndims=1)
-    with self.assertRaisesRegexp(ValueError, "must be larger than"):
+    with self.assertRaisesRegex(ValueError, "must be larger than"):
       bij.inverse_log_det_jacobian(x, event_ndims=0)
 
   def testReduceEventNdimsForwardConstJacobian(self):
diff --git a/tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py b/tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py
index 1e967de570f..282196e4402 100644
--- a/tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py
+++ b/tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py
@@ -82,14 +82,14 @@ class KLTest(test.TestCase):
     class MyDist(normal.Normal):
       pass
 
-    with self.assertRaisesRegexp(TypeError, "must be callable"):
+    with self.assertRaisesRegex(TypeError, "must be callable"):
       kullback_leibler.RegisterKL(MyDist, MyDist)("blah")
 
     # First registration is OK
     kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)
 
     # Second registration fails
-    with self.assertRaisesRegexp(ValueError, "has already been registered"):
+    with self.assertRaisesRegex(ValueError, "has already been registered"):
       kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)
 
   def testExactRegistrationsAllMatch(self):
diff --git a/tensorflow/python/kernel_tests/distributions/student_t_test.py b/tensorflow/python/kernel_tests/distributions/student_t_test.py
index 20c8613e91f..9848e4fd334 100644
--- a/tensorflow/python/kernel_tests/distributions/student_t_test.py
+++ b/tensorflow/python/kernel_tests/distributions/student_t_test.py
@@ -61,10 +61,10 @@ class StudentTTest(test.TestCase):
     student = student_t.StudentT(df, loc=mu, scale=-sigma)
 
     log_pdf = student.log_prob(t)
-    self.assertEquals(log_pdf.get_shape(), (6,))
+    self.assertEqual(log_pdf.get_shape(), (6,))
     log_pdf_values = self.evaluate(log_pdf)
     pdf = student.prob(t)
-    self.assertEquals(pdf.get_shape(), (6,))
+    self.assertEqual(pdf.get_shape(), (6,))
     pdf_values = self.evaluate(pdf)
 
     if not stats:
@@ -116,10 +116,10 @@ class StudentTTest(test.TestCase):
     student = student_t.StudentT(df, loc=mu, scale=sigma)
 
     log_cdf = student.log_cdf(t)
-    self.assertEquals(log_cdf.get_shape(), (6,))
+    self.assertEqual(log_cdf.get_shape(), (6,))
     log_cdf_values = self.evaluate(log_cdf)
     cdf = student.cdf(t)
-    self.assertEquals(cdf.get_shape(), (6,))
+    self.assertEqual(cdf.get_shape(), (6,))
     cdf_values = self.evaluate(cdf)
 
     if not stats:
diff --git a/tensorflow/python/kernel_tests/distributions/util_test.py b/tensorflow/python/kernel_tests/distributions/util_test.py
index 030ad601bf4..093fdb69dc3 100644
--- a/tensorflow/python/kernel_tests/distributions/util_test.py
+++ b/tensorflow/python/kernel_tests/distributions/util_test.py
@@ -495,7 +495,7 @@ class RotateTransposeTest(test.TestCase):
       error_message = r"Attempt to convert a value \(None\)"
     else:
       error_message = "None values not supported."
-    with self.assertRaisesRegexp(ValueError, error_message):
+    with self.assertRaisesRegex(ValueError, error_message):
       du.rotate_transpose(None, 1)
     for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):
       for shift in np.arange(-5, 5):
diff --git a/tensorflow/python/kernel_tests/fifo_queue_test.py b/tensorflow/python/kernel_tests/fifo_queue_test.py
index 880e949dd70..b470115440a 100644
--- a/tensorflow/python/kernel_tests/fifo_queue_test.py
+++ b/tensorflow/python/kernel_tests/fifo_queue_test.py
@@ -167,20 +167,20 @@ class FIFOQueueTest(test.TestCase):
       gc.collect()
       # If executing eagerly, deleting the Module should clean up the queue
       # resources.
-      with self.assertRaisesRegexp(errors_impl.NotFoundError,
-                                   r"Resource .* does not exist."):
+      with self.assertRaisesRegex(errors_impl.NotFoundError,
+                                  r"Resource .* does not exist."):
         gen_resource_variable_ops.destroy_resource_op(
             q1_handle, ignore_lookup_error=False)
-      with self.assertRaisesRegexp(errors_impl.NotFoundError,
-                                   r"Resource .* does not exist."):
+      with self.assertRaisesRegex(errors_impl.NotFoundError,
+                                  r"Resource .* does not exist."):
         gen_resource_variable_ops.destroy_resource_op(
             q2_handle, ignore_lookup_error=False)
 
   def testEnqueueDictWithoutNames(self):
     q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
-    with self.assertRaisesRegexp(ValueError, "must have names"):
+    with self.assertRaisesRegex(ValueError, "must have names"):
       q.enqueue({"a": 12.0})
-    with self.assertRaisesRegexp(ValueError, "must have names"):
+    with self.assertRaisesRegex(ValueError, "must have names"):
       q.enqueue_many({"a": [12.0, 13.0]})
 
   def testDequeue(self):
@@ -473,8 +473,8 @@ class UnconvertedFIFOQueueTests(test.TestCase):
         self.assertEqual([elem], self.evaluate(dequeued_t))
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                   "is closed and has insufficient"):
+      with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                  "is closed and has insufficient"):
         self.evaluate(dequeued_t)
 
   def testDoesNotLoseValue(self):
@@ -618,30 +618,30 @@ class UnconvertedFIFOQueueTests(test.TestCase):
           10, dtypes_lib.float32, shapes=((),), names="f")
       # Verify that enqueue() checks that when using names we must enqueue a
       # dictionary.
-      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
+      with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
         enqueue_op = q.enqueue(10.0)
-      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
+      with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
         enqueue_op = q.enqueue((10.0,))
       # The dictionary keys must match the queue component names.
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op = q.enqueue({})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op = q.enqueue({"x": 12})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
       enqueue_op = q.enqueue({"f": 10.0})
       enqueue_op2 = q.enqueue({"f": 20.0})
       enqueue_op3 = q.enqueue({"f": 30.0})
       # Verify that enqueue_many() checks that when using names we must enqueue
       # a dictionary.
-      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
+      with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
         enqueue_op4 = q.enqueue_many([40.0, 50.0])
       # The dictionary keys must match the queue component names.
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op4 = q.enqueue_many({})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op4 = q.enqueue_many({"x": 12})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
       enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
       dequeue = q.dequeue()
@@ -665,32 +665,32 @@ class UnconvertedFIFOQueueTests(test.TestCase):
           names=("f", "i", "s"))
       # Verify that enqueue() checks that when using names we must enqueue a
       # dictionary.
-      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
+      with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
         enqueue_op = q.enqueue((10.0, 123, "aa"))
       # The dictionary keys must match the queue component names.
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op = q.enqueue({})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op = q.enqueue({"x": 10.0})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op = q.enqueue({"i": 12, "s": "aa"})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
       enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
       enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
       enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
       # Verify that enqueue_many() checks that when using names we must enqueue
       # a dictionary.
-      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
+      with self.assertRaisesRegex(ValueError, "enqueue a dictionary"):
         enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
       # The dictionary keys must match the queue component names.
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op4 = q.enqueue_many({})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
-      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
+      with self.assertRaisesRegex(ValueError, "match names of Queue"):
         enqueue_op4 = q.enqueue_many({
             "f": [40.0, 50.0],
             "i": [126, 127],
@@ -743,8 +743,8 @@ class UnconvertedFIFOQueueTests(test.TestCase):
       elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
       elems_bad = array_ops.placeholder(dtypes_lib.int32)
       enqueue_op = q.enqueue((elems_ok, elems_bad))
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   r"Expected \[3,3\], got \[3,4\]"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  r"Expected \[3,3\], got \[3,4\]"):
         sess.run([enqueue_op],
                  feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
 
@@ -756,9 +756,10 @@ class UnconvertedFIFOQueueTests(test.TestCase):
       elems_bad = array_ops.placeholder(dtypes_lib.int32)
       enqueue_op = q.enqueue_many((elems_ok, elems_bad))
       dequeued_t = q.dequeue_many(2)
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Shape mismatch in tuple component 1. "
-                                   r"Expected \[2,3,3\], got \[2,3,4\]"):
+      with self.assertRaisesRegex(
+          errors_impl.InvalidArgumentError,
+          "Shape mismatch in tuple component 1. "
+          r"Expected \[2,3,3\], got \[2,3,4\]"):
         sess.run([enqueue_op],
                  feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
         self.evaluate(dequeued_t)
@@ -995,8 +996,8 @@ class FIFOQueueParallelTests(test.TestCase):
         for elem in elems:
           self.assertEqual([elem], self.evaluate(dequeued_t))
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1018,8 +1019,8 @@ class FIFOQueueParallelTests(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1046,8 +1047,8 @@ class FIFOQueueParallelTests(test.TestCase):
       def dequeue():
         self.assertAllEqual(elems, self.evaluate(dequeued_t))
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1074,8 +1075,8 @@ class FIFOQueueParallelTests(test.TestCase):
       def dequeue():
         self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1189,8 +1190,8 @@ class FIFOQueueParallelTests(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1212,8 +1213,8 @@ class FIFOQueueParallelTests(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1234,7 +1235,7 @@ class FIFOQueueParallelTests(test.TestCase):
       close_op.run()
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
         enqueue_op.run()
 
   def testEnqueueManyToClosedQueue(self):
@@ -1248,7 +1249,7 @@ class FIFOQueueParallelTests(test.TestCase):
       close_op.run()
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
         enqueue_op.run()
 
   def testBlockingEnqueueToFullQueue(self):
@@ -1702,8 +1703,8 @@ class FIFOQueueWithTimeoutTest(test.TestCase):
 
       # Intentionally do not run any enqueue_ops so that dequeue will block
       # until operation_timeout_in_ms.
-      with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
-                                   "Timed out waiting for notification"):
+      with self.assertRaisesRegex(errors_impl.DeadlineExceededError,
+                                  "Timed out waiting for notification"):
         self.evaluate(dequeued_t)
 
   def testReusableAfterTimeout(self):
@@ -1712,12 +1713,12 @@ class FIFOQueueWithTimeoutTest(test.TestCase):
       dequeued_t = q.dequeue()
       enqueue_op = q.enqueue(37)
 
-      with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
-                                   "Timed out waiting for notification"):
+      with self.assertRaisesRegex(errors_impl.DeadlineExceededError,
+                                  "Timed out waiting for notification"):
         sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
 
-      with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
-                                   "Timed out waiting for notification"):
+      with self.assertRaisesRegex(errors_impl.DeadlineExceededError,
+                                  "Timed out waiting for notification"):
         sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
 
       self.evaluate(enqueue_op)
diff --git a/tensorflow/python/kernel_tests/functional_ops_test.py b/tensorflow/python/kernel_tests/functional_ops_test.py
index cc1cfd57c18..7c660d837f3 100644
--- a/tensorflow/python/kernel_tests/functional_ops_test.py
+++ b/tensorflow/python/kernel_tests/functional_ops_test.py
@@ -257,7 +257,7 @@ class FunctionalOpsTest(test.TestCase):
     elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
     initializer = np.array(1.0)
     # Multiply a * 1 each time
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "two structures don't have the same nested structure"):
       functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
 
@@ -712,12 +712,12 @@ class FunctionalOpsTest(test.TestCase):
           return n - 1, x + n, x
 
         with self.session(graph=g, use_gpu=use_gpu):
-          with self.assertRaisesRegexp(
+          with self.assertRaisesRegex(
               errors.InvalidArgumentError,
               "Expected a single scalar.*got 2 tensors."):
             functional_ops.While([5., 0.], CondReturnsTooManyArgs,
                                  Body)[0].eval()
-          with self.assertRaisesRegexp(
+          with self.assertRaisesRegex(
               errors.InvalidArgumentError,
               "While loop body returned 3 arguments. Expected: 2"):
             functional_ops.While([5., 0.], Cond,
@@ -934,13 +934,13 @@ class FunctionalOpsTest(test.TestCase):
       return v, v
 
     with self.test_session(use_gpu=True):
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "must be a scalar"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "must be a scalar"):
         functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Invalid start/limit/delta"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Invalid start/limit/delta"):
         functional_ops.For(0, 10, -1, [0.0], Foo)[0].eval()
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "For loop body returned 2 arguments. Expected: 1"):
         functional_ops.For(0, 10, 1, [0.0], ReturnsTooManyArgs)[0].eval()
@@ -1169,8 +1169,7 @@ class PartitionedCallTest(test.TestCase):
         args=[constant_op.constant([1, 2, 3], dtype=dtypes.int32)],
         f=AddFive,
         executor_type="NON_EXISTENT_EXECUTOR")
-    with self.assertRaisesRegexp(errors.NotFoundError,
-                                 "NON_EXISTENT_EXECUTOR"):
+    with self.assertRaisesRegex(errors.NotFoundError, "NON_EXISTENT_EXECUTOR"):
       self.evaluate(op)
 
 
diff --git a/tensorflow/python/kernel_tests/identity_n_op_py_test.py b/tensorflow/python/kernel_tests/identity_n_op_py_test.py
index a1110d640f0..0498c1b019c 100644
--- a/tensorflow/python/kernel_tests/identity_n_op_py_test.py
+++ b/tensorflow/python/kernel_tests/identity_n_op_py_test.py
@@ -65,9 +65,9 @@ class IdentityNOpTest(test.TestCase):
       shape = [2, 3]
       array_2x3 = [[1, 2, 3], [6, 5, 4]]
       tensor = constant_op.constant(array_2x3)
-      self.assertEquals(shape, tensor.get_shape())
-      self.assertEquals(shape, array_ops.identity_n([tensor])[0].get_shape())
-      self.assertEquals(shape, array_ops.identity_n([array_2x3])[0].get_shape())
+      self.assertEqual(shape, tensor.get_shape())
+      self.assertEqual(shape, array_ops.identity_n([tensor])[0].get_shape())
+      self.assertEqual(shape, array_ops.identity_n([array_2x3])[0].get_shape())
 
 
 if __name__ == "__main__":
diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py
index 5b147847496..dd6e8fb5e63 100644
--- a/tensorflow/python/kernel_tests/init_ops_test.py
+++ b/tensorflow/python/kernel_tests/init_ops_test.py
@@ -245,11 +245,11 @@ class ConstantInitializersTest(test.TestCase):
 
   def testInvalidValueTypeForConstantInitializerCausesTypeError(self):
     c = constant_op.constant([1.0, 2.0, 3.0])
-    with self.assertRaisesRegexp(TypeError,
-                                 r"Invalid type for initial value: .*Tensor.*"):
+    with self.assertRaisesRegex(TypeError,
+                                r"Invalid type for initial value: .*Tensor.*"):
       init_ops.constant_initializer(c, dtype=dtypes.float32)
     v = variables.Variable([3.0, 2.0, 1.0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r"Invalid type for initial value: .*Variable.*"):
       init_ops.constant_initializer(v, dtype=dtypes.float32)
 
diff --git a/tensorflow/python/kernel_tests/inplace_ops_test.py b/tensorflow/python/kernel_tests/inplace_ops_test.py
index cbb63cecde7..72d4c28a31f 100644
--- a/tensorflow/python/kernel_tests/inplace_ops_test.py
+++ b/tensorflow/python/kernel_tests/inplace_ops_test.py
@@ -160,14 +160,14 @@ class InplaceOpsTest(test_util.TensorFlowTestCase):
 
   def testError(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "must be a vector"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "must be a vector"):
         _ = inplace_ops.inplace_update([[1.]], [[0]], [[10]]).eval()
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "x and v shape doesn't match"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "x and v shape doesn't match"):
         _ = inplace_ops.inplace_update([[1.]], [0], [10]).eval()
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "i and x shape doesn't match"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "i and x shape doesn't match"):
         _ = inplace_ops.inplace_update([[1.]], [0, 1], [[10]]).eval()
 
   @test_util.run_deprecated_v1
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
index 627349c69b3..597ca3a1606 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
@@ -63,11 +63,11 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
     self.assertIs(op_sum[0], op_a)
 
   def test_at_least_one_operators_required(self):
-    with self.assertRaisesRegexp(ValueError, "must contain at least one"):
+    with self.assertRaisesRegex(ValueError, "must contain at least one"):
       add_operators([])
 
   def test_attempting_to_add_numbers_raises(self):
-    with self.assertRaisesRegexp(TypeError, "contain only LinearOperator"):
+    with self.assertRaisesRegex(TypeError, "contain only LinearOperator"):
       add_operators([1, 2])
 
   @test_util.run_deprecated_v1
@@ -157,19 +157,19 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
   def test_incompatible_domain_dimensions_raises(self):
     op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
     op2 = linalg.LinearOperatorDiag(rng.rand(2, 4))
-    with self.assertRaisesRegexp(ValueError, "must.*same domain dimension"):
+    with self.assertRaisesRegex(ValueError, "must.*same domain dimension"):
       add_operators([op1, op2])
 
   def test_incompatible_range_dimensions_raises(self):
     op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3))
     op2 = linalg.LinearOperatorDiag(rng.rand(3, 3))
-    with self.assertRaisesRegexp(ValueError, "must.*same range dimension"):
+    with self.assertRaisesRegex(ValueError, "must.*same range dimension"):
       add_operators([op1, op2])
 
   def test_non_broadcastable_batch_shape_raises(self):
     op1 = linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3))
     op2 = linalg.LinearOperatorDiag(rng.rand(4, 3, 3))
-    with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
+    with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
       add_operators([op1, op2])
 
 
@@ -258,7 +258,7 @@ class LinearOperatorOrderOfAdditionTest(test.TestCase):
     ]
     # tril cannot be added in tier 0, and the intermediate tier 1 with the
     # BadAdder will catch it and raise.
-    with self.assertRaisesRegexp(AssertionError, "BadAdder.can_add called"):
+    with self.assertRaisesRegex(AssertionError, "BadAdder.can_add called"):
       add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
 
 
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
index 5619f1cd38a..88ab7079593 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
@@ -102,11 +102,11 @@ class LinearOperatorAdjointTest(
     matrix = [[1., 0.], [1., 1.]]
     operator = linalg.LinearOperatorFullMatrix(
         matrix, is_positive_definite=False)
-    with self.assertRaisesRegexp(ValueError, "positive-definite"):
+    with self.assertRaisesRegex(ValueError, "positive-definite"):
       LinearOperatorAdjoint(operator, is_positive_definite=True)
 
     operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False)
-    with self.assertRaisesRegexp(ValueError, "self-adjoint"):
+    with self.assertRaisesRegex(ValueError, "self-adjoint"):
       LinearOperatorAdjoint(operator, is_self_adjoint=True)
 
   def test_name(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py
index 8057d055783..bbdc4f1a5fa 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_algebra_test.py
@@ -67,14 +67,14 @@ class AdjointTest(test.TestCase):
     class CustomLinOp(linear_operator.LinearOperator):
       pass
 
-    with self.assertRaisesRegexp(TypeError, "must be callable"):
+    with self.assertRaisesRegex(TypeError, "must be callable"):
       linear_operator_algebra.RegisterAdjoint(CustomLinOp)("blah")
 
     # First registration is OK
     linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None)
 
     # Second registration fails
-    with self.assertRaisesRegexp(ValueError, "has already been registered"):
+    with self.assertRaisesRegex(ValueError, "has already been registered"):
       linear_operator_algebra.RegisterAdjoint(CustomLinOp)(lambda a: None)
 
   def testExactAdjointRegistrationsAllMatch(self):
@@ -102,10 +102,10 @@ class CholeskyTest(test.TestCase):
     def _cholesky(a):  # pylint: disable=unused-argument,unused-variable
       return "OK"
 
-    with self.assertRaisesRegexp(ValueError, "positive definite"):
+    with self.assertRaisesRegex(ValueError, "positive definite"):
       CustomLinOp(dtype=None, is_self_adjoint=True).cholesky()
 
-    with self.assertRaisesRegexp(ValueError, "self adjoint"):
+    with self.assertRaisesRegex(ValueError, "self adjoint"):
       CustomLinOp(dtype=None, is_positive_definite=True).cholesky()
 
     custom_linop = CustomLinOp(
@@ -117,14 +117,14 @@ class CholeskyTest(test.TestCase):
     class CustomLinOp(linear_operator.LinearOperator):
       pass
 
-    with self.assertRaisesRegexp(TypeError, "must be callable"):
+    with self.assertRaisesRegex(TypeError, "must be callable"):
       linear_operator_algebra.RegisterCholesky(CustomLinOp)("blah")
 
     # First registration is OK
     linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None)
 
     # Second registration fails
-    with self.assertRaisesRegexp(ValueError, "has already been registered"):
+    with self.assertRaisesRegex(ValueError, "has already been registered"):
       linear_operator_algebra.RegisterCholesky(CustomLinOp)(lambda a: None)
 
   def testExactCholeskyRegistrationsAllMatch(self):
@@ -161,7 +161,7 @@ class MatmulTest(test.TestCase):
     class CustomLinOp(linear_operator.LinearOperator):
       pass
 
-    with self.assertRaisesRegexp(TypeError, "must be callable"):
+    with self.assertRaisesRegex(TypeError, "must be callable"):
       linear_operator_algebra.RegisterMatmul(CustomLinOp, CustomLinOp)("blah")
 
     # First registration is OK
@@ -169,7 +169,7 @@ class MatmulTest(test.TestCase):
         CustomLinOp, CustomLinOp)(lambda a: None)
 
     # Second registration fails
-    with self.assertRaisesRegexp(ValueError, "has already been registered"):
+    with self.assertRaisesRegex(ValueError, "has already been registered"):
       linear_operator_algebra.RegisterMatmul(
           CustomLinOp, CustomLinOp)(lambda a: None)
 
@@ -210,7 +210,7 @@ class SolveTest(test.TestCase):
     class CustomLinOp(linear_operator.LinearOperator):
       pass
 
-    with self.assertRaisesRegexp(TypeError, "must be callable"):
+    with self.assertRaisesRegex(TypeError, "must be callable"):
       linear_operator_algebra.RegisterSolve(CustomLinOp, CustomLinOp)("blah")
 
     # First registration is OK
@@ -218,7 +218,7 @@ class SolveTest(test.TestCase):
         CustomLinOp, CustomLinOp)(lambda a: None)
 
     # Second registration fails
-    with self.assertRaisesRegexp(ValueError, "has already been registered"):
+    with self.assertRaisesRegex(ValueError, "has already been registered"):
       linear_operator_algebra.RegisterSolve(
           CustomLinOp, CustomLinOp)(lambda a: None)
 
@@ -247,7 +247,7 @@ class InverseTest(test.TestCase):
     def _inverse(a):  # pylint: disable=unused-argument,unused-variable
       return "OK"
 
-    with self.assertRaisesRegexp(ValueError, "singular"):
+    with self.assertRaisesRegex(ValueError, "singular"):
       CustomLinOp(dtype=None, is_non_singular=False).inverse()
 
     self.assertEqual("OK", CustomLinOp(
@@ -258,14 +258,14 @@ class InverseTest(test.TestCase):
     class CustomLinOp(linear_operator.LinearOperator):
       pass
 
-    with self.assertRaisesRegexp(TypeError, "must be callable"):
+    with self.assertRaisesRegex(TypeError, "must be callable"):
       linear_operator_algebra.RegisterInverse(CustomLinOp)("blah")
 
     # First registration is OK
     linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)
 
     # Second registration fails
-    with self.assertRaisesRegexp(ValueError, "has already been registered"):
+    with self.assertRaisesRegex(ValueError, "has already been registered"):
       linear_operator_algebra.RegisterInverse(CustomLinOp)(lambda a: None)
 
   def testExactRegistrationsAllMatch(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py
index 552825fb47c..e0e6fedd34e 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_block_diag_test.py
@@ -247,7 +247,7 @@ class SquareLinearOperatorBlockDiagTest(
     self.assertFalse(operator.is_positive_definite)
     self.assertTrue(operator.is_non_singular)
 
-    with self.assertRaisesRegexp(ValueError, "always non-singular"):
+    with self.assertRaisesRegex(ValueError, "always non-singular"):
       block_diag.LinearOperatorBlockDiag(
           [operator_1, operator_2], is_non_singular=False)
 
@@ -265,7 +265,7 @@ class SquareLinearOperatorBlockDiagTest(
         linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
         linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
     ]
-    with self.assertRaisesRegexp(TypeError, "same dtype"):
+    with self.assertRaisesRegex(TypeError, "same dtype"):
       block_diag.LinearOperatorBlockDiag(operators)
 
   def test_non_square_operator_raises(self):
@@ -273,11 +273,11 @@ class SquareLinearOperatorBlockDiagTest(
         linalg.LinearOperatorFullMatrix(rng.rand(3, 4), is_square=False),
         linalg.LinearOperatorFullMatrix(rng.rand(3, 3))
     ]
-    with self.assertRaisesRegexp(ValueError, "square matrices"):
+    with self.assertRaisesRegex(ValueError, "square matrices"):
       block_diag.LinearOperatorBlockDiag(operators)
 
   def test_empty_operators_raises(self):
-    with self.assertRaisesRegexp(ValueError, "non-empty"):
+    with self.assertRaisesRegex(ValueError, "non-empty"):
       block_diag.LinearOperatorBlockDiag([])
 
   def test_incompatible_input_blocks_raises(self):
@@ -291,7 +291,7 @@ class SquareLinearOperatorBlockDiagTest(
     x = np.random.rand(2, 4, 5).tolist()
     msg = ("dimension does not match" if context.executing_eagerly()
            else "input structure is ambiguous")
-    with self.assertRaisesRegexp(ValueError, msg):
+    with self.assertRaisesRegex(ValueError, msg):
       operator.matmul(x)
 
 
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py
index dfa5c900ecd..a254427fa46 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_block_lower_triangular_test.py
@@ -219,7 +219,7 @@ class SquareLinearOperatorBlockLowerTriangularTest(
     self.assertFalse(operator.is_positive_definite)
     self.assertTrue(operator.is_non_singular)
 
-    with self.assertRaisesRegexp(ValueError, "always non-singular"):
+    with self.assertRaisesRegex(ValueError, "always non-singular"):
       block_lower_triangular.LinearOperatorBlockLowerTriangular(
           [[operator_1], [operator_2, operator_3]], is_non_singular=False)
 
@@ -230,7 +230,7 @@ class SquareLinearOperatorBlockLowerTriangularTest(
     block_lower_triangular.LinearOperatorBlockLowerTriangular(
         [[operator_1], [operator_4, operator_2]], is_non_singular=True)
 
-    with self.assertRaisesRegexp(ValueError, "always singular"):
+    with self.assertRaisesRegex(ValueError, "always singular"):
       block_lower_triangular.LinearOperatorBlockLowerTriangular(
           [[operator_1], [operator_2, operator_4]], is_non_singular=True)
 
@@ -240,7 +240,7 @@ class SquareLinearOperatorBlockLowerTriangularTest(
         [linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
          linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))]
     ]
-    with self.assertRaisesRegexp(TypeError, "same dtype"):
+    with self.assertRaisesRegex(TypeError, "same dtype"):
       block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
 
   def test_non_square_operator_raises(self):
@@ -249,15 +249,15 @@ class SquareLinearOperatorBlockLowerTriangularTest(
         [linalg.LinearOperatorFullMatrix(rng.rand(4, 4)),
          linalg.LinearOperatorFullMatrix(rng.rand(4, 4))]
     ]
-    with self.assertRaisesRegexp(ValueError, "must be square"):
+    with self.assertRaisesRegex(ValueError, "must be square"):
       block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
 
   def test_empty_operators_raises(self):
-    with self.assertRaisesRegexp(ValueError, "non-empty"):
+    with self.assertRaisesRegex(ValueError, "non-empty"):
       block_lower_triangular.LinearOperatorBlockLowerTriangular([])
 
   def test_operators_wrong_length_raises(self):
-    with self.assertRaisesRegexp(ValueError, "must contain `i` blocks"):
+    with self.assertRaisesRegex(ValueError, "must contain `i` blocks"):
       block_lower_triangular.LinearOperatorBlockLowerTriangular([
           [linalg.LinearOperatorFullMatrix(rng.rand(2, 2))],
           [linalg.LinearOperatorFullMatrix(rng.rand(2, 2))
@@ -269,7 +269,7 @@ class SquareLinearOperatorBlockLowerTriangularTest(
         [linalg.LinearOperatorFullMatrix(rng.rand(3, 4)),
          linalg.LinearOperatorFullMatrix(rng.rand(3, 3))]
     ]
-    with self.assertRaisesRegexp(ValueError, "must be equal"):
+    with self.assertRaisesRegex(ValueError, "must be equal"):
       block_lower_triangular.LinearOperatorBlockLowerTriangular(operators)
 
   def test_incompatible_input_blocks_raises(self):
@@ -286,7 +286,7 @@ class SquareLinearOperatorBlockLowerTriangularTest(
     x = np.random.rand(2, 4, 5).tolist()
     msg = ("dimension does not match" if context.executing_eagerly()
            else "input structure is ambiguous")
-    with self.assertRaisesRegexp(ValueError, msg):
+    with self.assertRaisesRegex(ValueError, msg):
       operator.matmul(x)
 
 
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
index eb506467e29..c3a3ae9fe8a 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
@@ -381,7 +381,7 @@ class LinearOperatorCirculantTestNonHermitianSpectrum(
 
   def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
     spectrum = [1., 2.]
-    with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
+    with self.assertRaisesRegex(ValueError, "real.*always.*self-adjoint"):
       linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=False)
 
   def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
@@ -635,7 +635,7 @@ class LinearOperatorCirculant2DTestNonHermitianSpectrum(
 
   def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
     spectrum = [[1., 2.], [3., 4]]
-    with self.assertRaisesRegexp(ValueError, "real.*always.*self-adjoint"):
+    with self.assertRaisesRegex(ValueError, "real.*always.*self-adjoint"):
       linalg.LinearOperatorCirculant2D(spectrum, is_self_adjoint=False)
 
   def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
@@ -645,7 +645,7 @@ class LinearOperatorCirculant2DTestNonHermitianSpectrum(
 
   def test_invalid_rank_raises(self):
     spectrum = array_ops.constant(np.float32(rng.rand(2)))
-    with self.assertRaisesRegexp(ValueError, "must have at least 2 dimensions"):
+    with self.assertRaisesRegex(ValueError, "must have at least 2 dimensions"):
       linalg.LinearOperatorCirculant2D(spectrum)
 
   def test_tape_safe(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py
index edae1efb845..9bca236bbc3 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_composition_test.py
@@ -113,7 +113,7 @@ class SquareLinearOperatorCompositionTest(
     self.assertFalse(operator.is_positive_definite)
     self.assertTrue(operator.is_non_singular)
 
-    with self.assertRaisesRegexp(ValueError, "always non-singular"):
+    with self.assertRaisesRegex(ValueError, "always non-singular"):
       linalg.LinearOperatorComposition(
           [operator_1, operator_2], is_non_singular=False)
 
@@ -131,11 +131,11 @@ class SquareLinearOperatorCompositionTest(
         linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
         linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
     ]
-    with self.assertRaisesRegexp(TypeError, "same dtype"):
+    with self.assertRaisesRegex(TypeError, "same dtype"):
       linalg.LinearOperatorComposition(operators)
 
   def test_empty_operators_raises(self):
-    with self.assertRaisesRegexp(ValueError, "non-empty"):
+    with self.assertRaisesRegex(ValueError, "non-empty"):
       linalg.LinearOperatorComposition([])
 
 
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py
index 59f5fa20024..d22659c306a 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py
@@ -126,7 +126,7 @@ class LinearOperatorDiagTest(
       self.evaluate(operator.assert_self_adjoint())
 
   def test_scalar_diag_raises(self):
-    with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
+    with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
       linalg.LinearOperatorDiag(1.)
 
   def test_broadcast_matmul_and_solve(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
index 7e01626e1db..1c5f7cfbf31 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py
@@ -249,7 +249,7 @@ class NonSquareLinearOperatorFullMatrixTest(
     self.assertFalse(operator.is_square)
 
   def test_matrix_must_have_at_least_two_dims_or_raises(self):
-    with self.assertRaisesRegexp(ValueError, "at least 2 dimensions"):
+    with self.assertRaisesRegex(ValueError, "at least 2 dimensions"):
       linalg.LinearOperatorFullMatrix([1.])
 
   def test_tape_safe(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py
index 4179d450ad1..5462a7c9071 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_householder_test.py
@@ -76,7 +76,7 @@ class LinearOperatorHouseholderTest(
     return operator, matrix
 
   def test_scalar_reflection_axis_raises(self):
-    with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
+    with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
       householder.LinearOperatorHouseholder(1.)
 
   def test_householder_adjoint_type(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py
index a7fd4d1fc34..ab8306910fa 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py
@@ -89,27 +89,27 @@ class LinearOperatorIdentityTest(
       self.assertAllClose(x, self.evaluate(y))
 
   def test_non_scalar_num_rows_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
+    with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
       linalg_lib.LinearOperatorIdentity(num_rows=[2])
 
   def test_non_integer_num_rows_raises_static(self):
-    with self.assertRaisesRegexp(TypeError, "must be integer"):
+    with self.assertRaisesRegex(TypeError, "must be integer"):
       linalg_lib.LinearOperatorIdentity(num_rows=2.)
 
   def test_negative_num_rows_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+    with self.assertRaisesRegex(ValueError, "must be non-negative"):
       linalg_lib.LinearOperatorIdentity(num_rows=-2)
 
   def test_non_1d_batch_shape_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be a 1-D"):
+    with self.assertRaisesRegex(ValueError, "must be a 1-D"):
       linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=2)
 
   def test_non_integer_batch_shape_raises_static(self):
-    with self.assertRaisesRegexp(TypeError, "must be integer"):
+    with self.assertRaisesRegex(TypeError, "must be integer"):
       linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[2.])
 
   def test_negative_batch_shape_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+    with self.assertRaisesRegex(ValueError, "must be non-negative"):
       linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[-2])
 
   def test_non_scalar_num_rows_raises_dynamic(self):
@@ -148,7 +148,7 @@ class LinearOperatorIdentityTest(
   def test_wrong_matrix_dimensions_raises_static(self):
     operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
     x = rng.randn(3, 3).astype(np.float32)
-    with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
+    with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"):
       operator.matmul(x)
 
   def test_wrong_matrix_dimensions_raises_dynamic(self):
@@ -241,7 +241,7 @@ class LinearOperatorIdentityTest(
     self.assertTrue(operator.is_self_adjoint)
 
     # Any of them False raises because the identity is always self-adjoint etc..
-    with self.assertRaisesRegexp(ValueError, "is always non-singular"):
+    with self.assertRaisesRegex(ValueError, "is always non-singular"):
       operator = linalg_lib.LinearOperatorIdentity(
           num_rows=2,
           is_non_singular=None,
@@ -269,10 +269,10 @@ class LinearOperatorIdentityTest(
         operator.inverse(), linalg_lib.LinearOperatorIdentity)
 
   def test_ref_type_shape_args_raises(self):
-    with self.assertRaisesRegexp(TypeError, "num_rows.*reference"):
+    with self.assertRaisesRegex(TypeError, "num_rows.*reference"):
       linalg_lib.LinearOperatorIdentity(num_rows=variables_module.Variable(2))
 
-    with self.assertRaisesRegexp(TypeError, "batch_shape.*reference"):
+    with self.assertRaisesRegex(TypeError, "batch_shape.*reference"):
       linalg_lib.LinearOperatorIdentity(
           num_rows=2, batch_shape=variables_module.Variable([3]))
 
@@ -380,7 +380,7 @@ class LinearOperatorScaledIdentityTest(
 
   def test_non_scalar_num_rows_raises_static(self):
     # Many "test_...num_rows" tests are performed in LinearOperatorIdentity.
-    with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
+    with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
       linalg_lib.LinearOperatorScaledIdentity(
           num_rows=[2], multiplier=123.)
 
@@ -388,7 +388,7 @@ class LinearOperatorScaledIdentityTest(
     operator = linalg_lib.LinearOperatorScaledIdentity(
         num_rows=2, multiplier=2.2)
     x = rng.randn(3, 3).astype(np.float32)
-    with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
+    with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"):
       operator.matmul(x)
 
   def test_wrong_matrix_dimensions_raises_dynamic(self):
@@ -540,7 +540,7 @@ class LinearOperatorScaledIdentityTest(
         linalg_lib.LinearOperatorScaledIdentity)
 
   def test_ref_type_shape_args_raises(self):
-    with self.assertRaisesRegexp(TypeError, "num_rows.*reference"):
+    with self.assertRaisesRegex(TypeError, "num_rows.*reference"):
       linalg_lib.LinearOperatorScaledIdentity(
           num_rows=variables_module.Variable(2), multiplier=1.23)
 
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_inversion_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_inversion_test.py
index 4b2ce3d9da7..618556dc5a5 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_inversion_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_inversion_test.py
@@ -100,11 +100,11 @@ class LinearOperatorInversionTest(
     matrix = [[1., 0.], [1., 1.]]
     operator = linalg.LinearOperatorFullMatrix(
         matrix, is_positive_definite=False)
-    with self.assertRaisesRegexp(ValueError, "positive-definite"):
+    with self.assertRaisesRegex(ValueError, "positive-definite"):
       LinearOperatorInversion(operator, is_positive_definite=True)
 
     operator = linalg.LinearOperatorFullMatrix(matrix, is_self_adjoint=False)
-    with self.assertRaisesRegexp(ValueError, "self-adjoint"):
+    with self.assertRaisesRegex(ValueError, "self-adjoint"):
       LinearOperatorInversion(operator, is_self_adjoint=True)
 
   def test_singular_raises(self):
@@ -112,11 +112,11 @@ class LinearOperatorInversionTest(
     matrix = [[1., 1.], [1., 1.]]
 
     operator = linalg.LinearOperatorFullMatrix(matrix, is_non_singular=False)
-    with self.assertRaisesRegexp(ValueError, "is_non_singular"):
+    with self.assertRaisesRegex(ValueError, "is_non_singular"):
       LinearOperatorInversion(operator)
 
     operator = linalg.LinearOperatorFullMatrix(matrix)
-    with self.assertRaisesRegexp(ValueError, "is_non_singular"):
+    with self.assertRaisesRegex(ValueError, "is_non_singular"):
       LinearOperatorInversion(operator, is_non_singular=False)
 
   def test_name(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
index 04d8ab2938a..1d002a171f6 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
@@ -166,7 +166,7 @@ class SquareLinearOperatorKroneckerTest(
     self.assertFalse(operator.is_positive_definite)
     self.assertTrue(operator.is_non_singular)
 
-    with self.assertRaisesRegexp(ValueError, "always non-singular"):
+    with self.assertRaisesRegex(ValueError, "always non-singular"):
       kronecker.LinearOperatorKronecker(
           [operator_1, operator_2], is_non_singular=False)
 
@@ -184,11 +184,11 @@ class SquareLinearOperatorKroneckerTest(
         linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3)),
         linalg.LinearOperatorFullMatrix(rng.rand(2, 3, 3).astype(np.float32))
     ]
-    with self.assertRaisesRegexp(TypeError, "same dtype"):
+    with self.assertRaisesRegex(TypeError, "same dtype"):
       kronecker.LinearOperatorKronecker(operators)
 
   def test_empty_or_one_operators_raises(self):
-    with self.assertRaisesRegexp(ValueError, ">=1 operators"):
+    with self.assertRaisesRegex(ValueError, ">=1 operators"):
       kronecker.LinearOperatorKronecker([])
 
   def test_kronecker_adjoint_type(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
index d27ab7d6ba5..2c14d4021db 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
@@ -310,34 +310,34 @@ class LinearOperatorLowRankUpdateBroadcastsShape(test.TestCase):
     base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
     u = rng.rand(5, 3, 2)
     v = rng.rand(4, 3, 2)
-    with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
+    with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
       linalg.LinearOperatorLowRankUpdate(base_operator, u=u, v=v)
 
   def test_u_and_base_operator_incompatible_batch_shape_raises(self):
     base_operator = linalg.LinearOperatorIdentity(
         num_rows=3, batch_shape=[4], dtype=np.float64)
     u = rng.rand(5, 3, 2)
-    with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
+    with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
       linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
 
   def test_u_and_base_operator_incompatible_domain_dimension(self):
     base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
     u = rng.rand(5, 4, 2)
-    with self.assertRaisesRegexp(ValueError, "not compatible"):
+    with self.assertRaisesRegex(ValueError, "not compatible"):
       linalg.LinearOperatorLowRankUpdate(base_operator, u=u)
 
   def test_u_and_diag_incompatible_low_rank_raises(self):
     base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
     u = rng.rand(5, 3, 2)
     diag = rng.rand(5, 4)  # Last dimension should be 2
-    with self.assertRaisesRegexp(ValueError, "not compatible"):
+    with self.assertRaisesRegex(ValueError, "not compatible"):
       linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
 
   def test_diag_incompatible_batch_shape_raises(self):
     base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64)
     u = rng.rand(5, 3, 2)
     diag = rng.rand(4, 2)  # First dimension should be 5
-    with self.assertRaisesRegexp(ValueError, "Incompatible shapes"):
+    with self.assertRaisesRegex(ValueError, "Incompatible shapes"):
       linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag)
 
 
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py
index 22a7aa798b5..a54d1944f54 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py
@@ -86,7 +86,7 @@ class LinearOperatorLowerTriangularTest(
     self.assertFalse(operator.is_self_adjoint)
 
   def test_tril_must_have_at_least_two_dims_or_raises(self):
-    with self.assertRaisesRegexp(ValueError, "at least 2 dimensions"):
+    with self.assertRaisesRegex(ValueError, "at least 2 dimensions"):
       linalg.LinearOperatorLowerTriangular([1.])
 
   def test_triangular_diag_matmul(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_permutation_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_permutation_test.py
index 864d5f2ee52..78d477be685 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_permutation_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_permutation_test.py
@@ -75,14 +75,14 @@ class LinearOperatorPermutationTest(
 
   def test_permutation_raises(self):
     perm = constant_op.constant(0, dtype=dtypes.int32)
-    with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
+    with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
       permutation.LinearOperatorPermutation(perm)
     perm = [0., 1., 2.]
-    with self.assertRaisesRegexp(TypeError, "must be integer dtype"):
+    with self.assertRaisesRegex(TypeError, "must be integer dtype"):
       permutation.LinearOperatorPermutation(perm)
     perm = [-1, 2, 3]
-    with self.assertRaisesRegexp(
-        ValueError, "must be a vector of unique integers"):
+    with self.assertRaisesRegex(ValueError,
+                                "must be a vector of unique integers"):
       permutation.LinearOperatorPermutation(perm)
 
   def test_to_dense_4x4(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_test.py
index 9280abc5f5e..475cac212ce 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_test.py
@@ -173,29 +173,29 @@ class LinearOperatorTest(test.TestCase):
     self.assertFalse(operator.is_square)
 
   def test_is_square_set_incorrectly_to_false_raises(self):
-    with self.assertRaisesRegexp(ValueError, "but.*was square"):
+    with self.assertRaisesRegex(ValueError, "but.*was square"):
       _ = LinearOperatorShape(shape=(2, 4, 4), is_square=False).is_square
 
   def test_is_square_set_inconsistent_with_other_hints_raises(self):
-    with self.assertRaisesRegexp(ValueError, "is always square"):
+    with self.assertRaisesRegex(ValueError, "is always square"):
       matrix = array_ops.placeholder_with_default(input=(), shape=None)
       LinearOperatorMatmulSolve(matrix, is_non_singular=True, is_square=False)
 
-    with self.assertRaisesRegexp(ValueError, "is always square"):
+    with self.assertRaisesRegex(ValueError, "is always square"):
       matrix = array_ops.placeholder_with_default(input=(), shape=None)
       LinearOperatorMatmulSolve(
           matrix, is_positive_definite=True, is_square=False)
 
   def test_non_square_operators_raise_on_determinant_and_solve(self):
     operator = LinearOperatorShape((2, 3))
-    with self.assertRaisesRegexp(NotImplementedError, "not be square"):
+    with self.assertRaisesRegex(NotImplementedError, "not be square"):
       operator.determinant()
-    with self.assertRaisesRegexp(NotImplementedError, "not be square"):
+    with self.assertRaisesRegex(NotImplementedError, "not be square"):
       operator.log_abs_determinant()
-    with self.assertRaisesRegexp(NotImplementedError, "not be square"):
+    with self.assertRaisesRegex(NotImplementedError, "not be square"):
       operator.solve(rng.rand(2, 2))
 
-    with self.assertRaisesRegexp(ValueError, "is always square"):
+    with self.assertRaisesRegex(ValueError, "is always square"):
       matrix = array_ops.placeholder_with_default(input=(), shape=None)
       LinearOperatorMatmulSolve(
           matrix, is_positive_definite=True, is_square=False)
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_toeplitz_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_toeplitz_test.py
index 918c238d352..b2ce96ebcc7 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_toeplitz_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_toeplitz_test.py
@@ -130,13 +130,13 @@ class LinearOperatorToeplitzTest(
     return operator, matrix
 
   def test_scalar_row_col_raises(self):
-    with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
+    with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
       linear_operator_toeplitz.LinearOperatorToeplitz(1., 1.)
 
-    with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
+    with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
       linear_operator_toeplitz.LinearOperatorToeplitz([1.], 1.)
 
-    with self.assertRaisesRegexp(ValueError, "must have at least 1 dimension"):
+    with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
       linear_operator_toeplitz.LinearOperatorToeplitz(1., [1.])
 
   def test_tape_safe(self):
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py
index d82de56c80c..486cbc43d0b 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py
@@ -182,10 +182,10 @@ class BroadcastMatrixBatchDimsTest(test.TestCase):
     x = rng.rand(3)
     y = rng.rand(1, 1)
 
-    with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
+    with self.assertRaisesRegex(ValueError, "at least two dimensions"):
       linear_operator_util.broadcast_matrix_batch_dims([x, y])
 
-    with self.assertRaisesRegexp(ValueError, "at least two dimensions"):
+    with self.assertRaisesRegex(ValueError, "at least two dimensions"):
       linear_operator_util.broadcast_matrix_batch_dims([y, x])
 
 
@@ -337,7 +337,7 @@ class UseOperatorOrProvidedHintUnlessContradictingTest(test.TestCase,
   )
   def test_raises_if_contradicting(self, operator_hint_value,
                                    provided_hint_value):
-    with self.assertRaisesRegexp(ValueError, "my error message"):
+    with self.assertRaisesRegex(ValueError, "my error message"):
       linear_operator_util.use_operator_or_provided_hint_unless_contradicting(
           operator=DummyOperatorWithHint(my_hint=operator_hint_value),
           hint_attr_name="my_hint",
@@ -413,7 +413,7 @@ class BlockwiseTest(test.TestCase, parameterized.TestCase):
 
     # Since the leftmost dimension of `x` is equal to the number of blocks, and
     # the operators have unknown dimension, the input is ambiguous.
-    with self.assertRaisesRegexp(ValueError, "structure is ambiguous"):
+    with self.assertRaisesRegex(ValueError, "structure is ambiguous"):
       linear_operator_util.arg_is_blockwise(op_dimensions, x, -2)
 
   def test_mismatched_input_raises(self):
@@ -425,7 +425,7 @@ class BlockwiseTest(test.TestCase, parameterized.TestCase):
     # two-element list; if interpreted blockwise, its corresponding dimensions
     # sum to 12 (=6*2). If not interpreted blockwise, its corresponding
     # dimension is 6. This is a mismatch.
-    with self.assertRaisesRegexp(ValueError, "dimension does not match"):
+    with self.assertRaisesRegex(ValueError, "dimension does not match"):
       linear_operator_util.arg_is_blockwise(op_dimensions, x, -1)
 
 if __name__ == "__main__":
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py
index fa5c8e2cfc4..8ca4e0f796f 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py
@@ -89,33 +89,33 @@ class LinearOperatorZerosTest(
       self.evaluate(operator.assert_self_adjoint())  # Should not fail
 
   def test_non_scalar_num_rows_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
+    with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
       linalg_lib.LinearOperatorZeros(num_rows=[2])
-    with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
+    with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
       linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=[2])
 
   def test_non_integer_num_rows_raises_static(self):
-    with self.assertRaisesRegexp(TypeError, "must be integer"):
+    with self.assertRaisesRegex(TypeError, "must be integer"):
       linalg_lib.LinearOperatorZeros(num_rows=2.)
-    with self.assertRaisesRegexp(TypeError, "must be integer"):
+    with self.assertRaisesRegex(TypeError, "must be integer"):
       linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=2.)
 
   def test_negative_num_rows_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+    with self.assertRaisesRegex(ValueError, "must be non-negative"):
       linalg_lib.LinearOperatorZeros(num_rows=-2)
-    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+    with self.assertRaisesRegex(ValueError, "must be non-negative"):
       linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=-2)
 
   def test_non_1d_batch_shape_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be a 1-D"):
+    with self.assertRaisesRegex(ValueError, "must be a 1-D"):
       linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=2)
 
   def test_non_integer_batch_shape_raises_static(self):
-    with self.assertRaisesRegexp(TypeError, "must be integer"):
+    with self.assertRaisesRegex(TypeError, "must be integer"):
       linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[2.])
 
   def test_negative_batch_shape_raises_static(self):
-    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+    with self.assertRaisesRegex(ValueError, "must be non-negative"):
       linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[-2])
 
   def test_non_scalar_num_rows_raises_dynamic(self):
@@ -153,7 +153,7 @@ class LinearOperatorZerosTest(
   def test_wrong_matrix_dimensions_raises_static(self):
     operator = linalg_lib.LinearOperatorZeros(num_rows=2)
     x = rng.randn(3, 3).astype(np.float32)
-    with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
+    with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"):
       operator.matmul(x)
 
   def test_wrong_matrix_dimensions_raises_dynamic(self):
@@ -185,14 +185,14 @@ class LinearOperatorZerosTest(
         linalg_lib.LinearOperatorZeros))
 
   def test_ref_type_shape_args_raises(self):
-    with self.assertRaisesRegexp(TypeError, "num_rows.cannot.be.reference"):
+    with self.assertRaisesRegex(TypeError, "num_rows.cannot.be.reference"):
       linalg_lib.LinearOperatorZeros(num_rows=variables_module.Variable(2))
 
-    with self.assertRaisesRegexp(TypeError, "num_columns.cannot.be.reference"):
+    with self.assertRaisesRegex(TypeError, "num_columns.cannot.be.reference"):
       linalg_lib.LinearOperatorZeros(
           num_rows=2, num_columns=variables_module.Variable(3))
 
-    with self.assertRaisesRegexp(TypeError, "batch_shape.cannot.be.reference"):
+    with self.assertRaisesRegex(TypeError, "batch_shape.cannot.be.reference"):
       linalg_lib.LinearOperatorZeros(
           num_rows=2, batch_shape=variables_module.Variable([2]))
 
diff --git a/tensorflow/python/kernel_tests/list_ops_test.py b/tensorflow/python/kernel_tests/list_ops_test.py
index 53ebdd3ab88..ce20cf489e6 100644
--- a/tensorflow/python/kernel_tests/list_ops_test.py
+++ b/tensorflow/python/kernel_tests/list_ops_test.py
@@ -78,8 +78,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     l = list_ops.empty_tensor_list(
         element_dtype=dtypes.float32, element_shape=[], max_num_elements=1)
     l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "Tried to push item into a full list"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "Tried to push item into a full list"):
       l = list_ops.tensor_list_push_back(l, 2.)
       self.evaluate(l)
 
@@ -91,8 +91,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
         element_dtype=dtypes.float32,
         element_shape=[],
         max_num_elements=max_num_elements)
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "Trying to pop from an empty list"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "Trying to pop from an empty list"):
       l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
       self.evaluate(l)
 
@@ -115,7 +115,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testPopUninitializedTensorWithInvalidElementShapeFails(self):
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=None, num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Trying to read an uninitialized tensor but "
         "element_shape is not fully defined"):
@@ -124,7 +124,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=[None, 2], num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"Incompatible shapes during merge: \[1,3\] vs. \[\?,2\]"):
       _, e = gen_list_ops.tensor_list_pop_back(
@@ -191,8 +191,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Should raise an error when the element tensors do not all have the same
     # shape.
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "Incompatible ranks during merge: 0 vs. 1"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "Incompatible ranks during merge: 0 vs. 1"):
       l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))
       t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
       self.evaluate(t)
@@ -213,7 +213,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Should raise an error when the element tensors do not all have the same
     # shape.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"Incompatible shapes during merge: \[1\] vs. \[2\]"):
       l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
@@ -234,8 +234,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Should not be able to stack empty lists with partially defined
     # element_shape.
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "non-fully-defined"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "non-fully-defined"):
       l = list_ops.empty_tensor_list(
           element_dtype=dtypes.float32,
           element_shape=[None, 2],
@@ -244,8 +244,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       self.evaluate(t)
 
     # Should not be able to stack empty lists with undefined element_shape.
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "non-fully-defined"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "non-fully-defined"):
       l = list_ops.empty_tensor_list(
           element_dtype=dtypes.float32,
           element_shape=None,
@@ -285,10 +285,10 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testStackReservedListWithNoElementsAndPartialElementShapeFails(self):
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=None, num_elements=3)
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "Tried to stack list which only contains "
-                                 "uninitialized tensors and has a "
-                                 "non-fully-defined element_shape: <unknown>"):
+    with self.assertRaisesRegex(
+        errors.InvalidArgumentError, "Tried to stack list which only contains "
+        "uninitialized tensors and has a "
+        "non-fully-defined element_shape: <unknown>"):
       t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
       self.evaluate(t)
 
@@ -341,8 +341,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Should raise an error when the requested tensors do not all have the same
     # shape.
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "Incompatible ranks during merge: 0 vs. 1"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "Incompatible ranks during merge: 0 vs. 1"):
       t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
       self.evaluate(t)
 
@@ -366,7 +366,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Should raise an error when the requested tensors do not all have the same
     # shape.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"Incompatible shapes during merge: \[1\] vs. \[2\]"):
       t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
@@ -387,8 +387,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Should not be able to gather from empty lists with partially defined
     # element_shape.
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "non-fully-defined"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "non-fully-defined"):
       l = list_ops.empty_tensor_list(
           element_dtype=dtypes.float32,
           element_shape=[None, 2],
@@ -398,8 +398,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     # Should not be able to gather from empty lists with undefined
     # element_shape.
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "non-fully-defined"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "non-fully-defined"):
       l = list_ops.empty_tensor_list(
           element_dtype=dtypes.float32,
           element_shape=None,
@@ -455,7 +455,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testGatherReservedListWithNoElementsAndPartialElementShapeFails(self):
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=None, num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Tried to gather uninitialized tensors from a"
         " list with non-fully-defined element_shape"):
@@ -485,7 +485,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def testScatterFailsWhenIndexLargerThanNumElements(self):
     c0 = constant_op.constant([1.0, 2.0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "TensorListScatter: Trying to scatter at index 3 in list with size 3"):
       l = gen_list_ops.tensor_list_scatter_v2(
@@ -494,7 +494,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def testScatterFailsWithInvalidNumElements(self):
     c0 = constant_op.constant([1.0, 2.0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "TensorListScatter expects num_elements >= -1, found: -2"):
       l = gen_list_ops.tensor_list_scatter_v2(
@@ -503,7 +503,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def testScatterWithInvalidRowsInInputTensorFails(self):
     c0 = constant_op.constant([1.0, 2.0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Invalid number of rows in input tensor. Expected: 3 Actual: 2"):
       l = list_ops.tensor_list_scatter(c0, [1, 0, 2], [])
@@ -511,7 +511,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def testScatterWithNegativeIndicesFails(self):
     c0 = constant_op.constant([1.0, 2.0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Indices in TensorListScatter must all be non-negative."):
       l = list_ops.tensor_list_scatter(c0, [-1, -2], element_shape=[])
@@ -658,7 +658,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testGetUninitializedTensorWithInvalidElementShapeFails(self):
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=None, num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Trying to read an uninitialized tensor but "
         "element_shape is not fully defined"):
@@ -676,7 +676,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       error_type = errors.InvalidArgumentError
     else:
       error_type = ValueError
-    with self.assertRaisesRegexp(error_type, r"shapes"):
+    with self.assertRaisesRegex(error_type, r"shapes"):
       e0 = gen_list_ops.tensor_list_get_item(
           l, 0, element_dtype=dtypes.float32, element_shape=[1, 3])
       self.evaluate(e0)
@@ -699,7 +699,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testSetOnEmptyListWithMaxNumElementsFails(self):
     l = list_ops.empty_tensor_list(
         element_dtype=dtypes.float32, element_shape=[], max_num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Trying to modify element 0 in a list with 0 elements."):
       l = list_ops.tensor_list_set_item(l, 0, 1.)
@@ -882,8 +882,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       with ops.device("/job:ps"):
         l_ps = array_ops.identity(l)
         l_ps = list_ops.tensor_list_push_back(l_ps, 2.)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Tried to push item into a full list"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Tried to push item into a full list"):
         with ops.device("/job:worker"):
           l_worker = array_ops.identity(l_ps)
           l_worker = list_ops.tensor_list_push_back(l_worker, 3.0)
@@ -943,8 +943,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       # at graph building time.
       l = list_ops.tensor_list_set_item(l, 0, ph)
       l_0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "incompatible shape"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "incompatible shape"):
         sess.run(l_0, {ph: [3.0]})
 
   def testResourceVariableScatterGather(self):
@@ -1021,7 +1021,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
           "element shapes are not identical at index 0")
     else:
       expected_error = (ValueError, "Shapes must be equal rank")
-    with self.assertRaisesRegexp(*expected_error):
+    with self.assertRaisesRegex(*expected_error):
       l_batch_of_vec_tls = array_ops.stack(
           [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)
       self.evaluate(
@@ -1033,7 +1033,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
                         r"input_b\[0\].dtype != element_dtype.")
     else:
       expected_error = (ValueError, "input_b.type != element_dtype")
-    with self.assertRaisesRegexp(*expected_error):
+    with self.assertRaisesRegex(*expected_error):
       l_batch_of_int_tls = array_ops.stack(
           [list_ops.tensor_list_from_tensor([1], element_shape=[])] * 2)
       self.evaluate(
@@ -1073,8 +1073,8 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     with self.assertRaises((errors.InvalidArgumentError, ValueError)):
       self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))
 
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "incompatible shape to a list at index 0"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "incompatible shape to a list at index 0"):
       self.evaluate(
           list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))
 
@@ -1082,7 +1082,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       expected_error = (errors.InvalidArgumentError, "Invalid data type")
     else:
       expected_error = (ValueError, "wrong element dtype")
-    with self.assertRaisesRegexp(*expected_error):
+    with self.assertRaisesRegex(*expected_error):
       self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))
 
   def testZerosLike(self):
@@ -1246,7 +1246,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
         element_shape=[], element_dtype=dtypes.float32, num_elements=2)
     l2 = list_ops.tensor_list_reserve(
         element_shape=[], element_dtype=dtypes.float32, num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Trying to add two lists of tensors with different lengths"):
       l = math_ops.add_n([l1, l2])
@@ -1268,7 +1268,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
           element_dtype=dtypes.float32,
           num_elements=3)
       l = math_ops.add_n([l1, l2])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "Trying to add two lists of tensors with incompatible element shapes"
       ):
@@ -1314,7 +1314,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
         element_dtype=dtypes.float32, element_shape=None)
     l = list_ops.tensor_list_push_back(l, [[0., 1.]])
     l = list_ops.tensor_list_push_back(l, [[2.], [4.]])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError, r"Incompatible shapes during merge: "
         r"\[2\] vs. \[1\]"):
       t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
@@ -1333,7 +1333,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testConcatEmptyListWithUnknownElementShapeFails(self):
     l = list_ops.empty_tensor_list(
         element_dtype=dtypes.float32, element_shape=None)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "All except the first dimension must be fully"
         " defined when concating an empty tensor list"):
@@ -1343,7 +1343,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testConcatEmptyListWithPartiallyDefinedElementShapeFails(self):
     l = list_ops.empty_tensor_list(
         element_dtype=dtypes.float32, element_shape=[2, None])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "All except the first dimension must be fully"
         " defined when concating an empty tensor list"):
@@ -1354,7 +1354,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     l = list_ops.empty_tensor_list(
         element_dtype=dtypes.float32,
         element_shape=tensor_shape.TensorShape([]))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Concat requires elements to be at least vectors, "
         "found scalars instead"):
@@ -1365,14 +1365,14 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     l = list_ops.empty_tensor_list(
         element_dtype=dtypes.float32, element_shape=None)
     l1 = list_ops.tensor_list_push_back(l, 1.)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError, "Concat saw a scalar shape at index 0"
         " but requires at least vectors"):
       t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
       self.evaluate(t)
     l1 = list_ops.tensor_list_push_back(l, [1.])
     l1 = list_ops.tensor_list_push_back(l1, 2.)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError, "Concat saw a scalar shape at index 1"
         " but requires at least vectors"):
       t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
@@ -1420,7 +1420,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testConcatWithUninitializedTensorsFailsIfNoElementShape(self):
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=None, num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"Trying to concat list with only uninitialized tensors "
         r"but element_shape_except_first_dim_ is not fully defined"):
@@ -1430,7 +1430,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testConcatWithUninitializedTensorsFailsIfNoInputLengths(self):
     l = list_ops.tensor_list_reserve(
         element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"List contains uninitialized tensor at index 0"
         r" but leading_dims has only 0 elements."):
@@ -1467,7 +1467,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     with self.cached_session():
       tensor = array_ops.placeholder(dtype=dtypes.float32)
       l = list_ops.tensor_list_split(tensor, element_shape=None, lengths=[1])
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"Tensor must be at least a vector, but saw shape: \[\]"):
         l.eval({tensor: 1})
@@ -1479,24 +1479,24 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       l = list_ops.tensor_list_split([1., 2.],
                                      element_shape=None,
                                      lengths=lengths)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"Expected lengths to be a vector, received shape: \[\]"):
         l.eval({lengths: 1})
 
   def testSplitWithInvalidLengthsFails(self):
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r"Invalid value in lengths: -1"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r"Invalid value in lengths: -1"):
       l = list_ops.tensor_list_split([1., 2.],
                                      element_shape=None,
                                      lengths=[1, -1])
       self.evaluate(l)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"Attempting to slice \[0, 3\] from tensor with length 2"):
       l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[3])
       self.evaluate(l)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         r"Unused values in tensor. Length of tensor: 2 Values used: 1"):
       l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[1])
@@ -1504,11 +1504,11 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   @test_util.run_deprecated_v1
   def testSkipEagerSplitWithScalarElementShapeFails(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r"Shapes must be equal rank, but are 1 and 0"):
+    with self.assertRaisesRegex(ValueError,
+                                r"Shapes must be equal rank, but are 1 and 0"):
       l = list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1])
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"TensorListSplit requires element_shape to be at least of rank 1, "
           r"but saw: \[\]"):
@@ -1520,7 +1520,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def testEagerOnlySplitWithScalarElementShapeFails(self):
     if context.executing_eagerly():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"TensorListSplit requires element_shape to be at least of rank 1, "
           r"but saw: \[\]"):
@@ -1528,14 +1528,14 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   @test_util.run_deprecated_v1
   def testSkipEagerSplitWithIncompatibleTensorShapeAndElementShapeFails(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r"Shapes must be equal rank, but are 2 and 1"):
+    with self.assertRaisesRegex(ValueError,
+                                r"Shapes must be equal rank, but are 2 and 1"):
       l = list_ops.tensor_list_split([[1.], [2.]],
                                      element_shape=[1],
                                      lengths=[1, 1])
 
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"):
         element_shape = array_ops.placeholder(dtype=dtypes.int32)
@@ -1546,7 +1546,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def testEagerOnlySplitWithIncompatibleTensorShapeAndElementShapeFails(self):
     if context.executing_eagerly():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"):
         list_ops.tensor_list_split([[1.], [2.]],
@@ -1576,7 +1576,7 @@ class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
         [1., 2.])
 
   def testResizeWithInvalidSizeFails(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "TensorListSlice expects size to be non-negative"):
       l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[])
diff --git a/tensorflow/python/kernel_tests/lookup_ops_test.py b/tensorflow/python/kernel_tests/lookup_ops_test.py
index 3b9f97670d4..514c7a1e997 100644
--- a/tensorflow/python/kernel_tests/lookup_ops_test.py
+++ b/tensorflow/python/kernel_tests/lookup_ops_test.py
@@ -1181,8 +1181,8 @@ class DenseHashTableOpTest(test.TestCase):
 
   def testSameEmptyAndDeletedKey(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Empty and deleted keys"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Empty and deleted keys"):
         table = lookup_ops.DenseHashTable(
             dtypes.int64,
             dtypes.int64,
@@ -1810,39 +1810,39 @@ class DenseHashTableOpTest(test.TestCase):
       # Inserting the empty key returns an error
       keys1 = constant_op.constant([11, 0], dtypes.int64)
       values1 = constant_op.constant([0, 1], dtypes.int64)
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "empty_key"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "empty_key"):
         self.evaluate(table.insert(keys1, values1))
 
       # Looking up the empty key returns an error
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "empty_key"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "empty_key"):
         self.evaluate(table.lookup(keys1))
 
       # Inserting the deleted key returns an error
       keys2 = constant_op.constant([11, -1], dtypes.int64)
       values2 = constant_op.constant([0, 1], dtypes.int64)
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "deleted_key"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "deleted_key"):
         self.evaluate(table.insert(keys2, values2))
 
       # Looking up the empty key returns an error
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "deleted_key"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "deleted_key"):
         self.evaluate(table.lookup(keys2))
 
       # Arbitrary tensors of keys are not supported
       keys = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
       values = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Expected key shape"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Expected key shape"):
         self.evaluate(table.lookup(keys))
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Expected key shape"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Expected key shape"):
         self.evaluate(table.insert(keys, values))
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Number of buckets must be"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Number of buckets must be"):
         table2 = lookup_ops.DenseHashTable(
             dtypes.int64,
             dtypes.int64,
@@ -1852,7 +1852,7 @@ class DenseHashTableOpTest(test.TestCase):
             initial_num_buckets=12)
         self.assertAllEqual(0, self.evaluate(table2.size()))
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Empty and deleted keys must have same shape"):
         table3 = lookup_ops.DenseHashTable(
@@ -1863,8 +1863,8 @@ class DenseHashTableOpTest(test.TestCase):
             deleted_key=[1, 2])
         self.assertAllEqual(0, self.evaluate(table3.size()))
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Empty and deleted keys cannot be equal"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Empty and deleted keys cannot be equal"):
         table4 = lookup_ops.DenseHashTable(
             dtypes.int64,
             dtypes.int64,
@@ -1873,8 +1873,8 @@ class DenseHashTableOpTest(test.TestCase):
             deleted_key=42)
         self.assertAllEqual(0, self.evaluate(table4.size()))
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Empty and deleted keys cannot be equal"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Empty and deleted keys cannot be equal"):
         table5 = lookup_ops.DenseHashTable(
             dtypes.int64,
             dtypes.int64,
@@ -2067,9 +2067,8 @@ class IndexTableFromFile(test.TestCase):
 
   def test_index_table_from_file_str_fails_with_zero_size_vocabulary(self):
     vocabulary_file = self._createVocabFile("zero_vocab_str.txt")
-    self.assertRaisesRegexp(
-        ValueError,
-        "vocab_size must be greater than 0, got 0. "
+    self.assertRaisesRegex(
+        ValueError, "vocab_size must be greater than 0, got 0. "
         "vocabulary_file: .*zero_vocab_str.txt",
         lookup_ops.index_table_from_file,
         vocabulary_file=vocabulary_file,
@@ -2078,9 +2077,8 @@ class IndexTableFromFile(test.TestCase):
   def test_index_table_from_file_tensor_fails_with_zero_size_vocabulary(self):
     vocabulary_file = constant_op.constant(
         self._createVocabFile("zero_vocab_tensor.txt"))
-    self.assertRaisesRegexp(
-        ValueError,
-        "vocab_size must be greater than 0, got 0. "
+    self.assertRaisesRegex(
+        ValueError, "vocab_size must be greater than 0, got 0. "
         "vocabulary_file: .*zero_vocab_tensor.txt",
         lookup_ops.index_table_from_file,
         vocabulary_file=vocabulary_file,
@@ -2103,8 +2101,8 @@ class IndexTableFromFile(test.TestCase):
   def test_index_table_from_file_with_vocab_size_too_large(self):
     vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
     with self.cached_session():
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Invalid vocab_size"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Invalid vocab_size"):
         table = lookup_ops.index_table_from_file(
             vocabulary_file=vocabulary_file, vocab_size=4)
         self.evaluate(table.initializer)
@@ -2225,15 +2223,15 @@ class IndexTableFromTensor(test.TestCase):
 
   def test_index_table_from_tensor_missing_vocabulary_list(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(ValueError,
-                                   "vocabulary_list must be specified"):
+      with self.assertRaisesRegex(ValueError,
+                                  "vocabulary_list must be specified"):
         lookup_ops.index_table_from_tensor(
             vocabulary_list=None, num_oov_buckets=1)
 
   def test_index_table_from_tensor_empty_vocabulary_list(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(
-          errors_impl.OpError, "keys and values cannot be empty"):
+      with self.assertRaisesRegex(errors_impl.OpError,
+                                  "keys and values cannot be empty"):
         _ = lookup_ops.index_table_from_tensor(
             vocabulary_list=np.array([], dtype=np.str_), num_oov_buckets=1)
         self.evaluate(lookup_ops.tables_initializer())
@@ -2347,8 +2345,8 @@ class IndexToStringTableFromFileTest(test.TestCase):
   def test_index_to_string_table_with_vocab_size_too_large(self):
     vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
     with self.cached_session():
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Invalid vocab_size"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Invalid vocab_size"):
         _ = lookup_ops.index_to_string_table_from_file(
             vocabulary_file=vocabulary_file, vocab_size=4)
         self.evaluate(lookup_ops.tables_initializer())
@@ -2532,13 +2530,13 @@ class IdTableWithHashBucketsTest(test.TestCase):
 
   def testFloat64IdTableWithOnlyHashBucket(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
+      with self.assertRaisesRegex(TypeError, "Invalid key_dtype"):
         lookup_ops.IdTableWithHashBuckets(
             None, num_oov_buckets=5, key_dtype=dtypes.float64)
 
   def testBoolIdTableWithOnlyHashBucket(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
+      with self.assertRaisesRegex(TypeError, "Invalid key_dtype"):
         lookup_ops.IdTableWithHashBuckets(
             None, num_oov_buckets=5, key_dtype=dtypes.bool)
 
diff --git a/tensorflow/python/kernel_tests/losses_test.py b/tensorflow/python/kernel_tests/losses_test.py
index b5f3e317d1c..101e0a5f1ff 100644
--- a/tensorflow/python/kernel_tests/losses_test.py
+++ b/tensorflow/python/kernel_tests/losses_test.py
@@ -131,7 +131,7 @@ class SoftmaxCrossEntropyLossTest(test.TestCase):
                                      [0.0, 0.0, 10.0]])
       labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
       loss = losses.softmax_cross_entropy(labels, logits)
-      self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
+      self.assertEqual('softmax_cross_entropy_loss/value', loss.op.name)
       self.assertAlmostEqual(loss.eval(), 0.0, 3)
 
   @test_util.run_deprecated_v1
@@ -142,7 +142,7 @@ class SoftmaxCrossEntropyLossTest(test.TestCase):
 
     with self.cached_session():
       loss = losses.softmax_cross_entropy(labels, logits)
-      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'softmax_cross_entropy_loss/value')
       self.assertAlmostEqual(loss.eval(), 10.0, 3)
 
   @test_util.run_deprecated_v1
@@ -223,7 +223,7 @@ class SoftmaxCrossEntropyLossTest(test.TestCase):
       label_smoothing = 0.1
       loss = losses.softmax_cross_entropy(
           labels, logits, label_smoothing=label_smoothing)
-      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'softmax_cross_entropy_loss/value')
       expected_value = 400.0 * label_smoothing / 3.0
       self.assertAlmostEqual(loss.eval(), expected_value, 3)
 
@@ -245,7 +245,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
                                      [0.0, 0.0, 10.0]])
       labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
       loss = losses.sparse_softmax_cross_entropy(labels, logits)
-      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
       self.assertAlmostEqual(loss.eval(), 0.0, 3)
 
   @test_util.assert_no_new_pyobjects_executing_eagerly
@@ -262,7 +262,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
                                      [0.0, 0.0, 10.0]])
       labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
       loss = losses.sparse_softmax_cross_entropy(labels, logits)
-      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
       self.assertAlmostEqual(loss.eval(), 0.0, 3)
 
   @test_util.run_deprecated_v1
@@ -272,7 +272,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
                                      [0.0, 0.0, 10.0]])
       labels = constant_op.constant([0, 1, 2])
       loss = losses.sparse_softmax_cross_entropy(labels, logits)
-      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
       self.assertAlmostEqual(loss.eval(), 0.0, 3)
 
   @test_util.run_deprecated_v1
@@ -283,7 +283,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
 
     with self.cached_session():
       loss = losses.sparse_softmax_cross_entropy(labels, logits)
-      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
       self.assertAlmostEqual(loss.eval(), 10.0, 3)
 
   @test_util.run_deprecated_v1
@@ -294,7 +294,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
 
     with self.cached_session():
       loss = losses.sparse_softmax_cross_entropy(labels, logits)
-      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
       self.assertAlmostEqual(loss.eval(), 10.0, 3)
 
   @test_util.run_deprecated_v1
@@ -305,7 +305,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
 
     with self.cached_session():
       loss = losses.sparse_softmax_cross_entropy(labels, logits)
-      self.assertEquals(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
+      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
       self.assertAlmostEqual(loss.eval(), 10.0, 3)
 
   @test_util.run_deprecated_v1
@@ -488,7 +488,7 @@ class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
       labels = constant_op.constant([[0, 1], [2, 3]])
       weights = constant_op.constant(1.2)
 
-      with self.assertRaisesRegexp(ValueError, 'mismatch'):
+      with self.assertRaisesRegex(ValueError, 'mismatch'):
         losses.sparse_softmax_cross_entropy(
             labels, logits, weights=weights).eval()
 
@@ -503,8 +503,8 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
                                      [-100.0, -100.0, 100.0]])
       labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
       loss = losses.sigmoid_cross_entropy(labels, logits)
-      self.assertEquals(logits.dtype, loss.dtype)
-      self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
+      self.assertEqual(logits.dtype, loss.dtype)
+      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
       self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
 
   @test_util.run_deprecated_v1
@@ -514,7 +514,7 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
     weights = array_ops.ones_like(logits, dtype=dtypes.float32)
 
     loss = losses.sigmoid_cross_entropy(labels, logits, weights)
-    self.assertEquals(logits.dtype, loss.dtype)
+    self.assertEqual(logits.dtype, loss.dtype)
 
     with self.cached_session() as sess:
       loss = sess.run(loss,
@@ -531,7 +531,7 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
     weights = array_ops.ones_like(logits, dtype=dtypes.float32)
 
     loss = losses.sigmoid_cross_entropy(labels, logits, weights)
-    self.assertEquals(logits.dtype, loss.dtype)
+    self.assertEqual(logits.dtype, loss.dtype)
 
     with self.cached_session() as sess:
       loss = sess.run(loss,
@@ -549,8 +549,8 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
                                      [-100.0, -100.0, 100.0]])
       labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
       loss = losses.sigmoid_cross_entropy(labels, logits)
-      self.assertEquals(logits.dtype, loss.dtype)
-      self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
+      self.assertEqual(logits.dtype, loss.dtype)
+      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
       self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)
 
   @test_util.run_deprecated_v1
@@ -562,8 +562,8 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
       labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
       weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
       loss = losses.sigmoid_cross_entropy(labels, logits, weights)
-      self.assertEquals(logits.dtype, loss.dtype)
-      self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
+      self.assertEqual(logits.dtype, loss.dtype)
+      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
       self.assertAlmostEqual(1700.0 / 7.0, self.evaluate(loss), 3)
 
   @test_util.run_deprecated_v1
@@ -573,8 +573,8 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
                                    [-100.0, 100.0, 100.0]])
     labels = constant_op.constant([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
     loss = losses.sigmoid_cross_entropy(labels, logits)
-    self.assertEquals(logits.dtype, loss.dtype)
-    self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
+    self.assertEqual(logits.dtype, loss.dtype)
+    self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
 
     with self.cached_session():
       self.assertAlmostEqual(0.0, self.evaluate(loss), 3)
@@ -589,7 +589,7 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
         (1, 0, 1), (1, 1, 0), (0, 1, 1)
     ), dtype=dtypes.int64)
     loss = losses.sigmoid_cross_entropy(labels, logits)
-    self.assertEquals(logits.dtype, loss.dtype)
+    self.assertEqual(logits.dtype, loss.dtype)
 
     with self.cached_session():
       self.assertAlmostEqual(44.444, self.evaluate(loss), 3)
@@ -602,7 +602,7 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
     labels = constant_op.constant(((1, 0, 1), (1, 1, 0), (0, 1, 1)))
     loss = losses.sigmoid_cross_entropy(
         labels, logits, reduction=losses.Reduction.NONE)
-    self.assertEquals(logits.dtype, loss.dtype)
+    self.assertEqual(logits.dtype, loss.dtype)
 
     with self.cached_session():
       self.assertAllClose(((0., 0., 0.), (0., 100., 100.), (100., 0., 100.)),
@@ -627,8 +627,8 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
       label_smoothing = 0.1
       loss = losses.sigmoid_cross_entropy(
           labels, logits, label_smoothing=label_smoothing)
-      self.assertEquals(logits.dtype, loss.dtype)
-      self.assertEquals('sigmoid_cross_entropy_loss/value', loss.op.name)
+      self.assertEqual(logits.dtype, loss.dtype)
+      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
       expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
       self.assertAlmostEqual(loss.eval(), expected_value, 3)
 
@@ -640,7 +640,7 @@ class SigmoidCrossEntropyLossTest(test.TestCase):
       sigmoid_labels = constant_op.constant([[1, 0, 1]])
       sigmoid_loss = losses.sigmoid_cross_entropy(
           sigmoid_labels, sigmoid_logits, label_smoothing=label_smoothing)
-      self.assertEquals(sigmoid_logits.dtype, sigmoid_loss.dtype)
+      self.assertEqual(sigmoid_logits.dtype, sigmoid_loss.dtype)
 
       softmax_logits = constant_op.constant(
           [[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
@@ -1143,7 +1143,7 @@ class MeanPairwiseSquaredErrorTest(test.TestCase):
     expected_error_msg = 'weights can not be broadcast to values'
 
     # Static check.
-    with self.assertRaisesRegexp(ValueError, expected_error_msg):
+    with self.assertRaisesRegex(ValueError, expected_error_msg):
       losses.mean_pairwise_squared_error(
           predictions=predictions, labels=labels, weights=weights)
 
@@ -1156,7 +1156,7 @@ class MeanPairwiseSquaredErrorTest(test.TestCase):
         labels=labels_placeholder,
         weights=weights_placeholder)
     with self.cached_session():
-      with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
+      with self.assertRaisesRegex(errors_impl.OpError, expected_error_msg):
         dynamic_inputs_op.eval(feed_dict={
             predictions_placeholder: predictions,
             labels_placeholder: labels,
@@ -1456,7 +1456,7 @@ class ComputeWeightedLossTest(test.TestCase):
       expected_error_msg = 'weights can not be broadcast to values'
 
       # Static check.
-      with self.assertRaisesRegexp(ValueError, expected_error_msg):
+      with self.assertRaisesRegex(ValueError, expected_error_msg):
         losses.compute_weighted_loss(self._raw_losses, weights=weights)
 
       # Dynamic check.
@@ -1465,7 +1465,7 @@ class ComputeWeightedLossTest(test.TestCase):
           self._raw_losses, weights=weights_placeholder)
       self.assertEqual(1, len(util.get_losses()))
       with self.cached_session():
-        with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
+        with self.assertRaisesRegex(errors_impl.OpError, expected_error_msg):
           weighted_loss.eval(feed_dict={weights_placeholder: weights})
 
   def testInvalidWeightTooManyDims(self):
@@ -1479,7 +1479,7 @@ class ComputeWeightedLossTest(test.TestCase):
       self.assertEqual(0, len(util.get_losses()))
 
       # Static check.
-      with self.assertRaisesRegexp(ValueError, expected_error_msg):
+      with self.assertRaisesRegex(ValueError, expected_error_msg):
         losses.compute_weighted_loss(raw_losses, weights=weights)
 
       # Dynamic check.
@@ -1488,7 +1488,7 @@ class ComputeWeightedLossTest(test.TestCase):
           raw_losses, weights=weights_placeholder)
       self.assertEqual(1, len(util.get_losses()))
       with self.cached_session():
-        with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
+        with self.assertRaisesRegex(errors_impl.OpError, expected_error_msg):
           weighted_loss.eval(feed_dict={weights_placeholder: weights})
 
   def testInvalid3Weight(self):
diff --git a/tensorflow/python/kernel_tests/manip_ops_test.py b/tensorflow/python/kernel_tests/manip_ops_test.py
index e6cb06ca477..eb9a3d6d0d9 100644
--- a/tensorflow/python/kernel_tests/manip_ops_test.py
+++ b/tensorflow/python/kernel_tests/manip_ops_test.py
@@ -99,8 +99,8 @@ class RollTest(test_util.TensorFlowTestCase):
     self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
     # Make sure negative axis should be 0 <= axis + dims < dims
     with self.cached_session(use_gpu=True):
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "is out of range"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "is out of range"):
         manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
                        3, -10).eval()
 
@@ -112,8 +112,8 @@ class RollTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testInvalidInputShape(self):
     # The input should be 1-D or higher, checked in shape function.
-    with self.assertRaisesRegexp(
-        ValueError, "Shape must be at least rank 1 but is rank 0"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be at least rank 1 but is rank 0"):
       manip_ops.roll(7, 1, 0)
 
   @test_util.run_deprecated_v1
@@ -123,15 +123,15 @@ class RollTest(test_util.TensorFlowTestCase):
     shift = 1
     axis = 0
     with self.cached_session(use_gpu=True):
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "input must be 1-D or higher"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "input must be 1-D or higher"):
         manip_ops.roll(tensor, shift, axis).eval(feed_dict={tensor: 7})
 
   @test_util.run_deprecated_v1
   def testInvalidAxisShape(self):
     # The axis should be a scalar or 1-D, checked in shape function.
-    with self.assertRaisesRegexp(
-        ValueError, "Shape must be at most rank 1 but is rank 2"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be at most rank 1 but is rank 2"):
       manip_ops.roll([[1, 2], [3, 4]], 1, [[0, 1]])
 
   @test_util.run_deprecated_v1
@@ -141,15 +141,15 @@ class RollTest(test_util.TensorFlowTestCase):
     shift = 1
     axis = array_ops.placeholder(dtype=dtypes.int32)
     with self.cached_session(use_gpu=True):
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "axis must be a scalar or a 1-D vector"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "axis must be a scalar or a 1-D vector"):
         manip_ops.roll(tensor, shift, axis).eval(feed_dict={axis: [[0, 1]]})
 
   @test_util.run_deprecated_v1
   def testInvalidShiftShape(self):
     # The shift should be a scalar or 1-D, checked in shape function.
-    with self.assertRaisesRegexp(
-        ValueError, "Shape must be at most rank 1 but is rank 2"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be at most rank 1 but is rank 2"):
       manip_ops.roll([[1, 2], [3, 4]], [[0, 1]], 1)
 
   @test_util.run_deprecated_v1
@@ -159,14 +159,14 @@ class RollTest(test_util.TensorFlowTestCase):
     shift = array_ops.placeholder(dtype=dtypes.int32)
     axis = 1
     with self.cached_session(use_gpu=True):
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "shift must be a scalar or a 1-D vector"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "shift must be a scalar or a 1-D vector"):
         manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [[0, 1]]})
 
   @test_util.run_deprecated_v1
   def testInvalidShiftAndAxisNotEqualShape(self):
     # The shift and axis must be same size, checked in shape function.
-    with self.assertRaisesRegexp(ValueError, "both shapes must be equal"):
+    with self.assertRaisesRegex(ValueError, "both shapes must be equal"):
       manip_ops.roll([[1, 2], [3, 4]], [1], [0, 1])
 
   @test_util.run_deprecated_v1
@@ -176,8 +176,8 @@ class RollTest(test_util.TensorFlowTestCase):
     shift = array_ops.placeholder(dtype=dtypes.int32)
     axis = [0, 1]
     with self.cached_session(use_gpu=True):
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "shift and axis must have the same size"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "shift and axis must have the same size"):
         manip_ops.roll(tensor, shift, axis).eval(feed_dict={shift: [1]})
 
   def testRollAxisOutOfRangeRaises(self):
@@ -185,8 +185,8 @@ class RollTest(test_util.TensorFlowTestCase):
     shift = 1
     axis = 1
     with self.cached_session(use_gpu=True):
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "is out of range"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "is out of range"):
         manip_ops.roll(tensor, shift, axis).eval()
 
 
diff --git a/tensorflow/python/kernel_tests/map_fn_test.py b/tensorflow/python/kernel_tests/map_fn_test.py
index 22716a6869e..62379ed222a 100644
--- a/tensorflow/python/kernel_tests/map_fn_test.py
+++ b/tensorflow/python/kernel_tests/map_fn_test.py
@@ -81,9 +81,9 @@ class MapFnTest(test.TestCase):
 
   @test_util.run_in_graph_and_eager_modes
   def testMapOverScalarErrors(self):
-    with self.assertRaisesRegexp(ValueError, "not scalars"):
+    with self.assertRaisesRegex(ValueError, "not scalars"):
       map_fn.map_fn(lambda x: x, [1, 2])
-    with self.assertRaisesRegexp(ValueError, "not a scalar"):
+    with self.assertRaisesRegex(ValueError, "not a scalar"):
       map_fn.map_fn(lambda x: x, 1)
 
   @test_util.run_deprecated_v1
@@ -155,7 +155,7 @@ class MapFnTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def testMap_MultiOutputMismatchedDtype(self):
     nums = np.array([1, 2, 3, 4, 5, 6])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r"two structures don't have the same nested structure"):
       # lambda emits tuple, but dtype is a list
       map_fn.map_fn(
@@ -240,7 +240,7 @@ class MapFnTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   def testMapEmptyList(self):
     x = []
-    with self.assertRaisesRegexp(ValueError, r"elems must be a Tensor or"):
+    with self.assertRaisesRegex(ValueError, r"elems must be a Tensor or"):
       _ = map_fn.map_fn(lambda e: e, x)
 
 
diff --git a/tensorflow/python/kernel_tests/matmul_op_test.py b/tensorflow/python/kernel_tests/matmul_op_test.py
index a8cb14f2b34..712d7336b94 100644
--- a/tensorflow/python/kernel_tests/matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/matmul_op_test.py
@@ -195,14 +195,14 @@ except AttributeError:
 class MatMulInfixOperatorTest(test_lib.TestCase):
 
   def testMismatchedShape(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         Exception, "(Shape must be rank 2 but is rank 1|is not a matrix)"):
       infix_matmul(
           ops.convert_to_tensor([10.0, 20.0, 30.0]),
           ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
 
   def testMismatchedDimensions(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         Exception, "(Dimensions must be equal|Matrix size-incompatible)"):
       infix_matmul(
           ops.convert_to_tensor([[10.0, 20.0, 30.0]]),
diff --git a/tensorflow/python/kernel_tests/metrics_test.py b/tensorflow/python/kernel_tests/metrics_test.py
index 5f7db2764cc..edb3b428dd0 100644
--- a/tensorflow/python/kernel_tests/metrics_test.py
+++ b/tensorflow/python/kernel_tests/metrics_test.py
@@ -160,9 +160,9 @@ def _assert_nan(test_case, actual):
 
 
 def _assert_metric_variables(test_case, expected):
-  test_case.assertEquals(
+  test_case.assertEqual(
       set(expected), set(v.name for v in variables.local_variables()))
-  test_case.assertEquals(
+  test_case.assertEqual(
       set(expected),
       set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
 
@@ -340,11 +340,11 @@ class MeanTest(test.TestCase):
     expected_error_msg = 'weights can not be broadcast to values'
     for invalid_weight in invalid_weights:
       # Static shapes.
-      with self.assertRaisesRegexp(ValueError, expected_error_msg):
+      with self.assertRaisesRegex(ValueError, expected_error_msg):
         metrics.mean(values, invalid_weight)
 
       # Dynamic shapes.
-      with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
+      with self.assertRaisesRegex(errors_impl.OpError, expected_error_msg):
         with self.cached_session():
           _, update_op = metrics.mean(values_placeholder, invalid_weight)
           variables.local_variables_initializer().run()
diff --git a/tensorflow/python/kernel_tests/norm_op_test.py b/tensorflow/python/kernel_tests/norm_op_test.py
index bfea2134454..f3787190426 100644
--- a/tensorflow/python/kernel_tests/norm_op_test.py
+++ b/tensorflow/python/kernel_tests/norm_op_test.py
@@ -40,18 +40,18 @@ class NormOpTest(test_lib.TestCase):
   def testBadOrder(self):
     matrix = [[0., 1.], [2., 3.]]
     for ord_ in "fro", -7, -1.1, 0:
-      with self.assertRaisesRegexp(ValueError,
-                                   "'ord' must be a supported vector norm"):
+      with self.assertRaisesRegex(ValueError,
+                                  "'ord' must be a supported vector norm"):
         linalg_ops.norm(matrix, ord=ord_)
 
     for ord_ in "fro", -7, -1.1, 0:
-      with self.assertRaisesRegexp(ValueError,
-                                   "'ord' must be a supported vector norm"):
+      with self.assertRaisesRegex(ValueError,
+                                  "'ord' must be a supported vector norm"):
         linalg_ops.norm(matrix, ord=ord_, axis=-1)
 
     for ord_ in "foo", -7, -1.1, 1.1:
-      with self.assertRaisesRegexp(ValueError,
-                                   "'ord' must be a supported matrix norm"):
+      with self.assertRaisesRegex(ValueError,
+                                  "'ord' must be a supported matrix norm"):
         linalg_ops.norm(matrix, ord=ord_, axis=[-2, -1])
 
   @test_util.run_v1_only("b/120545219")
@@ -60,7 +60,7 @@ class NormOpTest(test_lib.TestCase):
     for axis_ in [], [1, 2, 3], [[1]], [[1], [2]], [3.1415], [1, 1]:
       error_prefix = ("'axis' must be None, an integer, or a tuple of 2 unique "
                       "integers")
-      with self.assertRaisesRegexp(ValueError, error_prefix):
+      with self.assertRaisesRegex(ValueError, error_prefix):
         linalg_ops.norm(matrix, axis=axis_)
 
 
diff --git a/tensorflow/python/kernel_tests/nth_element_op_test.py b/tensorflow/python/kernel_tests/nth_element_op_test.py
index 4be78b2d5ca..d8b9adb8731 100644
--- a/tensorflow/python/kernel_tests/nth_element_op_test.py
+++ b/tensorflow/python/kernel_tests/nth_element_op_test.py
@@ -114,8 +114,7 @@ class NthElementTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testInvalidInput(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 "at least rank 1 but is rank 0"):
+    with self.assertRaisesRegex(ValueError, "at least rank 1 but is rank 0"):
       nn_ops.nth_element(5, 0)
 
   @test_util.run_deprecated_v1
@@ -127,11 +126,9 @@ class NthElementTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testInvalidN(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 "non-negative but is -1"):
+    with self.assertRaisesRegex(ValueError, "non-negative but is -1"):
       nn_ops.nth_element([5], -1)
-    with self.assertRaisesRegexp(ValueError,
-                                 "scalar but has rank 1"):
+    with self.assertRaisesRegex(ValueError, "scalar but has rank 1"):
       nn_ops.nth_element([5, 6, 3], [1])
 
   @test_util.run_deprecated_v1
@@ -146,8 +143,7 @@ class NthElementTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testNTooLarge(self):
     inputs = [[0.1, 0.2], [0.3, 0.4]]
-    with self.assertRaisesRegexp(ValueError,
-                                 "must have last dimension > n = 2"):
+    with self.assertRaisesRegex(ValueError, "must have last dimension > n = 2"):
       nn_ops.nth_element(inputs, 2)
 
   @test_util.run_deprecated_v1
diff --git a/tensorflow/python/kernel_tests/numerics_test.py b/tensorflow/python/kernel_tests/numerics_test.py
index eadb8ceff07..025ad59939c 100644
--- a/tensorflow/python/kernel_tests/numerics_test.py
+++ b/tensorflow/python/kernel_tests/numerics_test.py
@@ -110,9 +110,8 @@ class NumericsTest(test.TestCase):
     _ = control_flow_ops.cond(predicate,
                               lambda: constant_op.constant([37.]),
                               lambda: constant_op.constant([42.]))
-    with self.assertRaisesRegexp(
-        ValueError,
-        r"`tf\.add_check_numerics_ops\(\) is not compatible with "
+    with self.assertRaisesRegex(
+        ValueError, r"`tf\.add_check_numerics_ops\(\) is not compatible with "
         r"TensorFlow control flow operations such as `tf\.cond\(\)` "
         r"or `tf.while_loop\(\)`\."):
       numerics.add_check_numerics_ops()
@@ -122,9 +121,8 @@ class NumericsTest(test.TestCase):
     _ = control_flow_ops.while_loop(lambda _: predicate,
                                     lambda _: constant_op.constant([37.]),
                                     [constant_op.constant([42.])])
-    with self.assertRaisesRegexp(
-        ValueError,
-        r"`tf\.add_check_numerics_ops\(\) is not compatible with "
+    with self.assertRaisesRegex(
+        ValueError, r"`tf\.add_check_numerics_ops\(\) is not compatible with "
         r"TensorFlow control flow operations such as `tf\.cond\(\)` "
         r"or `tf.while_loop\(\)`\."):
       numerics.add_check_numerics_ops()
diff --git a/tensorflow/python/kernel_tests/pad_op_test.py b/tensorflow/python/kernel_tests/pad_op_test.py
index 6fb8a4b5d86..0a53db908bf 100644
--- a/tensorflow/python/kernel_tests/pad_op_test.py
+++ b/tensorflow/python/kernel_tests/pad_op_test.py
@@ -165,7 +165,7 @@ class PadOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testPaddingsNonNegative(self):
     with self.session(use_gpu=True):
-      with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+      with self.assertRaisesRegex(ValueError, "must be non-negative"):
         array_ops.pad(constant_op.constant(
             [1], shape=[1]),
                       constant_op.constant(
@@ -174,7 +174,7 @@ class PadOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testPaddingsNonNegative2(self):
     with self.session(use_gpu=True):
-      with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+      with self.assertRaisesRegex(ValueError, "must be non-negative"):
         array_ops.pad(constant_op.constant(
             [1], shape=[1]),
                       constant_op.constant(
@@ -198,7 +198,7 @@ class PadOpTest(test.TestCase):
   def testInvalid(self):
     with self.cached_session():
       x = [[1, 2, 3], [4, 5, 6]]
-      with self.assertRaisesRegexp(ValueError, "Unknown padding mode"):
+      with self.assertRaisesRegex(ValueError, "Unknown padding mode"):
         array_ops.pad(x, [[1, 0], [2, 1]], mode="weird").eval()
 
   def testPaddingTypes(self):
diff --git a/tensorflow/python/kernel_tests/padding_fifo_queue_test.py b/tensorflow/python/kernel_tests/padding_fifo_queue_test.py
index 1825bebea8a..5870c21750f 100644
--- a/tensorflow/python/kernel_tests/padding_fifo_queue_test.py
+++ b/tensorflow/python/kernel_tests/padding_fifo_queue_test.py
@@ -318,7 +318,7 @@ class PaddingFIFOQueueTest(test.TestCase):
 
   def testConstructPaddingFIFOQueueWithNoShape(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           r"When providing partial shapes, a list of shapes must be provided."):
         data_flow_ops.PaddingFIFOQueue(10, dtypes_lib.float32,
@@ -612,8 +612,8 @@ class PaddingFIFOQueueTest(test.TestCase):
       elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
       elems_bad = array_ops.placeholder(dtypes_lib.int32)
       enqueue_op = q.enqueue((elems_ok, elems_bad))
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   r"Expected \[\?,3\], got \[3,4\]"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  r"Expected \[\?,3\], got \[3,4\]"):
         sess.run([enqueue_op],
                  feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
 
@@ -628,9 +628,10 @@ class PaddingFIFOQueueTest(test.TestCase):
       elems_bad = array_ops.placeholder(dtypes_lib.int32)
       enqueue_op = q.enqueue_many((elems_ok, elems_bad))
       dequeued_t = q.dequeue_many(2)
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Shape mismatch in tuple component 1. "
-                                   r"Expected \[2,\?,3\], got \[2,3,4\]"):
+      with self.assertRaisesRegex(
+          errors_impl.InvalidArgumentError,
+          "Shape mismatch in tuple component 1. "
+          r"Expected \[2,\?,3\], got \[2,3,4\]"):
         sess.run([enqueue_op],
                  feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
         self.evaluate(dequeued_t)
@@ -914,8 +915,8 @@ class PaddingFIFOQueueTest(test.TestCase):
         self.assertEqual([elem], self.evaluate(dequeued_t))
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                   "is closed and has insufficient"):
+      with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                  "is closed and has insufficient"):
         self.evaluate(dequeued_t)
 
   def testBlockingDequeueFromClosedQueue(self):
@@ -935,8 +936,8 @@ class PaddingFIFOQueueTest(test.TestCase):
         for elem in elems:
           self.assertEqual([elem], self.evaluate(dequeued_t))
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -980,8 +981,8 @@ class PaddingFIFOQueueTest(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1008,8 +1009,8 @@ class PaddingFIFOQueueTest(test.TestCase):
       def dequeue():
         self.assertAllEqual(elems, self.evaluate(dequeued_t))
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1036,8 +1037,8 @@ class PaddingFIFOQueueTest(test.TestCase):
       def dequeue():
         self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1132,8 +1133,8 @@ class PaddingFIFOQueueTest(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1155,8 +1156,8 @@ class PaddingFIFOQueueTest(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -1177,7 +1178,7 @@ class PaddingFIFOQueueTest(test.TestCase):
       close_op.run()
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
         enqueue_op.run()
 
   def testEnqueueManyToClosedQueue(self):
@@ -1191,7 +1192,7 @@ class PaddingFIFOQueueTest(test.TestCase):
       close_op.run()
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
         enqueue_op.run()
 
   def testBlockingEnqueueToFullQueue(self):
@@ -1589,7 +1590,7 @@ class PaddingFIFOQueueTest(test.TestCase):
         self.assertAllEqual(input_elem, output_elem)
 
   def testUnknownRank(self):
-    with self.assertRaisesRegexp(ValueError, "must have a defined rank"):
+    with self.assertRaisesRegex(ValueError, "must have a defined rank"):
       data_flow_ops.PaddingFIFOQueue(32, [dtypes_lib.float32],
                                      [tensor_shape.TensorShape(None)])
 
diff --git a/tensorflow/python/kernel_tests/parsing_ops_test.py b/tensorflow/python/kernel_tests/parsing_ops_test.py
index c94fd0fde49..07d5e6201a1 100644
--- a/tensorflow/python/kernel_tests/parsing_ops_test.py
+++ b/tensorflow/python/kernel_tests/parsing_ops_test.py
@@ -2437,8 +2437,8 @@ class DecodeJSONExampleTest(test.TestCase):
   def testInvalidSyntax(self):
     json_tensor = constant_op.constant(["{]"])
     if context.executing_eagerly():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Error while parsing JSON"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Error while parsing JSON"):
         parsing_ops.decode_json_example(json_tensor)
     else:
       binary_tensor = parsing_ops.decode_json_example(json_tensor)
diff --git a/tensorflow/python/kernel_tests/partitioned_variables_test.py b/tensorflow/python/kernel_tests/partitioned_variables_test.py
index edcbc2967e2..111488007e5 100644
--- a/tensorflow/python/kernel_tests/partitioned_variables_test.py
+++ b/tensorflow/python/kernel_tests/partitioned_variables_test.py
@@ -321,7 +321,7 @@ class PartitionedVariablesTestCase(test.TestCase):
   def _TestSaveSpec(self, slices, expected_specs):
     self.assertEqual(len(expected_specs), len(slices))
     for i in xrange(len(expected_specs)):
-      self.assertEquals(expected_specs[i], slices[i]._save_slice_info.spec)
+      self.assertEqual(expected_specs[i], slices[i]._save_slice_info.spec)
 
   def testVecConstantInit(self):
     with self.cached_session():
diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py
index c9b1e42d66b..7555230fa35 100644
--- a/tensorflow/python/kernel_tests/pooling_ops_test.py
+++ b/tensorflow/python/kernel_tests/pooling_ops_test.py
@@ -756,7 +756,7 @@ class PoolingTest(test.TestCase):
                                          use_gpu=False):
     with self.cached_session(use_gpu=use_gpu):
       t = constant_op.constant(1.0, shape=in_size)
-      with self.assertRaisesRegexp(errors_impl.UnimplementedError, error_msg):
+      with self.assertRaisesRegex(errors_impl.UnimplementedError, error_msg):
         t = nn_ops.max_pool(
             t, ksize=ksize, strides=strides, padding="SAME").eval()
 
@@ -1931,7 +1931,7 @@ class PoolingTest(test.TestCase):
       for pool_func in pool_funcs:
         if pool_func != nn_ops.max_pool:
           # Illegal strides.
-          with self.assertRaisesRegexp(
+          with self.assertRaisesRegex(
               errors_impl.UnimplementedError,
               "Pooling is not yet supported on the batch"):
             sess.run(
@@ -1942,14 +1942,14 @@ class PoolingTest(test.TestCase):
                     padding="SAME"))
 
         # Filter too large.
-        with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
+        with self.assertRaisesRegex(ValueError, "Negative dimension size"):
           sess.run(
               pool_func(
                   array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
                   ksize=[1, 20, 21, 1],
                   strides=[1, 1, 1, 1],
                   padding="VALID"))
-        with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
+        with self.assertRaisesRegex(ValueError, "Negative dimension size"):
           pool_func(
               array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
               ksize=[1, 21, 20, 1],
diff --git a/tensorflow/python/kernel_tests/priority_queue_test.py b/tensorflow/python/kernel_tests/priority_queue_test.py
index c183fc0db48..a71d728563c 100644
--- a/tensorflow/python/kernel_tests/priority_queue_test.py
+++ b/tensorflow/python/kernel_tests/priority_queue_test.py
@@ -332,7 +332,7 @@ class PriorityQueueTest(test.TestCase):
       input_other = array_ops.placeholder(dtypes.string)
       q = data_flow_ops.PriorityQueue(2000, (dtypes.string,), (()))
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           r"Shape mismatch in tuple component 0. Expected \[\], got \[2\]"):
         sess.run([q.enqueue((input_priority, input_other))],
@@ -342,7 +342,7 @@ class PriorityQueueTest(test.TestCase):
                      input_other: np.random.rand(3, 5).astype(bytes)
                  })
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           r"Shape mismatch in tuple component 0. Expected \[2\], got \[2,2\]"):
         sess.run(
diff --git a/tensorflow/python/kernel_tests/py_func_test.py b/tensorflow/python/kernel_tests/py_func_test.py
index 5365e9a490e..5c86215631d 100644
--- a/tensorflow/python/kernel_tests/py_func_test.py
+++ b/tensorflow/python/kernel_tests/py_func_test.py
@@ -320,8 +320,8 @@ class PyFuncTest(PyFuncTestBase):
 
       y, = script_ops.py_func(bad, [], [dtypes.float32])
 
-      with self.assertRaisesRegexp(errors.InternalError,
-                                   "Unsupported numpy data type"):
+      with self.assertRaisesRegex(errors.InternalError,
+                                  "Unsupported numpy data type"):
         self.evaluate(y)
 
   @test_util.run_v1_only("b/120545219")
@@ -334,8 +334,8 @@ class PyFuncTest(PyFuncTestBase):
 
       z, = script_ops.py_func(bad, [], [dtypes.int64])
 
-      with self.assertRaisesRegexp(errors.InternalError,
-                                   "Unsupported object type"):
+      with self.assertRaisesRegex(errors.InternalError,
+                                  "Unsupported object type"):
         self.evaluate(z)
 
   @test_util.run_v1_only("b/120545219")
@@ -634,8 +634,8 @@ class EagerPyFuncTest(PyFuncTestBase):
     def return_variable():
       return resource_variable_ops.ResourceVariable(0.0)
 
-    with self.assertRaisesRegexp(errors.UnknownError,
-                                 "Attempting to return a variable"):
+    with self.assertRaisesRegex(errors.UnknownError,
+                                "Attempting to return a variable"):
       output = script_ops.eager_py_func(
           return_variable, inp=[], Tout=dtypes.float32)
       self.evaluate(output)
@@ -773,7 +773,7 @@ class EagerPyFuncTest(PyFuncTestBase):
   def testEagerPyFuncNotACallable(self):
     x = constant_op.constant("x", dtype=dtypes.string)
 
-    with self.assertRaisesRegexp(ValueError, "callable"):
+    with self.assertRaisesRegex(ValueError, "callable"):
       _ = script_ops.eager_py_func(x, inp=[x], Tout=dtypes.string)
 
 
diff --git a/tensorflow/python/kernel_tests/qr_op_test.py b/tensorflow/python/kernel_tests/qr_op_test.py
index 0c291dbd940..2effb832bda 100644
--- a/tensorflow/python/kernel_tests/qr_op_test.py
+++ b/tensorflow/python/kernel_tests/qr_op_test.py
@@ -50,12 +50,12 @@ class QrOpTest(test.TestCase):
   def testWrongDimensions(self):
     # The input to svd should be a tensor of at least rank 2.
     scalar = constant_op.constant(1.)
-    with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
-                                 "rank.* 2.*0"):
+    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
+                                "rank.* 2.*0"):
       linalg_ops.qr(scalar)
     vector = constant_op.constant([1., 2.])
-    with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
-                                 "rank.* 2.*1"):
+    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
+                                "rank.* 2.*1"):
       linalg_ops.qr(vector)
 
   @test_util.run_in_graph_and_eager_modes(use_gpu=True)
diff --git a/tensorflow/python/kernel_tests/random/random_ops_test.py b/tensorflow/python/kernel_tests/random/random_ops_test.py
index 73c8bd09db0..c361f79fb1f 100644
--- a/tensorflow/python/kernel_tests/random/random_ops_test.py
+++ b/tensorflow/python/kernel_tests/random/random_ops_test.py
@@ -303,11 +303,11 @@ class RandomUniformTest(RandomOpTestCommon):
   @test_util.run_deprecated_v1
   def testUniformIntsWithInvalidShape(self):
     for dtype in dtypes.int32, dtypes.int64:
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "minval must be a scalar; got a tensor of shape"):
         random_ops.random_uniform(
             [1000], minval=[1, 2], maxval=3, dtype=dtype)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "maxval must be a scalar; got a tensor of shape"):
         random_ops.random_uniform(
             [1000], minval=1, maxval=[2, 3], dtype=dtype)
diff --git a/tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py b/tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py
index ba7ee16e7c2..4cb5f1935d9 100644
--- a/tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py
+++ b/tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py
@@ -653,8 +653,8 @@ class RandomShuffleQueueTest(test.TestCase):
       self.assertItemsEqual(expected, results)
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                   "is closed and has insufficient"):
+      with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                  "is closed and has insufficient"):
         self.evaluate(dequeued_t)
 
   def testBlockingDequeueFromClosedQueue(self):
@@ -680,8 +680,8 @@ class RandomShuffleQueueTest(test.TestCase):
 
         self.assertItemsEqual(elems, results)
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=blocking_dequeue)
@@ -705,8 +705,8 @@ class RandomShuffleQueueTest(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
         finished.append(True)
 
@@ -736,8 +736,8 @@ class RandomShuffleQueueTest(test.TestCase):
         self.assertItemsEqual(elems, self.evaluate(dequeued_t))
         progress.append(1)
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
         progress.append(2)
 
@@ -770,9 +770,9 @@ class RandomShuffleQueueTest(test.TestCase):
 
       def dequeue():
         results.extend(self.evaluate(dequeued_t))
-        self.assertEquals(3, len(results))
+        self.assertEqual(3, len(results))
         results.extend(self.evaluate(dequeued_t))
-        self.assertEquals(4, len(results))
+        self.assertEqual(4, len(results))
 
       dequeue_thread = self.checkedThread(target=dequeue)
       dequeue_thread.start()
@@ -801,11 +801,11 @@ class RandomShuffleQueueTest(test.TestCase):
 
       def dequeue():
         results.extend(self.evaluate(dequeued_t))
-        self.assertEquals(3, len(results))
+        self.assertEqual(3, len(results))
         # min_after_dequeue is 2, we ask for 3 elements, and we end up only
         # getting the remaining 1.
         results.extend(self.evaluate(dequeued_t))
-        self.assertEquals(4, len(results))
+        self.assertEqual(4, len(results))
 
       dequeue_thread = self.checkedThread(target=dequeue)
       dequeue_thread.start()
@@ -833,8 +833,8 @@ class RandomShuffleQueueTest(test.TestCase):
         results.extend(self.evaluate(dequeued_t))
         self.assertEqual(len(results), 3)
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
         # While the last dequeue failed, we want to insure that it returns
         # any elements that it potentially reserved to dequeue. Thus the
@@ -858,8 +858,8 @@ class RandomShuffleQueueTest(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -878,8 +878,8 @@ class RandomShuffleQueueTest(test.TestCase):
 
       def dequeue():
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
-                                     "is closed and has insufficient"):
+        with self.assertRaisesRegex(errors_impl.OutOfRangeError,
+                                    "is closed and has insufficient"):
           self.evaluate(dequeued_t)
 
       dequeue_thread = self.checkedThread(target=dequeue)
@@ -900,7 +900,7 @@ class RandomShuffleQueueTest(test.TestCase):
       close_op.run()
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
         enqueue_op.run()
 
   def testEnqueueManyToClosedQueue(self):
@@ -914,7 +914,7 @@ class RandomShuffleQueueTest(test.TestCase):
       close_op.run()
 
       # Expect the operation to fail due to the queue being closed.
-      with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
         enqueue_op.run()
 
   def testBlockingEnqueueToFullQueue(self):
@@ -996,7 +996,7 @@ class RandomShuffleQueueTest(test.TestCase):
         self.evaluate(blocking_enqueue_op)
 
         # Expect the operation to fail due to the queue being closed.
-        with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
+        with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
           self.evaluate(blocking_enqueue_op)
 
       thread1 = self.checkedThread(target=blocking_enqueue)
@@ -1069,7 +1069,7 @@ class RandomShuffleQueueTest(test.TestCase):
 
       # At this point the close operation will complete, so the next enqueue
       # will fail.
-      with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
         self.evaluate(blocking_enqueue_op)
 
   def testSharedQueueSameSession(self):
diff --git a/tensorflow/python/kernel_tests/reduce_join_op_test.py b/tensorflow/python/kernel_tests/reduce_join_op_test.py
index 751e3e3648b..a9c1278a7b1 100644
--- a/tensorflow/python/kernel_tests/reduce_join_op_test.py
+++ b/tensorflow/python/kernel_tests/reduce_join_op_test.py
@@ -303,17 +303,15 @@ class ReduceJoinTest(UnicodeTestCase):
   @test_util.run_deprecated_v1
   def testInvalidReductionIndices(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(ValueError, "Invalid reduction dim"):
+      with self.assertRaisesRegex(ValueError, "Invalid reduction dim"):
         string_ops.reduce_join(inputs="", axis=0)
-      with self.assertRaisesRegexp(ValueError,
-                                   "Invalid reduction dimension -3"):
+      with self.assertRaisesRegex(ValueError, "Invalid reduction dimension -3"):
         string_ops.reduce_join(inputs=[[""]], axis=-3)
-      with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
+      with self.assertRaisesRegex(ValueError, "Invalid reduction dimension 2"):
         string_ops.reduce_join(inputs=[[""]], axis=2)
-      with self.assertRaisesRegexp(ValueError,
-                                   "Invalid reduction dimension -3"):
+      with self.assertRaisesRegex(ValueError, "Invalid reduction dimension -3"):
         string_ops.reduce_join(inputs=[[""]], axis=[0, -3])
-      with self.assertRaisesRegexp(ValueError, "Invalid reduction dimension 2"):
+      with self.assertRaisesRegex(ValueError, "Invalid reduction dimension 2"):
         string_ops.reduce_join(inputs=[[""]], axis=[0, 2])
 
   def testZeroDims(self):
diff --git a/tensorflow/python/kernel_tests/relu_op_test.py b/tensorflow/python/kernel_tests/relu_op_test.py
index 0c599a0f5f6..a93a2046a1a 100644
--- a/tensorflow/python/kernel_tests/relu_op_test.py
+++ b/tensorflow/python/kernel_tests/relu_op_test.py
@@ -91,7 +91,7 @@ class ReluTest(test.TestCase):
       self.skipTest("No GPU available")
     inputs = constant_op.constant(
         np.array([[-50, 7, 23], [0, 1, -5], [6, -2, 11]]), dtypes.qint8)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Tensor size must be a multiple of 4 for Relu<qint8>. Got 9"):
       self.evaluate(nn_ops.relu(inputs))
@@ -99,7 +99,7 @@ class ReluTest(test.TestCase):
     inputs = constant_op.constant(
         np.array([1, -2, 3, -4, 5, -6, 7, -8, 9, -8, 7, -6, 5, -4, 3, -2, 1]),
         dtypes.qint8)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         "Tensor size must be a multiple of 4 for Relu<qint8>. Got 17"):
       self.evaluate(nn_ops.relu(inputs))
diff --git a/tensorflow/python/kernel_tests/reshape_op_test.py b/tensorflow/python/kernel_tests/reshape_op_test.py
index 264838d7ae5..0d54138e053 100644
--- a/tensorflow/python/kernel_tests/reshape_op_test.py
+++ b/tensorflow/python/kernel_tests/reshape_op_test.py
@@ -147,12 +147,12 @@ class ReshapeTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testErrors(self):
     y = constant_op.constant(0.0, shape=[23, 29, 31])
-    with self.assertRaisesRegexp(ValueError, "must be evenly divisible by 17"):
+    with self.assertRaisesRegex(ValueError, "must be evenly divisible by 17"):
       array_ops.reshape(y, [17, -1])
 
     z = constant_op.constant(0.0, shape=[32, 128])
-    with self.assertRaisesRegexp(ValueError,
-                                 "Cannot reshape a tensor with 4096 elements"):
+    with self.assertRaisesRegex(ValueError,
+                                "Cannot reshape a tensor with 4096 elements"):
       array_ops.reshape(z, [4095])
 
   @test_util.run_deprecated_v1
diff --git a/tensorflow/python/kernel_tests/resource_variable_ops_test.py b/tensorflow/python/kernel_tests/resource_variable_ops_test.py
index fb172fbcb10..953c616b0bc 100644
--- a/tensorflow/python/kernel_tests/resource_variable_ops_test.py
+++ b/tensorflow/python/kernel_tests/resource_variable_ops_test.py
@@ -109,7 +109,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
       handle = resource_variable_ops.var_handle_op(
           dtype=dtypes.int32, shape=[1], name="foo")
       resource_variable_ops.assign_variable_op(handle, 1)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "Trying to read variable with wrong dtype. "
           "Expected float got int32"):
@@ -203,7 +203,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
           dtype=dtypes.int32, shape=[1], name="foo")
       resource_variable_ops.assign_variable_op(
           handle, constant_op.constant([1]))
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError, "Trying to assign variable with wrong "
           "dtype. Expected int32 got float"):
         resource_variable_ops.assign_variable_op(
@@ -962,8 +962,8 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
       with variable_scope.variable_scope("foo"):
         var = variable_scope.get_variable("x", shape=[1, 1],
                                           dtype=dtypes.float32)
-        with self.assertRaisesRegexp(ValueError,
-                                     "Shapes.*and.*are incompatible"):
+        with self.assertRaisesRegex(ValueError,
+                                    "Shapes.*and.*are incompatible"):
           assign = var.assign(np.zeros(shape=[2, 2]))
           self.evaluate(assign)
 
@@ -1124,7 +1124,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
       v = resource_variable_ops.ResourceVariable(initial_value=zero)
       return (i + 1, v.read_value())
 
-    with self.assertRaisesRegexp(ValueError, "initializer"):
+    with self.assertRaisesRegex(ValueError, "initializer"):
       control_flow_ops.while_loop(cond, body, [0, 0])
 
   def testVariableEager(self):
@@ -1193,8 +1193,8 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
                                                    name="var8")
       var_handle = var._handle
       del var
-      with self.assertRaisesRegexp(errors.NotFoundError,
-                                   r"Resource .* does not exist."):
+      with self.assertRaisesRegex(errors.NotFoundError,
+                                  r"Resource .* does not exist."):
         resource_variable_ops.destroy_resource_op(var_handle,
                                                   ignore_lookup_error=False)
 
@@ -1280,7 +1280,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
     # The exact error and message differ between graph construction (where the
     # error is realized during shape inference at graph construction time) and
     # eager execution (where the error is realized during kernel execution).
-    with self.assertRaisesRegexp(Exception, r"shape.*2.*3"):
+    with self.assertRaisesRegex(Exception, r"shape.*2.*3"):
       state_ops.scatter_update(v, [0, 1], [0, 1, 2])
 
   @test_util.run_in_graph_and_eager_modes
@@ -1288,7 +1288,7 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
     v = resource_variable_ops.ResourceVariable([0, 1, 2, 3])
     self.evaluate(v.initializer)
     pattern = re.compile("shapes must be equal", re.IGNORECASE)
-    with self.assertRaisesRegexp(Exception, pattern):
+    with self.assertRaisesRegex(Exception, pattern):
       self.evaluate(v.assign_add(1))
 
   @test_util.run_in_graph_and_eager_modes
diff --git a/tensorflow/python/kernel_tests/reverse_sequence_op_test.py b/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
index 267decff38b..39bbc613a0e 100644
--- a/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
+++ b/tensorflow/python/kernel_tests/reverse_sequence_op_test.py
@@ -149,37 +149,37 @@ class ReverseSequenceTest(test.TestCase):
   def testInvalidArguments(self):
     # Batch size mismatched between input and seq_lengths.
     # seq_length too long
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 (r"Dimensions must be equal|"
-                                  r"Length of seq_lengths != input.dims\(0\)")):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                (r"Dimensions must be equal|"
+                                 r"Length of seq_lengths != input.dims\(0\)")):
       array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2, 2], seq_axis=1)
 
     # seq_length too short
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 (r"Dimensions must be equal|"
-                                  r"Length of seq_lengths != input.dims\(0\)")):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                (r"Dimensions must be equal|"
+                                 r"Length of seq_lengths != input.dims\(0\)")):
       array_ops.reverse_sequence([[1, 2], [3, 4]], [2], seq_axis=1)
 
     # Invalid seq_length shape
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 ("Shape must be rank 1 but is rank 2|"
-                                  "seq_lengths must be 1-dim")):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                ("Shape must be rank 1 but is rank 2|"
+                                 "seq_lengths must be 1-dim")):
       array_ops.reverse_sequence([[1, 2], [3, 4]], [[2, 2]], seq_axis=1)
 
     # seq_axis out of bounds.
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 "seq_dim must be < input rank"):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                "seq_dim must be < input rank"):
       array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2], seq_axis=2)
 
     # batch_axis out of bounds.
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 "batch_dim must be < input rank"):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                "batch_dim must be < input rank"):
       array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2],
                                  seq_axis=1,
                                  batch_axis=3)
 
-    with self.assertRaisesRegexp((errors.OpError, errors.InvalidArgumentError),
-                                 "batch_dim == seq_dim == 0"):
+    with self.assertRaisesRegex((errors.OpError, errors.InvalidArgumentError),
+                                "batch_dim == seq_dim == 0"):
       output = array_ops.reverse_sequence([[1, 2], [3, 4]], [2, 2], seq_axis=0)
       self.evaluate(output)
 
diff --git a/tensorflow/python/kernel_tests/rnn_cell_test.py b/tensorflow/python/kernel_tests/rnn_cell_test.py
index 9de14006de2..c6cf1cdf875 100644
--- a/tensorflow/python/kernel_tests/rnn_cell_test.py
+++ b/tensorflow/python/kernel_tests/rnn_cell_test.py
@@ -200,7 +200,7 @@ class RNNTest(test.TestCase):
   def testInvalidSequenceLengthShape(self):
     cell = Plus1RNNCell()
     inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
-    with self.assertRaisesRegexp(ValueError, "must be a vector"):
+    with self.assertRaisesRegex(ValueError, "must be a vector"):
       rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)
 
   @test_util.run_v1_only("b/124229375")
@@ -2796,10 +2796,9 @@ class RNNCellTest(test.TestCase, parameterized.TestCase):
             state_is_tuple=False)
         cell(x, m)  # Execute to create variables
       variables = variables_lib.global_variables()
-      self.assertEquals(variables[0].op.name, "root/lstm_cell/kernel")
-      self.assertEquals(variables[1].op.name, "root/lstm_cell/bias")
-      self.assertEquals(variables[2].op.name,
-                        "root/lstm_cell/projection/kernel")
+      self.assertEqual(variables[0].op.name, "root/lstm_cell/kernel")
+      self.assertEqual(variables[1].op.name, "root/lstm_cell/bias")
+      self.assertEqual(variables[2].op.name, "root/lstm_cell/projection/kernel")
 
   @test_util.run_in_graph_and_eager_modes
   def testWrapperCheckpointing(self):
@@ -2950,7 +2949,7 @@ class RNNCellTest(test.TestCase, parameterized.TestCase):
         m_good = (array_ops.zeros([1, 2]), array_ops.zeros([1, 2]))
 
         # Test incorrectness of state
-        with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
+        with self.assertRaisesRegex(ValueError, "Expected state .* a tuple"):
           rnn_cell_impl.MultiRNNCell(
               [rnn_cell_impl.GRUCell(2) for _ in range(2)],
               state_is_tuple=True)(x, m_bad)
diff --git a/tensorflow/python/kernel_tests/rnn_test.py b/tensorflow/python/kernel_tests/rnn_test.py
index 0a5f25d8814..27732de19d1 100644
--- a/tensorflow/python/kernel_tests/rnn_test.py
+++ b/tensorflow/python/kernel_tests/rnn_test.py
@@ -135,7 +135,7 @@ class RNNTest(test.TestCase):
       inputs = [constant_op.constant(np.ones((3, 4)))]
     else:
       inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
-    with self.assertRaisesRegexp(ValueError, "must be a vector"):
+    with self.assertRaisesRegex(ValueError, "must be a vector"):
       rnn.dynamic_rnn(
           cell,
           array_ops.stack(inputs),
@@ -157,8 +157,8 @@ class RNNTest(test.TestCase):
     ]
     for cell_cls in cells:
       with self.cached_session():
-        with self.assertRaisesRegexp(
-            ValueError, "RNN cell only supports floating"):
+        with self.assertRaisesRegex(ValueError,
+                                    "RNN cell only supports floating"):
           cell = cell_cls(2, dtype=dtypes.int32)
           rnn.dynamic_rnn(cell, inputs, dtype=dtypes.int32)
 
@@ -279,22 +279,22 @@ class RNNTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testCellGetInitialState(self):
     cell = rnn_cell_impl.BasicRNNCell(5)
-    with self.assertRaisesRegexp(
-        ValueError, "batch_size and dtype cannot be None"):
+    with self.assertRaisesRegex(ValueError,
+                                "batch_size and dtype cannot be None"):
       cell.get_initial_state(None, None, None)
 
     inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 1))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "batch size from input tensor is different from"):
       cell.get_initial_state(inputs=inputs, batch_size=50, dtype=None)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "batch size from input tensor is different from"):
       cell.get_initial_state(
           inputs=inputs, batch_size=constant_op.constant(50), dtype=None)
 
-    with self.assertRaisesRegexp(
-        ValueError, "dtype from input tensor is different from"):
+    with self.assertRaisesRegex(ValueError,
+                                "dtype from input tensor is different from"):
       cell.get_initial_state(inputs=inputs, batch_size=None, dtype=dtypes.int16)
 
     initial_state = cell.get_initial_state(
diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
index e993ae29c10..1982fd27d4d 100644
--- a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
+++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py
@@ -782,7 +782,7 @@ class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
     tf_indices = [8, 3, 0, 9]
     with self.session(use_gpu=False):
       for tf_op in ops_list:
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Cannot specify a negative value for num_segments"):
           tf_op(
               data=tf_x,
diff --git a/tensorflow/python/kernel_tests/sets_test.py b/tensorflow/python/kernel_tests/sets_test.py
index b4f23229348..c0802742a73 100644
--- a/tensorflow/python/kernel_tests/sets_test.py
+++ b/tensorflow/python/kernel_tests/sets_test.py
@@ -140,7 +140,7 @@ class SetOpsTest(test_util.TensorFlowTestCase):
         constant_op.constant([3, 2, 3], dtypes.int64))
 
     if invalid_indices:
-      with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
+      with self.assertRaisesRegex(errors_impl.OpError, "out of order"):
         self._set_size(sp)
     else:
       self.assertAllEqual([
@@ -368,7 +368,7 @@ class SetOpsTest(test_util.TensorFlowTestCase):
         constant_op.constant([4, 2, 4], dtypes.int64))
 
     if invalid_indices:
-      with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
+      with self.assertRaisesRegex(errors_impl.OpError, "out of order"):
         self._set_intersection(sp_a, sp_b)
     else:
       expected_indices = [
@@ -858,9 +858,9 @@ class SetOpsTest(test_util.TensorFlowTestCase):
         constant_op.constant([4, 2, 4], dtypes.int64))
 
     if invalid_indices:
-      with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
+      with self.assertRaisesRegex(errors_impl.OpError, "out of order"):
         self._set_difference(sp_a, sp_b, False)
-      with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
+      with self.assertRaisesRegex(errors_impl.OpError, "out of order"):
         self._set_difference(sp_a, sp_b, True)
     else:
       # a-b
@@ -1154,7 +1154,7 @@ class SetOpsTest(test_util.TensorFlowTestCase):
         constant_op.constant([4, 2, 4], dtypes.int64))
 
     if invalid_indices:
-      with self.assertRaisesRegexp(errors_impl.OpError, "out of order"):
+      with self.assertRaisesRegex(errors_impl.OpError, "out of order"):
         self._set_union(sp_a, sp_b)
     else:
       expected_indices = [
diff --git a/tensorflow/python/kernel_tests/softplus_op_test.py b/tensorflow/python/kernel_tests/softplus_op_test.py
index 5273dd7ffc7..c79c1e150d3 100644
--- a/tensorflow/python/kernel_tests/softplus_op_test.py
+++ b/tensorflow/python/kernel_tests/softplus_op_test.py
@@ -128,7 +128,7 @@ class SoftplusTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testNoInts(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           TypeError,
           "'features' has DataType int32 not in list of allowed values"):
         nn_ops.softplus(constant_op.constant(42)).eval()
diff --git a/tensorflow/python/kernel_tests/softsign_op_test.py b/tensorflow/python/kernel_tests/softsign_op_test.py
index 5554240c826..28b525ded35 100644
--- a/tensorflow/python/kernel_tests/softsign_op_test.py
+++ b/tensorflow/python/kernel_tests/softsign_op_test.py
@@ -70,7 +70,7 @@ class SoftsignTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testNoInts(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           TypeError,
           "'features' has DataType int32 not in list of allowed values"):
         nn_ops.softsign(constant_op.constant(7)).eval()
diff --git a/tensorflow/python/kernel_tests/sparse_add_op_test.py b/tensorflow/python/kernel_tests/sparse_add_op_test.py
index 00eff54077c..1a43564fec2 100644
--- a/tensorflow/python/kernel_tests/sparse_add_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_add_op_test.py
@@ -207,8 +207,8 @@ class SparseAddTest(test.TestCase):
         sparse = sparse_tensor.SparseTensorValue(bad_idx, val, shape)
         s = sparse_ops.sparse_add(sparse, dense)
 
-        with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                     "invalid index"):
+        with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                    "invalid index"):
           self.evaluate(s)
 
 ######################## Benchmarking code
diff --git a/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py b/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
index 67b42d02b88..205237dd01d 100644
--- a/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
+++ b/tensorflow/python/kernel_tests/sparse_conditional_accumulator_test.py
@@ -451,7 +451,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
       q = data_flow_ops.SparseConditionalAccumulator(
           dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Input indices should be vector but received shape:"):
         q.apply_grad(
@@ -464,8 +464,8 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
       q = data_flow_ops.SparseConditionalAccumulator(
           dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Values cannot be 0-dimensional."):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Values cannot be 0-dimensional."):
         q.apply_grad(
             grad_indices=[0], grad_values=np.array(1).astype(np.float32)).run()
 
@@ -475,8 +475,8 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
       q = data_flow_ops.SparseConditionalAccumulator(
           dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([3, 3]))
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   " non-empty input values, got "):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  " non-empty input values, got "):
         q.apply_grad(
             grad_indices=[0, 1],
             grad_values=np.array([[0, 1, 1]]).astype(np.float32)).run()
@@ -492,7 +492,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
 
       accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Input indices should be vector but received shape:"):
         sess.run(accum_op,
@@ -512,8 +512,8 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
 
       accum_op = q.apply_grad(grad_indices=x_indices, grad_values=x_values)
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   " non-empty input values, got "):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  " non-empty input values, got "):
         sess.run(accum_op,
                  feed_dict={
                      x_indices: [0, 1],
@@ -526,20 +526,20 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
       q = data_flow_ops.SparseConditionalAccumulator(
           dtypes_lib.float32, name="Q", shape=tensor_shape.TensorShape([]))
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Input indices should be vector"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Input indices should be vector"):
         q.apply_grad(grad_indices=0, grad_values=[1.0], grad_shape=[]).run()
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Input indices should be vector"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Input indices should be vector"):
         q.apply_grad(grad_indices=0, grad_values=[1.0]).run()
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Values cannot be 0-dimensional."):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Values cannot be 0-dimensional."):
         q.apply_grad(grad_indices=[0], grad_values=1.0, grad_shape=[]).run()
 
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Values cannot be 0-dimensional."):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Values cannot be 0-dimensional."):
         q.apply_grad(grad_indices=[0], grad_values=1.0).run()
 
       # The right way to apply a scalar
@@ -553,7 +553,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
           dtypes_lib.float32, name="Q", shape=[2, 2, None])
 
       # Provided shape has wrong rank
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: expected shape rank at least 3, got 2"):
         q.apply_grad(
@@ -562,7 +562,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
             grad_shape=[2, 2]).run()
 
       # Provided shape has wrong dim
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: expected shape dim 1 to be 2, got 3"):
         q.apply_grad(
@@ -571,7 +571,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
             grad_shape=[2, 3, 2]).run()
 
       # Indices exceeded accumulator's shape's limits
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: index of slice 0 exceeded limits of shape;"
           " index is 3 exceeded 2"):
@@ -580,7 +580,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
             grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
 
       # Values' rank does not match shape
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: expected values rank at least 3, got 2"):
         q.apply_grad(
@@ -588,7 +588,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
             grad_values=np.array([[1, 2], [3, 4]]).astype(np.float32)).run()
 
       # Values' dim does not match shape
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: expected values dim 1 to be 2, got 3"):
         q.apply_grad(
@@ -604,7 +604,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
               [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32)).run()
 
       # Values' rank does not match accumulated gradient
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: expected values rank 4, got 3"):
         q.apply_grad(
@@ -612,7 +612,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
             grad_values=np.array([[[1, 2], [3, 4]]]).astype(np.float32)).run()
 
       # Values' dim does not match accumulated gradient
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: expected values dim 3 to be 2, got 3"):
         q.apply_grad(
@@ -633,7 +633,7 @@ class IndexedSlicesConditionalAccumulatorTest(test.TestCase):
                   np.float32),
           local_step=1).run()
 
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Shape mismatch: expected values dim 3 to be 3, got 2"):
         q.apply_grad(
diff --git a/tensorflow/python/kernel_tests/sparse_cross_op_test.py b/tensorflow/python/kernel_tests/sparse_cross_op_test.py
index b352c1a080f..48192551a18 100644
--- a/tensorflow/python/kernel_tests/sparse_cross_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_cross_op_test.py
@@ -418,10 +418,10 @@ class SparseCrossOpTest(test.TestCase):
       self.assertTrue(all_values_are_different)
 
   def _assert_sparse_tensor_empty(self, sp):
-    self.assertEquals(0, sp.indices.size)
-    self.assertEquals(0, sp.values.size)
+    self.assertEqual(0, sp.indices.size)
+    self.assertEqual(0, sp.values.size)
     # TODO(zakaria): check if we can ignore the first dim of the shape.
-    self.assertEquals(0, sp.dense_shape[1])
+    self.assertEqual(0, sp.dense_shape[1])
 
   def _assert_sparse_tensor_equals(self, sp1, sp2):
     self.assertAllEqual(sp1.indices.eval(), sp2.indices)
@@ -464,31 +464,31 @@ class SparseCrossOpTest(test.TestCase):
 
     st1 = sparse_tensor.SparseTensor([[0, 0]], [0], [2, 2])
     st1._indices = array_ops.zeros([], dtypes.int64)
-    with self.assertRaisesRegexp((errors.InvalidArgumentError, ValueError),
-                                 'Input indices should be a matrix'):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                'Input indices should be a matrix'):
       self.evaluate(sparse_ops.sparse_cross([st1]))
 
     st2 = sparse_tensor.SparseTensor([[0, 0]], [0], [2, 2])
     st2._values = array_ops.zeros([], dtypes.int64)
-    with self.assertRaisesRegexp((errors.InvalidArgumentError, ValueError),
-                                 'Input values should be a vector'):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                'Input values should be a vector'):
       self.evaluate(sparse_ops.sparse_cross([st2]))
 
     st3 = sparse_tensor.SparseTensor([[0, 0]], [0], [2, 2])
     st3._dense_shape = array_ops.zeros([], dtypes.int64)
-    with self.assertRaisesRegexp((errors.InvalidArgumentError, ValueError),
-                                 'Input shapes should be a vector'):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                'Input shapes should be a vector'):
       self.evaluate(sparse_ops.sparse_cross([st3]))
 
   def test_bad_tensor_shapes(self):
     # All inputs must be 2D.
-    with self.assertRaisesRegexp((errors.InvalidArgumentError, ValueError),
-                                 'Expected D2 of index to be 2'):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                'Expected D2 of index to be 2'):
       st = sparse_tensor.SparseTensor([[0]], [0], [10])  # 1D SparseTensor
       self.evaluate(sparse_ops.sparse_cross([st]))
 
-    with self.assertRaisesRegexp((errors.InvalidArgumentError, ValueError),
-                                 'Dense inputs should be a matrix'):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                'Dense inputs should be a matrix'):
       dt = array_ops.zeros([0])  # 1D DenseTensor.
       self.evaluate(sparse_ops.sparse_cross([dt]))
 
@@ -496,11 +496,11 @@ class SparseCrossOpTest(test.TestCase):
     st1 = sparse_tensor.SparseTensor([[0, 0]], [0], [10, 10])  # batch size 10
     st2 = sparse_tensor.SparseTensor([[0, 0]], [0], [7, 10])  # batch size 7
     dt = array_ops.zeros([5, 0])  # batch size 5
-    with self.assertRaisesRegexp((errors.InvalidArgumentError, ValueError),
-                                 'Expected batch size'):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                'Expected batch size'):
       self.evaluate(sparse_ops.sparse_cross([st1, dt]))
-    with self.assertRaisesRegexp((errors.InvalidArgumentError, ValueError),
-                                 'Expected batch size'):
+    with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
+                                'Expected batch size'):
       self.evaluate(sparse_ops.sparse_cross([st1, st2]))
 
 
diff --git a/tensorflow/python/kernel_tests/sparse_ops_test.py b/tensorflow/python/kernel_tests/sparse_ops_test.py
index e4cc2046c64..5268a2be537 100644
--- a/tensorflow/python/kernel_tests/sparse_ops_test.py
+++ b/tensorflow/python/kernel_tests/sparse_ops_test.py
@@ -446,7 +446,7 @@ class SparseResetShapeTest(test_util.TensorFlowTestCase):
     sp_input = self._SparseTensor_2x5x6()
     new_shape = np.array([3, 7, 5], dtype=np.int64)
 
-    with self.assertRaisesRegexp(ValueError, "should have dimension sizes"):
+    with self.assertRaisesRegex(ValueError, "should have dimension sizes"):
       sparse_ops.sparse_reset_shape(sp_input, new_shape)
 
   @test_util.run_deprecated_v1
@@ -792,7 +792,7 @@ class SparseMathOpsTest(test_util.TensorFlowTestCase):
       b = sparse_tensor.SparseTensor([[0, 0, 1, 0], [0, 0, 3, 0]], [10, 20],
                                      [1, 1, 4, 2])
       c = a * b
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InvalidArgumentError,
           "broadcasts dense to sparse only; got incompatible shapes"):
         self.evaluate(c)
diff --git a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py
index 6ec51bb9735..946774e7275 100644
--- a/tensorflow/python/kernel_tests/sparse_reshape_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_reshape_op_test.py
@@ -71,14 +71,14 @@ class SparseReshapeTest(test.TestCase):
   def testRaisesIfMoreThanOneInferredDim(self):
     sp_input = sparse_tensor.SparseTensor.from_value(
         self._SparseTensorValue_2x3x4())
-    with self.assertRaisesRegexp(ValueError, "At most one dimension can"):
+    with self.assertRaisesRegex(ValueError, "At most one dimension can"):
       sparse_ops.sparse_reshape(sp_input, shape=(-1, 2, -1))
 
   @test_util.run_deprecated_v1
   def testRaisesIfInferredShapeNotPossible(self):
     sp_input = sparse_tensor.SparseTensor.from_value(
         self._SparseTensorValue_2x3x4())
-    with self.assertRaisesRegexp(ValueError, "Cannot reshape"):
+    with self.assertRaisesRegex(ValueError, "Cannot reshape"):
       sparse_ops.sparse_reshape(sp_input, shape=(-1, 7))
 
   @test_util.run_deprecated_v1
@@ -249,7 +249,7 @@ class SparseReshapeTest(test.TestCase):
   def testProvideStaticallyMismatchedSizes(self):
     input_val = self._SparseTensorValue_5x6()
     sp_input = sparse_tensor.SparseTensor.from_value(input_val)
-    with self.assertRaisesRegexp(ValueError, "Cannot reshape"):
+    with self.assertRaisesRegex(ValueError, "Cannot reshape"):
       sparse_ops.sparse_reshape(sp_input, [4, 7])
 
   @test_util.run_deprecated_v1
diff --git a/tensorflow/python/kernel_tests/sparse_split_op_test.py b/tensorflow/python/kernel_tests/sparse_split_op_test.py
index f4bb7498b02..bdd4b8e7634 100644
--- a/tensorflow/python/kernel_tests/sparse_split_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_split_op_test.py
@@ -254,13 +254,13 @@ class SparseSplitOpTest(test.TestCase):
                             expected_output.indices.eval())
 
   def testArgumentErrors(self):
-    with self.assertRaisesRegexp(ValueError, 'Keyword arguments are required'):
+    with self.assertRaisesRegex(ValueError, 'Keyword arguments are required'):
       sparse_ops.sparse_split(3, 2, 1)
-    with self.assertRaisesRegexp(ValueError, 'sp_input is required'):
+    with self.assertRaisesRegex(ValueError, 'sp_input is required'):
       sparse_ops.sparse_split()
-    with self.assertRaisesRegexp(ValueError, 'num_split is required'):
+    with self.assertRaisesRegex(ValueError, 'num_split is required'):
       sparse_ops.sparse_split(sp_input=1)
-    with self.assertRaisesRegexp(ValueError, 'axis is required'):
+    with self.assertRaisesRegex(ValueError, 'axis is required'):
       sparse_ops.sparse_split(num_split=2, sp_input=1)
 
 
diff --git a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
index c8dc99c8ec0..79f1c488f35 100644
--- a/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
@@ -131,7 +131,7 @@ class SparseTensorDenseMatMulTest(test.TestCase):
     x_shape_inconsistent = [10, 15]
     x_st_shape_inconsistent = sparse_tensor.SparseTensor(x_indices, x_values,
                                                          x_shape_inconsistent)
-    with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
+    with self.assertRaisesRegex(ValueError, "Dimensions must be equal"):
       sparse_ops.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
 
   @test_util.deprecated_graph_mode_only
diff --git a/tensorflow/python/kernel_tests/sparse_xent_op_test.py b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
index 9c65f75054f..cf1337b493d 100644
--- a/tensorflow/python/kernel_tests/sparse_xent_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
@@ -143,13 +143,13 @@ class SparseXentTest(test.TestCase):
 
   def testShapeMismatch(self):
     with self.session(use_gpu=True):
-      with self.assertRaisesRegexp(ValueError, ".*Rank mismatch:*"):
+      with self.assertRaisesRegex(ValueError, ".*Rank mismatch:*"):
         nn_ops.sparse_softmax_cross_entropy_with_logits(
             labels=[[0, 2]], logits=[[0., 1.], [2., 3.], [2., 3.]])
 
   def testScalar(self):
     with self.session(use_gpu=True):
-      with self.assertRaisesRegexp(ValueError, ".*Logits cannot be scalars*"):
+      with self.assertRaisesRegex(ValueError, ".*Logits cannot be scalars*"):
         nn_ops.sparse_softmax_cross_entropy_with_logits(
             labels=constant_op.constant(0), logits=constant_op.constant(1.0))
 
@@ -267,8 +267,8 @@ class SparseXentTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testScalarHandling(self):
     with self.session(use_gpu=False) as sess:
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   ".*labels must be 1-D.*"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  ".*labels must be 1-D.*"):
         labels = array_ops.placeholder(dtypes.int32, shape=[None, 1])
         logits = array_ops.placeholder(dtypes.float32, shape=[None, 3])
         ce = nn_ops.sparse_softmax_cross_entropy_with_logits(
diff --git a/tensorflow/python/kernel_tests/split_op_test.py b/tensorflow/python/kernel_tests/split_op_test.py
index 14c5b53de92..ef66d8dda0b 100644
--- a/tensorflow/python/kernel_tests/split_op_test.py
+++ b/tensorflow/python/kernel_tests/split_op_test.py
@@ -342,7 +342,7 @@ class SplitOpTest(test.TestCase):
       array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)
 
     # num_split does not evenly divide the size in split_dim.
-    with self.assertRaisesRegexp(ValueError, "should evenly divide"):
+    with self.assertRaisesRegex(ValueError, "should evenly divide"):
       array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
 
     # Unknown split_dim.
@@ -378,14 +378,14 @@ class SplitOpTest(test.TestCase):
     x = array_ops.placeholder(dtypes.int32)
     values = np.zeros([5, 30])
     splits = array_ops.placeholder(dtypes.int32)
-    with self.assertRaisesRegexp(ValueError, "Cannot infer"):
+    with self.assertRaisesRegex(ValueError, "Cannot infer"):
       y = array_ops.split(values, splits, axis=x)
 
     splits = array_ops.placeholder(dtypes.int32, [3])
     y = array_ops.split(values, splits, axis=x)
     with self.session(use_gpu=True) as sess:
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "must have exactly one element"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "must have exactly one element"):
         sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
 
 
diff --git a/tensorflow/python/kernel_tests/stack_op_test.py b/tensorflow/python/kernel_tests/stack_op_test.py
index aebbeefcc8d..00117187c81 100644
--- a/tensorflow/python/kernel_tests/stack_op_test.py
+++ b/tensorflow/python/kernel_tests/stack_op_test.py
@@ -248,12 +248,12 @@ class StackOpTest(test.TestCase):
 
   def testDimOutOfRange(self):
     t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
-    with self.assertRaisesRegexp(ValueError, r"axis = 2 not in \[-2, 2\)"):
+    with self.assertRaisesRegex(ValueError, r"axis = 2 not in \[-2, 2\)"):
       array_ops.stack(t, axis=2)
 
   def testDimOutOfNegativeRange(self):
     t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
-    with self.assertRaisesRegexp(ValueError, r"axis = -3 not in \[-2, 2\)"):
+    with self.assertRaisesRegex(ValueError, r"axis = -3 not in \[-2, 2\)"):
       array_ops.stack(t, axis=-3)
 
   def testComplex(self):
diff --git a/tensorflow/python/kernel_tests/string_bytes_split_op_test.py b/tensorflow/python/kernel_tests/string_bytes_split_op_test.py
index 8a4f5edc519..058ab1f9ecd 100644
--- a/tensorflow/python/kernel_tests/string_bytes_split_op_test.py
+++ b/tensorflow/python/kernel_tests/string_bytes_split_op_test.py
@@ -72,8 +72,8 @@ class StringsToBytesOpTest(test_util.TensorFlowTestCase,
     def f(v):
       return ragged_string_ops.string_bytes_split(v)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'input must have a statically-known rank'):
+    with self.assertRaisesRegex(ValueError,
+                                'input must have a statically-known rank'):
       f(['foo'])
 
 
diff --git a/tensorflow/python/kernel_tests/string_format_op_test.py b/tensorflow/python/kernel_tests/string_format_op_test.py
index 74a5072bab9..52379cc2c8d 100644
--- a/tensorflow/python/kernel_tests/string_format_op_test.py
+++ b/tensorflow/python/kernel_tests/string_format_op_test.py
@@ -358,23 +358,23 @@ class StringFormatOpTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes()
   def testTensorCountMustMatchPlaceholderCount(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, r"2 placeholder\(s\) in template does not match 1 "
-                      r"tensor\(s\) provided as input"):
+          r"tensor\(s\) provided as input"):
         tensor = math_ops.range(10)
         format_output = string_ops.string_format("{} {}", tensor)
         self.evaluate(format_output)
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, r"2 placeholder\(s\) in template does not match 1 "
-                      r"tensor\(s\) provided as input"):
+          r"tensor\(s\) provided as input"):
         tensor = math_ops.range(10)
         format_output = string_ops.string_format("{} {}", [tensor])
         self.evaluate(format_output)
     with self.cached_session():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, r"1 placeholder\(s\) in template does not match 2 "
-                      r"tensor\(s\) provided as input"):
+          r"tensor\(s\) provided as input"):
         tensor = math_ops.range(10)
         format_output = string_ops.string_format("{}", (tensor, tensor))
         self.evaluate(format_output)
diff --git a/tensorflow/python/kernel_tests/string_length_op_test.py b/tensorflow/python/kernel_tests/string_length_op_test.py
index bfa6ac2454a..42a5cb63ccf 100644
--- a/tensorflow/python/kernel_tests/string_length_op_test.py
+++ b/tensorflow/python/kernel_tests/string_length_op_test.py
@@ -48,7 +48,7 @@ class StringLengthOpTest(test.TestCase):
           self.evaluate(utf8_byte_lengths), expected_utf8_byte_lengths)
       self.assertAllEqual(
           self.evaluate(utf8_char_lengths), expected_utf8_char_lengths)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Attr 'unit' of 'StringLength' Op passed string 'XYZ' "
           'not in: "BYTE", "UTF8_CHAR"'):
         string_ops.string_length(utf8_strings, unit="XYZ")
diff --git a/tensorflow/python/kernel_tests/string_split_op_test.py b/tensorflow/python/kernel_tests/string_split_op_test.py
index 8e9517b2f1f..eef80ca0767 100755
--- a/tensorflow/python/kernel_tests/string_split_op_test.py
+++ b/tensorflow/python/kernel_tests/string_split_op_test.py
@@ -217,7 +217,7 @@ class StringSplitOpTest(test.TestCase, parameterized.TestCase):
                                    expected=None,
                                    error=None):
     if error is not None:
-      with self.assertRaisesRegexp(ValueError, error):
+      with self.assertRaisesRegex(ValueError, error):
         ragged_string_ops.string_split(source, sep, skip_empty, delimiter,
                                        result_type)
     if expected is not None:
@@ -447,7 +447,7 @@ class StringSplitV2OpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
                      self.evaluate(actual_sparse_v1.dense_shape).tolist())
 
   def testSplitV1BadResultType(self):
-    with self.assertRaisesRegexp(ValueError, "result_type must be .*"):
+    with self.assertRaisesRegex(ValueError, "result_type must be .*"):
       ragged_string_ops.strings_split_v1("foo", result_type="BouncyTensor")
 
   def _py_split(self, strings, **kwargs):
diff --git a/tensorflow/python/kernel_tests/summary_ops_test.py b/tensorflow/python/kernel_tests/summary_ops_test.py
index 387083ceff4..cefbf48b9c9 100644
--- a/tensorflow/python/kernel_tests/summary_ops_test.py
+++ b/tensorflow/python/kernel_tests/summary_ops_test.py
@@ -1068,7 +1068,7 @@ class SummaryOpsTest(test_util.TensorFlowTestCase):
 
     with test.mock.patch.object(logging, 'warn') as mock_log:
       f()
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_log.call_args), 'Cannot enable trace inside a tf.function.')
 
   @test_util.run_v2_only
@@ -1076,7 +1076,7 @@ class SummaryOpsTest(test_util.TensorFlowTestCase):
     with test.mock.patch.object(logging, 'warn') as mock_log:
       with context.graph_mode():
         summary_ops.trace_on(graph=True, profiler=False)
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_log.call_args), 'Must enable trace in eager mode.')
 
   @test_util.run_v2_only
@@ -1098,16 +1098,15 @@ class SummaryOpsTest(test_util.TensorFlowTestCase):
 
     with test.mock.patch.object(logging, 'warn') as mock_log:
       f()
-      self.assertRegexpMatches(
-          str(mock_log.call_args),
-          'Cannot export trace inside a tf.function.')
+      self.assertRegex(
+          str(mock_log.call_args), 'Cannot export trace inside a tf.function.')
 
   @test_util.run_v2_only
   def testTrace_cannotExportTraceInGraphMode(self):
     with test.mock.patch.object(logging, 'warn') as mock_log:
       with context.graph_mode():
         summary_ops.trace_export(name='foo', step=1)
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_log.call_args),
           'Can only export trace while executing eagerly.')
 
diff --git a/tensorflow/python/kernel_tests/svd_op_test.py b/tensorflow/python/kernel_tests/svd_op_test.py
index cad131dda74..c8180df2d07 100644
--- a/tensorflow/python/kernel_tests/svd_op_test.py
+++ b/tensorflow/python/kernel_tests/svd_op_test.py
@@ -52,12 +52,12 @@ class SvdOpTest(test.TestCase):
   def testWrongDimensions(self):
     # The input to svd should be a tensor of at least rank 2.
     scalar = constant_op.constant(1.)
-    with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
-                                 "rank.* 2.*0"):
+    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
+                                "rank.* 2.*0"):
       linalg_ops.svd(scalar)
     vector = constant_op.constant([1., 2.])
-    with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
-                                 "rank.* 2.*1"):
+    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
+                                "rank.* 2.*1"):
       linalg_ops.svd(vector)
 
   @test_util.run_in_graph_and_eager_modes(use_gpu=True)
diff --git a/tensorflow/python/kernel_tests/template_test.py b/tensorflow/python/kernel_tests/template_test.py
index b9e9fa027b2..37982e047d7 100644
--- a/tensorflow/python/kernel_tests/template_test.py
+++ b/tensorflow/python/kernel_tests/template_test.py
@@ -195,13 +195,13 @@ class TemplateTest(test.TestCase):
     tmpl1()
     tmpl2 = template.make_template(
         "_", variable_scoped_function, unique_name_="s1")
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Variable s1/dummy already exists, disallowed.*"):
       tmpl2()
 
   def test_unique_name_raise_error_in_eager(self):
     with context.eager_mode():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "unique_name_ cannot be used when eager execution is enabled."):
         template.make_template(
@@ -258,8 +258,7 @@ class TemplateTest(test.TestCase):
 
   @test_util.run_in_graph_and_eager_modes
   def test_template_without_name(self):
-    with self.assertRaisesRegexp(
-        ValueError, "name cannot be None."):
+    with self.assertRaisesRegex(ValueError, "name cannot be None."):
       template.make_template(None, variable_scoped_function)
 
   @test_util.run_in_graph_and_eager_modes
@@ -591,31 +590,36 @@ class TemplateTest(test.TestCase):
     linear1 = make_linear_module(output_size=2, name="foo")
     outputs_a, w1 = linear1(inputs)
     outputs_b, _ = linear1(inputs)
-    self.assertEquals("foo", linear1.variable_scope.name)
-    self.assertEquals("foo/w:0", w1.name)
+    self.assertEqual("foo", linear1.variable_scope.name)
+    self.assertEqual("foo/w:0", w1.name)
     if not context.executing_eagerly():
-      self.assertEquals("foo/add:0", outputs_a.name,
-                        "First application of template should get "
-                        "same name scope as variables.")
-      self.assertEquals("foo_1/add:0", outputs_b.name,
-                        "Second application of template should get "
-                        "a freshly uniquified name scope.")
+      self.assertEqual(
+          "foo/add:0", outputs_a.name,
+          "First application of template should get "
+          "same name scope as variables.")
+      self.assertEqual(
+          "foo_1/add:0", outputs_b.name,
+          "Second application of template should get "
+          "a freshly uniquified name scope.")
 
     linear2 = make_linear_module(output_size=2, name="foo")
     outputs_c, w2 = linear2(inputs)
     outputs_d, _ = linear2(inputs)
-    self.assertEquals("foo_1", linear2.variable_scope.name,
-                      "New template gets a freshly uniquified variable scope "
-                      "because 'foo' is already taken.")
-    self.assertEquals("foo_1/w:0", w2.name)
+    self.assertEqual(
+        "foo_1", linear2.variable_scope.name,
+        "New template gets a freshly uniquified variable scope "
+        "because 'foo' is already taken.")
+    self.assertEqual("foo_1/w:0", w2.name)
     if not context.executing_eagerly():
-      self.assertEquals("foo_1_1/add:0", outputs_c.name,
-                        "First application of template would get "
-                        "same name scope as variables, but 'foo_1' is already "
-                        "a name scope.")
-      self.assertEquals("foo_1_2/add:0", outputs_d.name,
-                        "Second application of template should also get "
-                        "a freshly uniquified name scope.")
+      self.assertEqual(
+          "foo_1_1/add:0", outputs_c.name,
+          "First application of template would get "
+          "same name scope as variables, but 'foo_1' is already "
+          "a name scope.")
+      self.assertEqual(
+          "foo_1_2/add:0", outputs_d.name,
+          "Second application of template should also get "
+          "a freshly uniquified name scope.")
 
   @test_util.run_in_graph_and_eager_modes
   def test_global_variables(self):
diff --git a/tensorflow/python/kernel_tests/tensor_array_ops_test.py b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
index 5d587954858..27c56decca3 100644
--- a/tensorflow/python/kernel_tests/tensor_array_ops_test.py
+++ b/tensorflow/python/kernel_tests/tensor_array_ops_test.py
@@ -454,8 +454,8 @@ class TensorArrayTest(test.TestCase):
           "|"
           "Invalid data types; op elements string but list elements float"
           ")")
-      with self.assertRaisesRegexp(
-          (TypeError, errors.InvalidArgumentError), error_msg_regex):
+      with self.assertRaisesRegex((TypeError, errors.InvalidArgumentError),
+                                  error_msg_regex):
         self.evaluate(ta.write(0, "wrong_type_scalar").flow)
 
       if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
@@ -552,7 +552,7 @@ class TensorArrayTest(test.TestCase):
       error_msg = ("Incompatible ranks"
                    if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
                    not context.executing_eagerly() else "shape")
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, error_msg):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, error_msg):
         self.evaluate(w3.concat())
 
   def testTensorArraySplitIncompatibleShapesFails(self):
@@ -577,7 +577,7 @@ class TensorArrayTest(test.TestCase):
 
       ta = _make_ta(1, "baz")
       if control_flow_util.ENABLE_CONTROL_FLOW_V2 and not in_eager_mode:
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Shape must be at least rank 1 but is rank 0"):
           self.evaluate(ta.split(1.0, [1]).flow)
       else:
@@ -657,8 +657,8 @@ class TensorArrayTest(test.TestCase):
       # Make sure shape inference worked.
       self.assertAllEqual([None, None, 2, 3], read_value.shape.as_list())
       # Writing with wrong shape should not work.
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "Could not write to TensorArray"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "Could not write to TensorArray"):
         fed_value = np.random.random([2, 3])
         sess.run(read_value, feed_dict={value: fed_value})
       # Writing with correct shape should work.
diff --git a/tensorflow/python/kernel_tests/tensordot_op_test.py b/tensorflow/python/kernel_tests/tensordot_op_test.py
index 7f8c5e9781b..a031f9bca07 100644
--- a/tensorflow/python/kernel_tests/tensordot_op_test.py
+++ b/tensorflow/python/kernel_tests/tensordot_op_test.py
@@ -55,8 +55,8 @@ class TensordotTest(test_lib.TestCase):
     if context.executing_eagerly():
       return
     with self.cached_session() as sess:
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Matrix size-incompatible"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Matrix size-incompatible"):
         a_ph = array_ops.placeholder(dtypes.float32)
         b_ph = array_ops.placeholder(dtypes.float32)
         axes_ph = array_ops.placeholder(dtypes.int32)
diff --git a/tensorflow/python/kernel_tests/topk_op_test.py b/tensorflow/python/kernel_tests/topk_op_test.py
index 7872e62050a..eb74d96786b 100644
--- a/tensorflow/python/kernel_tests/topk_op_test.py
+++ b/tensorflow/python/kernel_tests/topk_op_test.py
@@ -203,8 +203,8 @@ class TopKTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testKTooLarge(self):
     inputs = [[0.1, 0.2], [0.3, 0.4]]
-    with self.assertRaisesRegexp(ValueError,
-                                 r"must have last dimension >= k = 4"):
+    with self.assertRaisesRegex(ValueError,
+                                r"must have last dimension >= k = 4"):
       nn_ops.top_k(inputs, 4)
 
   @test_util.run_deprecated_v1
diff --git a/tensorflow/python/kernel_tests/unicode_decode_op_test.py b/tensorflow/python/kernel_tests/unicode_decode_op_test.py
index 6bd9b15af0f..bd38dae393c 100644
--- a/tensorflow/python/kernel_tests/unicode_decode_op_test.py
+++ b/tensorflow/python/kernel_tests/unicode_decode_op_test.py
@@ -386,7 +386,7 @@ class UnicodeDecodeTest(test_util.TensorFlowTestCase,
            exception=(ValueError, errors.InvalidArgumentError)),
   ])  # pyformat: disable
   def testExceptions(self, exception=None, message=None, **args):
-    with self.assertRaisesRegexp(exception, message):
+    with self.assertRaisesRegex(exception, message):
       self.evaluate(ragged_string_ops.unicode_decode(**args))
 
   def testUnknownRankError(self):
@@ -394,7 +394,7 @@ class UnicodeDecodeTest(test_util.TensorFlowTestCase,
       return
     s = array_ops.placeholder(dtypes.string)
     message = "Rank of `input` must be statically known."
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       self.evaluate(ragged_string_ops.unicode_decode(s, input_encoding="UTF-8"))
 
   @parameterized.parameters([
@@ -710,7 +710,7 @@ class UnicodeSplitTest(test_util.TensorFlowTestCase,
            exception=(ValueError, errors.InvalidArgumentError)),
   ])  # pyformat: disable
   def testExceptions(self, exception=None, message=None, **args):
-    with self.assertRaisesRegexp(exception, message):
+    with self.assertRaisesRegex(exception, message):
       self.evaluate(ragged_string_ops.unicode_split(**args))
 
   def testUnknownRankError(self):
@@ -718,7 +718,7 @@ class UnicodeSplitTest(test_util.TensorFlowTestCase,
       return
     s = array_ops.placeholder(dtypes.string)
     message = "Rank of `input` must be statically known."
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       self.evaluate(ragged_string_ops.unicode_decode(s, input_encoding="UTF-8"))
 
 
diff --git a/tensorflow/python/kernel_tests/unicode_encode_op_test.py b/tensorflow/python/kernel_tests/unicode_encode_op_test.py
index 2f483b7fb68..4f7f175c75b 100644
--- a/tensorflow/python/kernel_tests/unicode_encode_op_test.py
+++ b/tensorflow/python/kernel_tests/unicode_encode_op_test.py
@@ -293,7 +293,7 @@ class UnicodeEncodeOpTest(test.TestCase, parameterized.TestCase):
     def f(v):
       return ragged_string_ops.unicode_encode(v, "UTF-8")
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Rank of input_tensor must be statically known."):
       f([72, 101, 108, 108, 111])
 
diff --git a/tensorflow/python/kernel_tests/unicode_transcode_op_test.py b/tensorflow/python/kernel_tests/unicode_transcode_op_test.py
index a3b4fd03474..5188645b140 100644
--- a/tensorflow/python/kernel_tests/unicode_transcode_op_test.py
+++ b/tensorflow/python/kernel_tests/unicode_transcode_op_test.py
@@ -343,7 +343,7 @@ class UnicodeTranscodeOpTest(test.TestCase, parameterized.TestCase):
           "Could not create converter for input encoding: invalid"):
         self.evaluate(outputs)
 
-    with self.assertRaisesRegexp(ValueError, "Op passed string 'invalid'"):
+    with self.assertRaisesRegex(ValueError, "Op passed string 'invalid'"):
       with self.cached_session() as sess:
         outputs = string_ops.unicode_transcode(
             strings,
@@ -358,7 +358,7 @@ class UnicodeTranscodeOpTest(test.TestCase, parameterized.TestCase):
   def test_invalid_error_policy_causes_errors(self):
     strings = [[b"a", b"abc"], [b"ABC", b"DEF"]]
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "'invalid' not in: \"strict\", \"replace\", \"ignore\"."):
       with self.cached_session() as sess:
         outputs = string_ops.unicode_transcode(
diff --git a/tensorflow/python/kernel_tests/unstack_op_test.py b/tensorflow/python/kernel_tests/unstack_op_test.py
index 13611b278bc..65217dde255 100644
--- a/tensorflow/python/kernel_tests/unstack_op_test.py
+++ b/tensorflow/python/kernel_tests/unstack_op_test.py
@@ -147,8 +147,8 @@ class UnstackOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testCannotInferNumFromUnknownShape(self):
     x = array_ops.placeholder(np.float32)
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Cannot infer num from shape <unknown>'):
+    with self.assertRaisesRegex(ValueError,
+                                r'Cannot infer num from shape <unknown>'):
       array_ops.unstack(x)
 
   @test_util.run_deprecated_v1
@@ -159,8 +159,8 @@ class UnstackOpTest(test.TestCase):
   @test_util.run_deprecated_v1
   def testCannotInferNumFromNoneShape(self):
     x = array_ops.placeholder(np.float32, shape=(None,))
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Cannot infer num from shape \((\?|None),\)'):
+    with self.assertRaisesRegex(ValueError,
+                                r'Cannot infer num from shape \((\?|None),\)'):
       array_ops.unstack(x)
 
   def testAgainstNumpy(self):
@@ -186,12 +186,12 @@ class UnstackOpTest(test.TestCase):
 
   def testAxisOutOfRange(self):
     a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
-    with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
+    with self.assertRaisesRegex(ValueError, r'axis = 2 not in \[-2, 2\)'):
       array_ops.unstack(a, axis=2)
 
   def testAxisOutOfNegativeRange(self):
     a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
-    with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
+    with self.assertRaisesRegex(ValueError, r'axis = -3 not in \[-2, 2\)'):
       array_ops.unstack(a, axis=-3)
 
   def testZeroLengthDim(self):
diff --git a/tensorflow/python/kernel_tests/variable_scope_test.py b/tensorflow/python/kernel_tests/variable_scope_test.py
index dc534f7cfec..3a88a787acc 100644
--- a/tensorflow/python/kernel_tests/variable_scope_test.py
+++ b/tensorflow/python/kernel_tests/variable_scope_test.py
@@ -415,7 +415,7 @@ class VariableScopeTest(test.TestCase):
     self.evaluate(variables_lib.variables_initializer([w]))
     self.assertAllClose(self.evaluate(w.value()), 0.1)
 
-    with self.assertRaisesRegexp(ValueError, "shape"):
+    with self.assertRaisesRegex(ValueError, "shape"):
       # We disallow explicit shape specification when initializer is constant.
       variable_scope.get_variable("u", [1], initializer=init)
 
@@ -431,7 +431,7 @@ class VariableScopeTest(test.TestCase):
     self.assertEqual(t.dtype.base_dtype, dtypes.int32)
 
     # Raise error if `initializer` dtype and `dtype` are not identical.
-    with self.assertRaisesRegexp(ValueError, "don't match"):
+    with self.assertRaisesRegex(ValueError, "don't match"):
       variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
 
   # TODO(mihaimaruseac): Not converted to use wrap_function because of
@@ -449,16 +449,16 @@ class VariableScopeTest(test.TestCase):
             "v1", [1], initializer=init_ops.constant_initializer(1))
         add = v1 + v0
       # v0 should be uninitialized.
-      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
+      with self.assertRaisesRegex(errors.OpError, "uninitialized"):
         self.evaluate(v0)
       # We should be able to initialize and run v1 without initializing
       # v0, even if the variable was created with a control dep on v0.
       self.evaluate(v1.initializer)
       self.assertEqual(1, self.evaluate(v1))
       # v0 should still be uninitialized.
-      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
+      with self.assertRaisesRegex(errors.OpError, "uninitialized"):
         self.evaluate(v0)
-      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
+      with self.assertRaisesRegex(errors.OpError, "uninitialized"):
         self.evaluate(add)
       # If we initialize v0 we should be able to run 'add'.
       self.evaluate(v0.initializer)
@@ -512,10 +512,10 @@ class VariableScopeTest(test.TestCase):
       self.evaluate(v2.initializer)
       self.assertEqual([2], self.evaluate(v2))
       # v0 should still be uninitialized.
-      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
+      with self.assertRaisesRegex(errors.OpError, "uninitialized"):
         self.evaluate(v0)
       # We should not be able to run 'add' yet.
-      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
+      with self.assertRaisesRegex(errors.OpError, "uninitialized"):
         self.evaluate(add)
       # If we initialize v0 we should be able to run 'add'.
       self.evaluate(v0.initializer)
@@ -1061,19 +1061,19 @@ class VariableScopeTest(test.TestCase):
   @run_inside_wrap_function_in_eager_mode
   def testAuxiliaryNameScopeIsInvalid(self):
     with self.cached_session():
-      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
+      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
         with variable_scope.variable_scope(
             None, default_name="scope", auxiliary_name_scope="invalid"):
           pass
 
-      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
+      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
         with variable_scope.variable_scope(
             "scope", auxiliary_name_scope="invalid"):
           pass
 
       with variable_scope.variable_scope("scope") as scope:
         pass
-      with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"):
+      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
         with variable_scope.variable_scope(
             scope, auxiliary_name_scope="invalid"):
           pass
@@ -1350,7 +1350,7 @@ class VariableScopeTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   @run_inside_wrap_function_in_eager_mode
   def testGetVariableWithInitializerWhichTakesUnprovidedArgsAndNoShape(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "The initializer passed is not valid. It should be a callable with no "
         "arguments and the shape should not be provided or an instance of "
@@ -1369,7 +1369,7 @@ class VariableScopeTest(test.TestCase):
           with variable_scope.variable_scope("_"):
             pass
 
-    self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
+    self.assertRaisesRegex(ValueError, "'_' is not a valid scope name", f)
 
 
 def axis0_into1_partitioner(shape=None, **unused_kwargs):
@@ -1415,7 +1415,7 @@ class VariableScopeWithPartitioningTest(test.TestCase):
 
     with variable_scope.variable_scope(
         "scope0", partitioner=axis0_into3_partitioner, reuse=True):
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "Trying to reuse partitioned variable .* but specified partitions "
           ".* and found partitions .*"):
@@ -1423,7 +1423,7 @@ class VariableScopeWithPartitioningTest(test.TestCase):
 
     with variable_scope.variable_scope(
         "scope0", partitioner=axis0_into1_partitioner, reuse=True):
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "Trying to reuse partitioned variable .* but specified partitions "
           ".* and found partitions .*"):
@@ -1523,12 +1523,10 @@ class VariableScopeWithCustomGetterTest(test.TestCase):
   @test_util.run_in_graph_and_eager_modes
   @run_inside_wrap_function_in_eager_mode
   def testNonCallableGetterFails(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 r"custom_getter .* not callable:"):
+    with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
       with variable_scope.variable_scope("scope0", custom_getter=3):
         variable_scope.get_variable("name0")
-    with self.assertRaisesRegexp(ValueError,
-                                 r"custom_getter .* not callable:"):
+    with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
       variable_scope.get_variable("name0", custom_getter=3)
 
   @test_util.run_in_graph_and_eager_modes
@@ -1811,7 +1809,7 @@ class VariableScopeMultithreadedTest(test.TestCase):
         with variable_scope.variable_scope("foo"):
           if i == 0:
             v = variable_scope.get_variable("v", [])
-            self.assertEquals("foo/v:0", v.name)
+            self.assertEqual("foo/v:0", v.name)
           else:
             # Any thread after the first one should fail to create variable
             # with the same name.
@@ -1841,7 +1839,7 @@ class VariableScopeMultithreadedTest(test.TestCase):
         with variable_scope.variable_scope("foo"):
           if i == 0:
             v = variable_scope.get_variable("v", [])
-            self.assertEquals("foo/v:0", v.name)
+            self.assertEqual("foo/v:0", v.name)
           else:
             # Any thread after the first one should fail to create variable
             # with the same name.
@@ -1881,12 +1879,12 @@ class VariableScopeMultithreadedTest(test.TestCase):
         with variable_scope.variable_scope(main_thread_scope):
           with variable_scope.variable_scope("foo"):
             v = variable_scope.get_variable("v", [])
-            self.assertEquals("main/foo/v:0", v.name)
+            self.assertEqual("main/foo/v:0", v.name)
 
         # Variable created outside main scope will not have prefix "main".
         with variable_scope.variable_scope("bar"):
           v = variable_scope.get_variable("v", [])
-          self.assertEquals("bar/v:0", v.name)
+          self.assertEqual("bar/v:0", v.name)
 
     graph = ops.get_default_graph()
     with variable_scope.variable_scope("main") as main_thread_scope:
diff --git a/tensorflow/python/kernel_tests/variables_test.py b/tensorflow/python/kernel_tests/variables_test.py
index 19bdd9429e5..790749b1fa4 100644
--- a/tensorflow/python/kernel_tests/variables_test.py
+++ b/tensorflow/python/kernel_tests/variables_test.py
@@ -122,10 +122,10 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):
       self.assertIs(initial_value, cyclic)
 
   def testIterable(self):
-    with self.assertRaisesRegexp(TypeError, "not iterable"):
+    with self.assertRaisesRegex(TypeError, "not iterable"):
       for _ in variables.Variable(0.0):
         pass
-    with self.assertRaisesRegexp(TypeError, "not iterable"):
+    with self.assertRaisesRegex(TypeError, "not iterable"):
       for _ in variables.Variable([0.0, 1.0]):
         pass
 
@@ -170,8 +170,7 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):
   def testAssignDifferentShapesEagerNotAllowed(self):
     with context.eager_mode():
       var = variables.Variable(np.zeros(shape=[1, 1]))
-      with self.assertRaisesRegexp(ValueError,
-                                   "Shapes.*and.*are incompatible"):
+      with self.assertRaisesRegex(ValueError, "Shapes.*and.*are incompatible"):
         var.assign(np.zeros(shape=[2, 2]))
 
   @test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@@ -274,10 +273,10 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):
       self.evaluate(v2.initializer)
       self.assertEqual([2], self.evaluate(v2))
       # v0 should still be uninitialized.
-      with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
+      with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
         self.evaluate(v0)
       # We should not be able to run 'add' yet.
-      with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
+      with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
         self.evaluate(add)
       # If we initialize v0 we should be able to run 'add'.
       self.evaluate(v0.initializer)
@@ -294,7 +293,7 @@ class VariablesTestCase(test.TestCase, parameterized.TestCase):
       v = variables.Variable(initial_value=zero)
       return (i + 1, v.read_value())
 
-    with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
+    with self.assertRaisesRegex(ValueError, "inside a control-flow"):
       control_flow_ops.while_loop(cond, body, [0, 0])
 
   @test_util.run_deprecated_v1
@@ -745,7 +744,7 @@ class PartitionedVariableTest(test.TestCase):
 
   def testPartitionedVariableFailures(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(ValueError, "empty"):
+      with self.assertRaisesRegex(ValueError, "empty"):
         variables.PartitionedVariable(
             name="fail",
             shape=2,
@@ -753,7 +752,7 @@ class PartitionedVariableTest(test.TestCase):
             variable_list=[],
             partitions=[])
 
-      with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"):
+      with self.assertRaisesRegex(ValueError, "must have a save_slice_info"):
         v0 = variables.Variable([0])
         partitions = [1]
         variables.PartitionedVariable(
@@ -763,7 +762,7 @@ class PartitionedVariableTest(test.TestCase):
             variable_list=[v0],
             partitions=partitions)
 
-      with self.assertRaisesRegexp(ValueError, "full shapes must match"):
+      with self.assertRaisesRegex(ValueError, "full shapes must match"):
         v0 = variables.Variable([0])
         v1 = variables.Variable([1])
         v0._set_save_slice_info(
@@ -779,7 +778,7 @@ class PartitionedVariableTest(test.TestCase):
             variable_list=[v1, v0],
             partitions=partitions)
 
-      with self.assertRaisesRegexp(ValueError, "must be positive"):
+      with self.assertRaisesRegex(ValueError, "must be positive"):
         v0 = variables.Variable([0])
         v0._set_save_slice_info(
             variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
diff --git a/tensorflow/python/kernel_tests/weights_broadcast_test.py b/tensorflow/python/kernel_tests/weights_broadcast_test.py
index 677d8f2f22f..b9855fa2475 100644
--- a/tensorflow/python/kernel_tests/weights_broadcast_test.py
+++ b/tensorflow/python/kernel_tests/weights_broadcast_test.py
@@ -103,14 +103,14 @@ class AssertBroadcastableTest(test.TestCase):
 
   def _test_invalid(self, weights, values):
     error_msg = 'weights can not be broadcast to values'
-    with self.assertRaisesRegexp(ValueError, error_msg):
+    with self.assertRaisesRegex(ValueError, error_msg):
       weights_broadcast_ops.assert_broadcastable(weights=weights, values=values)
     weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
     values_placeholder = array_ops.placeholder(dtypes_lib.float32)
     dynamic_op = weights_broadcast_ops.assert_broadcastable(
         weights=weights_placeholder, values=values_placeholder)
     with self.cached_session():
-      with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
+      with self.assertRaisesRegex(errors_impl.OpError, error_msg):
         dynamic_op.run(feed_dict={
             weights_placeholder: weights,
             values_placeholder: values,
@@ -245,14 +245,14 @@ class BroadcastWeightsTest(test.TestCase):
 
   def _test_invalid(self, weights, values):
     error_msg = 'weights can not be broadcast to values'
-    with self.assertRaisesRegexp(ValueError, error_msg):
+    with self.assertRaisesRegex(ValueError, error_msg):
       weights_broadcast_ops.broadcast_weights(weights=weights, values=values)
     weights_placeholder = array_ops.placeholder(dtypes_lib.float32)
     values_placeholder = array_ops.placeholder(dtypes_lib.float32)
     dynamic_op = weights_broadcast_ops.broadcast_weights(
         weights=weights_placeholder, values=values_placeholder)
     with self.cached_session():
-      with self.assertRaisesRegexp(errors_impl.OpError, error_msg):
+      with self.assertRaisesRegex(errors_impl.OpError, error_msg):
         dynamic_op.eval(feed_dict={
             weights_placeholder: weights,
             values_placeholder: values,
diff --git a/tensorflow/python/kernel_tests/while_v2_test.py b/tensorflow/python/kernel_tests/while_v2_test.py
index 95bbea156f2..1012a8b7690 100644
--- a/tensorflow/python/kernel_tests/while_v2_test.py
+++ b/tensorflow/python/kernel_tests/while_v2_test.py
@@ -146,7 +146,7 @@ class WhileV2Test(test.TestCase, parameterized.TestCase):
 
       while_loop_v2(lambda x: x < 10, Body, [x])
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError,
         r"Loop var Const:0 enters the loop with type <dtype: 'float32'> "
         r"but has type <dtype: 'float16'> after 1 iteration."):
@@ -908,25 +908,21 @@ class WhileV2Test(test.TestCase, parameterized.TestCase):
     with ops.Graph().as_default():
       while_op = self._createWhile(None)
       self.assertEqual(while_op.name, "while")
-      self.assertRegexpMatches(
-          while_op.get_attr("cond").name, r"while_cond_\d*")
-      self.assertRegexpMatches(
-          while_op.get_attr("body").name, r"while_body_\d*")
+      self.assertRegex(while_op.get_attr("cond").name, r"while_cond_\d*")
+      self.assertRegex(while_op.get_attr("body").name, r"while_body_\d*")
 
     with ops.Graph().as_default():
       with ops.name_scope("foo"):
         while1_op = self._createWhile("")
         self.assertEqual(while1_op.name, "foo/while")
-        self.assertRegexpMatches(
-            while1_op.get_attr("cond").name, r"foo_while_cond_\d*")
-        self.assertRegexpMatches(
-            while1_op.get_attr("body").name, r"foo_while_body_\d*")
+        self.assertRegex(while1_op.get_attr("cond").name, r"foo_while_cond_\d*")
+        self.assertRegex(while1_op.get_attr("body").name, r"foo_while_body_\d*")
 
         while2_op = self._createWhile(None)
         self.assertEqual(while2_op.name, "foo/while_1")
-        self.assertRegexpMatches(
+        self.assertRegex(
             while2_op.get_attr("cond").name, r"foo_while_1_cond_\d*")
-        self.assertRegexpMatches(
+        self.assertRegex(
             while2_op.get_attr("body").name, r"foo_while_1_body_\d*")
 
   @test_util.enable_control_flow_v2
diff --git a/tensorflow/python/kernel_tests/xent_op_test.py b/tensorflow/python/kernel_tests/xent_op_test.py
index 54e0aa21ff3..6e60a935e93 100644
--- a/tensorflow/python/kernel_tests/xent_op_test.py
+++ b/tensorflow/python/kernel_tests/xent_op_test.py
@@ -117,9 +117,9 @@ class XentTest(test.TestCase):
                                                     4.]]]).astype(dtype)
       np_labels = np.array([[[0., 0., 0., 1.]], [[0., .5, .5,
                                                   0.]]]).astype(dtype)
-      self.assertRaisesRegexp(ValueError, "rank 2, but is rank 3",
-                              gen_nn_ops.softmax_cross_entropy_with_logits,
-                              np_features, np_labels)
+      self.assertRaisesRegex(ValueError, "rank 2, but is rank 3",
+                             gen_nn_ops.softmax_cross_entropy_with_logits,
+                             np_features, np_labels)
 
   def testNpXent(self):
     # We create 2 batches of logits for testing.
diff --git a/tensorflow/python/lib/io/tf_record_test.py b/tensorflow/python/lib/io/tf_record_test.py
index dfdbb663a9c..c6d8fca3bfa 100644
--- a/tensorflow/python/lib/io/tf_record_test.py
+++ b/tensorflow/python/lib/io/tf_record_test.py
@@ -491,7 +491,7 @@ class TFRecordRandomReaderTest(TFCompressionTestCase):
       self.assertEqual(record, records[i])
       offsets.append(offset)
     # Reading off the bound should lead to error.
-    with self.assertRaisesRegexp(IndexError, r"Out of range.*offset"):
+    with self.assertRaisesRegex(IndexError, r"Out of range.*offset"):
       reader.read(offset)
     # Do a pass of backward reading.
     for i in range(self._num_records - 1, 0, -1):
@@ -503,8 +503,7 @@ class TFRecordRandomReaderTest(TFCompressionTestCase):
     records = [self._Record(0, i) for i in range(self._num_records)]
     fn = self._WriteRecordsToFile(records, "uncompressed_records")
     reader = tf_record.tf_record_random_reader(fn)
-    with self.assertRaisesRegexp(
-        errors_impl.DataLossError, r"corrupted record"):
+    with self.assertRaisesRegex(errors_impl.DataLossError, r"corrupted record"):
       reader.read(1)  # 1 is guaranteed to be an invalid offset.
 
   def testClosingRandomReaderCausesErrorsForFurtherReading(self):
@@ -512,8 +511,7 @@ class TFRecordRandomReaderTest(TFCompressionTestCase):
     fn = self._WriteRecordsToFile(records, "uncompressed_records")
     reader = tf_record.tf_record_random_reader(fn)
     reader.close()
-    with self.assertRaisesRegexp(
-        errors_impl.FailedPreconditionError, r"closed"):
+    with self.assertRaisesRegex(errors_impl.FailedPreconditionError, r"closed"):
       reader.read(0)
 
 
diff --git a/tensorflow/python/module/module_test.py b/tensorflow/python/module/module_test.py
index 0578823c7fb..e15bc734230 100644
--- a/tensorflow/python/module/module_test.py
+++ b/tensorflow/python/module/module_test.py
@@ -109,7 +109,7 @@ class TestModuleNaming(test_util.TensorFlowTestCase):
 
   def test_invalid_name(self):
     msg = ".* is not a valid module name"
-    with self.assertRaisesRegexp(ValueError, msg):
+    with self.assertRaisesRegex(ValueError, msg):
       module.Module(name="$Foo")
 
   @test_util.run_in_graph_and_eager_modes
@@ -303,7 +303,7 @@ class AbcTest(test_util.TensorFlowTestCase):
 
   def testAbstract(self):
     msg = "Can't instantiate .* abstract methods"
-    with self.assertRaisesRegexp(TypeError, msg):
+    with self.assertRaisesRegex(TypeError, msg):
       AbstractModule()  # pylint: disable=abstract-class-instantiated
 
   def testConcrete(self):
diff --git a/tensorflow/python/ops/batch_ops_test.py b/tensorflow/python/ops/batch_ops_test.py
index f63f39d27d8..5749be96033 100644
--- a/tensorflow/python/ops/batch_ops_test.py
+++ b/tensorflow/python/ops/batch_ops_test.py
@@ -345,8 +345,8 @@ class BatchOpsTest(test.TestCase):
           captured_tensors=computation.captured_inputs,
           Tout=[o.type for o in computation.definition.signature.output_arg])
 
-      with self.assertRaisesRegexp(InvalidArgumentError,
-                                   ".*2 arguments.*but 1.*"):
+      with self.assertRaisesRegex(InvalidArgumentError,
+                                  ".*2 arguments.*but 1.*"):
         sess.run([result], feed_dict={inp: [2]})
 
   def testBatchFunctionOpWithLargeBatchSplitted(self):
diff --git a/tensorflow/python/ops/bincount_ops_test.py b/tensorflow/python/ops/bincount_ops_test.py
index 74fd17cae2b..baf0018fb32 100644
--- a/tensorflow/python/ops/bincount_ops_test.py
+++ b/tensorflow/python/ops/bincount_ops_test.py
@@ -748,13 +748,13 @@ class TestSparseCountFailureModes(test.TestCase):
     x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
     weights = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
-    with self.assertRaisesRegexp(ValueError, "must be a tf.Tensor"):
+    with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_dense_input_ragged_weights_fails(self):
     x = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
     weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
-    with self.assertRaisesRegexp(ValueError, "must be a tf.Tensor"):
+    with self.assertRaisesRegex(ValueError, "must be a tf.Tensor"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_dense_input_wrong_shape_fails(self):
@@ -764,25 +764,25 @@ class TestSparseCountFailureModes(test.TestCase):
     # will fail with a ValueError from the shape checking logic, while Eager
     # will fail with an InvalidArgumentError from the kernel itself.
     if context.executing_eagerly():
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "must have the same shape"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "must have the same shape"):
         self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
     else:
-      with self.assertRaisesRegexp(ValueError, "both shapes must be equal"):
+      with self.assertRaisesRegex(ValueError, "both shapes must be equal"):
         self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_sparse_input_dense_weights_fails(self):
     x = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
     weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
-    with self.assertRaisesRegexp(ValueError, "must be a SparseTensor"):
+    with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_sparse_input_ragged_weights_fails(self):
     x = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
     weights = ragged_factory_ops.constant([[6, 0.5, 2], [14], [10, 0.25, 5, 3]])
-    with self.assertRaisesRegexp(ValueError, "must be a SparseTensor"):
+    with self.assertRaisesRegex(ValueError, "must be a SparseTensor"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_sparse_input_wrong_indices_fails(self):
@@ -790,8 +790,8 @@ class TestSparseCountFailureModes(test.TestCase):
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
     weights = sparse_ops.from_dense(
         np.array([[3, 1, 0, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "must have the same indices"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "must have the same indices"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_sparse_input_too_many_indices_fails(self):
@@ -799,8 +799,8 @@ class TestSparseCountFailureModes(test.TestCase):
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
     weights = sparse_ops.from_dense(
         np.array([[3, 1, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "Incompatible shapes"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "Incompatible shapes"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_sparse_input_wrong_shape_fails(self):
@@ -809,28 +809,28 @@ class TestSparseCountFailureModes(test.TestCase):
     weights = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4], [0, 0, 0, 0]],
                  dtype=np.int32))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "must have the same dense shape"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "must have the same dense shape"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_ragged_input_dense_weights_fails(self):
     x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
     weights = np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32)
-    with self.assertRaisesRegexp(ValueError, "must be a RaggedTensor"):
+    with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_ragged_input_sparse_weights_fails(self):
     x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
     weights = sparse_ops.from_dense(
         np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]], dtype=np.int32))
-    with self.assertRaisesRegexp(ValueError, "must be a RaggedTensor"):
+    with self.assertRaisesRegex(ValueError, "must be a RaggedTensor"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
   def test_ragged_input_different_shape_fails(self):
     x = ragged_factory_ops.constant([[6, 1, 2], [14], [10, 1, 5, 3]])
     weights = ragged_factory_ops.constant([[6, 0.5, 2], [], [10, 0.25, 5, 3]])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 "must have the same row splits"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "must have the same row splits"):
       self.evaluate(bincount_ops.sparse_bincount(x, weights=weights, axis=-1))
 
 
diff --git a/tensorflow/python/ops/clustering_ops_test.py b/tensorflow/python/ops/clustering_ops_test.py
index 5804c660e67..2a3d102ba87 100644
--- a/tensorflow/python/ops/clustering_ops_test.py
+++ b/tensorflow/python/ops/clustering_ops_test.py
@@ -92,7 +92,7 @@ class KMC2InitializationLargeTest(test.TestCase):
         sample = self.evaluate(
             clustering_ops.kmc2_chain_initialization(self._distances, seed + i))
         counts[sample] = counts.get(sample, 0) + 1
-      self.assertEquals(len(counts), 2)
+      self.assertEqual(len(counts), 2)
       self.assertTrue(500 in counts)
       self.assertTrue(1000 in counts)
       self.assertGreaterEqual(counts[500], 5)
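For reference, assertEquals is only a deprecated alias of assertEqual in Python 3's unittest, so renames like the clustering_ops_test.py hunk above are behavior-preserving. A minimal standalone sketch, not part of the change itself; the test class and the counts dict are invented:

    import unittest

    class AssertEqualExampleTest(unittest.TestCase):

      def testCanonicalSpelling(self):
        # assertEqual is the canonical name; assertEquals (and failUnlessEqual)
        # are deprecated aliases that emit a DeprecationWarning on Python 3.
        counts = {500: 7, 1000: 5}
        self.assertEqual(len(counts), 2)

    if __name__ == "__main__":
      unittest.main()
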
diff --git a/tensorflow/python/ops/collective_ops_gpu_test.py b/tensorflow/python/ops/collective_ops_gpu_test.py
index 872fb49834c..efa97bd9555 100644
--- a/tensorflow/python/ops/collective_ops_gpu_test.py
+++ b/tensorflow/python/ops/collective_ops_gpu_test.py
@@ -100,7 +100,7 @@ class CollectiveOpGPUTest(test.TestCase):
           t = constant_op.constant(inputs[i], dtype=dtypes.int32)
           collectives.append(collective_ops.all_reduce(
               t, self._group_size, group_key, instance_key, 'Add', 'Div'))
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors.InternalError,
           'does not support datatype DT_INT32 on DEVICE_GPU'):
         sess.run(collectives)
@@ -190,7 +190,7 @@ class CollectiveOpGPUTest(test.TestCase):
           t = constant_op.constant(tensor_value)
           collectives.append(collective_ops.broadcast_recv(
               t.shape, t.dtype, self._group_size, group_key, instance_key))
-      with self.assertRaisesRegexp(errors.InternalError, 'found no source'):
+      with self.assertRaisesRegex(errors.InternalError, 'found no source'):
         sess.run(collectives)
 
   def testNcclBroadcastDoubleSend(self):
@@ -209,7 +209,7 @@ class CollectiveOpGPUTest(test.TestCase):
           t = constant_op.constant(tensor_value)
           collectives.append(collective_ops.broadcast_send(
               t, t.shape, t.dtype, self._group_size, group_key, instance_key))
-      with self.assertRaisesRegexp(errors.InternalError, 'already has source'):
+      with self.assertRaisesRegex(errors.InternalError, 'already has source'):
         sess.run(collectives)
 
   def testBasicNcclAllGather(self):
@@ -255,8 +255,8 @@ class CollectiveOpGPUTest(test.TestCase):
                                        instance_key, 'Add', 'Id')
       run_options = config_pb2.RunOptions()
       run_options.experimental.collective_graph_key = 100
-      with self.assertRaisesRegexp(errors.InternalError,
-                                   'but that group has type'):
+      with self.assertRaisesRegex(errors.InternalError,
+                                  'but that group has type'):
         sess.run([c0, c1], options=run_options)
 
   @test_util.run_v2_only
diff --git a/tensorflow/python/ops/collective_ops_test.py b/tensorflow/python/ops/collective_ops_test.py
index 8e3a95d7dbf..300863ec03a 100644
--- a/tensorflow/python/ops/collective_ops_test.py
+++ b/tensorflow/python/ops/collective_ops_test.py
@@ -426,8 +426,8 @@ class CollectiveOpTest(test.TestCase):
         run_options = config_pb2.RunOptions()
         run_options.experimental.collective_graph_key = 1
         sess.run([c0, c1], options=run_options)
-        with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                     'Shape mismatch'):
+        with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                    'Shape mismatch'):
           sess.run([c0, c2], options=run_options)
 
   def testCollectiveGatherShapeMismatchAcrossDevices(self):
@@ -447,8 +447,8 @@ class CollectiveOpTest(test.TestCase):
           c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
         run_options = config_pb2.RunOptions()
         run_options.experimental.collective_graph_key = 1
-        with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                     'Shape mismatch'):
+        with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                    'Shape mismatch'):
           sess.run([c0, c1], options=run_options)
 
   def testCollectiveGatherPolymorphicShape(self):
@@ -510,8 +510,8 @@ class CollectiveOpTest(test.TestCase):
             merge_op='Add', final_op='Id')
       return c0, c1
 
-    with self.assertRaisesRegexp(errors.InternalError,
-                                 'but that group has size'):
+    with self.assertRaisesRegex(errors.InternalError,
+                                'but that group has size'):
       run_all_reduce()
 
   @test_util.run_v2_only
diff --git a/tensorflow/python/ops/control_flow_ops_test.py b/tensorflow/python/ops/control_flow_ops_test.py
index d1d0f65e07c..aa95d22c119 100644
--- a/tensorflow/python/ops/control_flow_ops_test.py
+++ b/tensorflow/python/ops/control_flow_ops_test.py
@@ -151,10 +151,10 @@ class ShapeTestCase(test_util.TensorFlowTestCase):
 
   def testShape(self):
     tensor = constant_op.constant([1.0, 2.0])
-    self.assertEquals([2], tensor.get_shape())
-    self.assertEquals([2],
-                      control_flow_ops.with_dependencies(
-                          [constant_op.constant(1.0)], tensor).get_shape())
+    self.assertEqual([2], tensor.get_shape())
+    self.assertEqual([2],
+                     control_flow_ops.with_dependencies(
+                         [constant_op.constant(1.0)], tensor).get_shape())
 
 
 class WithDependenciesTestCase(test_util.TensorFlowTestCase):
@@ -169,9 +169,9 @@ class WithDependenciesTestCase(test_util.TensorFlowTestCase):
         constant_op.constant(7))
 
     self.evaluate(variables.global_variables_initializer())
-    self.assertEquals(0, self.evaluate(counter))
-    self.assertEquals(7, self.evaluate(const_with_dep))
-    self.assertEquals(1, self.evaluate(counter))
+    self.assertEqual(0, self.evaluate(counter))
+    self.assertEqual(7, self.evaluate(const_with_dep))
+    self.assertEqual(1, self.evaluate(counter))
 
   @test_util.run_deprecated_v1
   def testListDependencies(self):
@@ -183,9 +183,9 @@ class WithDependenciesTestCase(test_util.TensorFlowTestCase):
         constant_op.constant(7))
 
     self.evaluate(variables.global_variables_initializer())
-    self.assertEquals(0, self.evaluate(counter))
-    self.assertEquals(7, self.evaluate(const_with_dep))
-    self.assertEquals(1, self.evaluate(counter))
+    self.assertEqual(0, self.evaluate(counter))
+    self.assertEqual(7, self.evaluate(const_with_dep))
+    self.assertEqual(1, self.evaluate(counter))
 
 
 class SwitchTestCase(test_util.TensorFlowTestCase):
@@ -316,7 +316,7 @@ class SwitchTestCase(test_util.TensorFlowTestCase):
         grad_wr_inputs = ops.convert_to_tensor(r)
         o, grad = sess.run([outputs, grad_wr_inputs],
                            feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]})
-        self.assertEquals(o, 20)
+        self.assertEqual(o, 20)
         self.assertAllEqual(grad, [1] * num_steps)
 
   @test_util.run_v1_only("b/120545219")
@@ -344,7 +344,7 @@ class SwitchTestCase(test_util.TensorFlowTestCase):
         grad_wr_inputs = ops.convert_to_tensor(r)
         o, grad = sess.run([outputs, grad_wr_inputs],
                            feed_dict={inputs: [1, 3, 2]})
-        self.assertEquals(o, 6)
+        self.assertEqual(o, 6)
         self.assertAllEqual(grad, [1] * 3)
 
   @test_util.run_deprecated_v1
@@ -354,8 +354,8 @@ class SwitchTestCase(test_util.TensorFlowTestCase):
     x_false, x_true = control_flow_ops.switch(x, s)
     grad_x_true = gradients_impl.gradients(x_true, x)[0]
     grad_x_false = gradients_impl.gradients(x_false, x)[0]
-    self.assertEquals(self.evaluate(grad_x_true), 1.)
-    self.assertEquals(self.evaluate(grad_x_false), 0.)
+    self.assertEqual(self.evaluate(grad_x_true), 1.)
+    self.assertEqual(self.evaluate(grad_x_false), 0.)
 
 
 class CondTest(test_util.TensorFlowTestCase):
@@ -367,7 +367,7 @@ class CondTest(test_util.TensorFlowTestCase):
         math_ops.less(
             x,
             y), lambda: math_ops.multiply(x, 17), lambda: math_ops.add(y, 23))
-    self.assertEquals(self.evaluate(z), 34)
+    self.assertEqual(self.evaluate(z), 34)
 
   def testCondFalse(self):
     x = constant_op.constant(2)
@@ -376,7 +376,7 @@ class CondTest(test_util.TensorFlowTestCase):
         math_ops.less(
             x,
             y), lambda: math_ops.multiply(x, 17), lambda: math_ops.add(y, 23))
-    self.assertEquals(self.evaluate(z), 24)
+    self.assertEqual(self.evaluate(z), 24)
 
   def testCondTrueLegacy(self):
     x = constant_op.constant(2)
@@ -385,7 +385,7 @@ class CondTest(test_util.TensorFlowTestCase):
         math_ops.less(x, y),
         fn1=lambda: math_ops.multiply(x, 17),
         fn2=lambda: math_ops.add(y, 23))
-    self.assertEquals(self.evaluate(z), 34)
+    self.assertEqual(self.evaluate(z), 34)
 
   def testCondFalseLegacy(self):
     x = constant_op.constant(2)
@@ -394,7 +394,7 @@ class CondTest(test_util.TensorFlowTestCase):
         math_ops.less(x, y),
         fn1=lambda: math_ops.multiply(x, 17),
         fn2=lambda: math_ops.add(y, 23))
-    self.assertEquals(self.evaluate(z), 24)
+    self.assertEqual(self.evaluate(z), 24)
 
   @test_util.run_v1_only("Exercises Ref variables")
   def testCondModifyBoolPred(self):
@@ -408,8 +408,8 @@ class CondTest(test_util.TensorFlowTestCase):
           true_fn=lambda: state_ops.assign(bool_var, False),
           false_fn=lambda: True)
       self.evaluate(bool_var.initializer)
-      self.assertEquals(self.evaluate(cond_on_bool_var), False)
-      self.assertEquals(self.evaluate(cond_on_bool_var), True)
+      self.assertEqual(self.evaluate(cond_on_bool_var), False)
+      self.assertEqual(self.evaluate(cond_on_bool_var), True)
 
   def testCondMissingArg1(self):
     x = constant_op.constant(1)
@@ -533,10 +533,9 @@ class ContextTest(test_util.TensorFlowTestCase):
           values_def=c._to_values_def(), import_scope="test_scope")
 
       # _values and _external_values should have scope prepended.
-      self.assertEquals(
-          c_with_scope._values, set(["test_scope/a", "test_scope/b"]))
-      self.assertEquals(
-          c_with_scope._external_values, {"test_scope/a": b2})
+      self.assertEqual(c_with_scope._values,
+                       set(["test_scope/a", "test_scope/b"]))
+      self.assertEqual(c_with_scope._external_values, {"test_scope/a": b2})
 
       # Calling _to_proto() with export_scope should remove "test_scope".
       self.assertProtoEquals(
@@ -1191,7 +1190,7 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi))
 
     branches = {i: make_func(i) for i in range(0, 6, 2)}
-    with self.assertRaisesRegexp(ValueError, "must form contiguous"):
+    with self.assertRaisesRegex(ValueError, "must form contiguous"):
       control_flow_ops.switch_case(array_ops.constant(0), branches)
 
   def testCase_validateIndicesDup(self):
@@ -1201,7 +1200,7 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
     branches = [(i, make_func(i)) for i in range(0, 6, 2)]
     branches.append((0, make_func(7)))
-    with self.assertRaisesRegexp(ValueError, "must form contiguous"):
+    with self.assertRaisesRegex(ValueError, "must form contiguous"):
       control_flow_ops.switch_case(array_ops.constant(0), branches)
 
   def testCase_validateBranchIndex(self):
@@ -1210,7 +1209,7 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi))
 
     branches = {i: make_func(i) for i in range(5)}
-    with self.assertRaisesRegexp(TypeError, "branch_index.*Tensor"):
+    with self.assertRaisesRegex(TypeError, "branch_index.*Tensor"):
       control_flow_ops.switch_case(1, branches)
 
   def testCase_validateNonIntKeys(self):
@@ -1219,7 +1218,7 @@ class IndexedCaseTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       return lambda: array_ops.constant(bi * 10., name="br{}_out".format(bi))
 
     branches = [(array_ops.constant(i), make_func(i)) for i in range(5)]
-    with self.assertRaisesRegexp(TypeError, "must be a Python `int`"):
+    with self.assertRaisesRegex(TypeError, "must be a Python `int`"):
       control_flow_ops.switch_case(array_ops.constant(1), branches)
 
 
@@ -1359,7 +1358,7 @@ class CaseTest(test_util.TensorFlowTestCase):
     with self.cached_session() as sess:
       self.assertEqual(sess.run(output, feed_dict={x: 1}), 2)
       self.assertEqual(sess.run(output, feed_dict={x: 3}), 8)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, "Input error:"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, "Input error:"):
         sess.run(output, feed_dict={x: 2})
 
   @test_util.run_deprecated_v1
@@ -1386,7 +1385,7 @@ class CaseTest(test_util.TensorFlowTestCase):
       self.assertEqual(sess.run(output, feed_dict={x: 1}), 2)
       self.assertEqual(sess.run(output, feed_dict={x: 2}), 4)
       self.assertEqual(sess.run(output, feed_dict={x: 3}), 6)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, "Input error:"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, "Input error:"):
         sess.run(output, feed_dict={x: 4})
 
   @test_util.run_deprecated_v1
@@ -1396,7 +1395,7 @@ class CaseTest(test_util.TensorFlowTestCase):
     output = control_flow_ops.case(conditions, exclusive=True)
     with self.cached_session() as sess:
       self.assertEqual(sess.run(output, feed_dict={x: 1}), 2)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, "Input error:"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, "Input error:"):
         sess.run(output, feed_dict={x: 4})
 
   @test_util.run_in_graph_and_eager_modes
diff --git a/tensorflow/python/ops/gradient_checker_test.py b/tensorflow/python/ops/gradient_checker_test.py
index c8ebf12569a..7ecad0a2a8e 100644
--- a/tensorflow/python/ops/gradient_checker_test.py
+++ b/tensorflow/python/ops/gradient_checker_test.py
@@ -182,9 +182,9 @@ class GradientCheckerTest(test.TestCase):
         with g.gradient_override_map({"Identity": "BadGrad"}):
           y = array_ops.identity(x)
         bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
-        with self.assertRaisesRegexp(ValueError, bad):
+        with self.assertRaisesRegex(ValueError, bad):
           gradient_checker.compute_gradient(x, (0, 3), y, (0, 3))
-        with self.assertRaisesRegexp(ValueError, bad):
+        with self.assertRaisesRegex(ValueError, bad):
           gradient_checker.compute_gradient_error(x, (0, 3), y, (0, 3))
 
   def testNaNGradFails(self):
@@ -196,7 +196,7 @@ class GradientCheckerTest(test.TestCase):
           error = gradient_checker.compute_gradient_error(x, (), y, ())
           # Typical test would assert error < max_err, so assert this test would
           # raise AssertionError, since NaN is not < 1.0.
-          with self.assertRaisesRegexp(AssertionError, "False is not true"):
+          with self.assertRaisesRegex(AssertionError, "False is not true"):
             self.assertTrue(error < 1.0)
 
 
diff --git a/tensorflow/python/ops/gradient_checker_v2_test.py b/tensorflow/python/ops/gradient_checker_v2_test.py
index d59228d78d1..91d29702079 100644
--- a/tensorflow/python/ops/gradient_checker_v2_test.py
+++ b/tensorflow/python/ops/gradient_checker_v2_test.py
@@ -234,7 +234,7 @@ class GradientCheckerTest(test.TestCase):
     x = constant_op.constant(
         np.random.random_sample((0, 3)), dtype=dtypes.float32)
     bad = r"Empty gradient has wrong shape: expected \(0, 3\), got \(3, 0\)"
-    with self.assertRaisesRegexp(ValueError, bad):
+    with self.assertRaisesRegex(ValueError, bad):
       gradient_checker.compute_gradient(f, [x])
 
   def testNaNGradFails(self):
@@ -259,7 +259,7 @@ class GradientCheckerTest(test.TestCase):
         *gradient_checker.compute_gradient(f, [x]))
     # Typical test would assert error < max_err, so assert this test would
     # raise AssertionError, since NaN is not < 1.0.
-    with self.assertRaisesRegexp(AssertionError, "nan not less than 1.0"):
+    with self.assertRaisesRegex(AssertionError, "nan not less than 1.0"):
       self.assertLess(error, 1.0)
 
   def testGradGrad(self):
diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py
index 78fbcdd6e6f..760463bcd65 100644
--- a/tensorflow/python/ops/gradients_test.py
+++ b/tensorflow/python/ops/gradients_test.py
@@ -73,8 +73,8 @@ class GradientsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       xw = math_ops.matmul(inp, w, name="xw")
       h = bias_add(xw, b, name="h")
       w_grad = gradients.gradients(h, w)[0]
-    self.assertEquals("MatMul", w_grad.op.type)
-    self.assertEquals(w_grad.op._original_op, xw.op)
+    self.assertEqual("MatMul", w_grad.op.type)
+    self.assertEqual(w_grad.op._original_op, xw.op)
     self.assertTrue(w_grad.op.get_attr("transpose_a"))
     self.assertFalse(w_grad.op.get_attr("transpose_b"))
 
@@ -86,7 +86,7 @@ class GradientsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
       split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
       c = math_ops.reduce_sum(split_wx[1])
       gw = gradients.gradients(c, [w])[0]
-    self.assertEquals("MatMul", gw.op.type)
+    self.assertEqual("MatMul", gw.op.type)
 
   def testColocateGradients(self):
     with ops.Graph().as_default() as g:
@@ -218,7 +218,7 @@ class GradientsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
       def _TestOpGrad(_, float_grad, string_grad):
         """Gradient function for TestStringOutput."""
-        self.assertEquals(float_grad.dtype, dtypes.float32)
+        self.assertEqual(float_grad.dtype, dtypes.float32)
         self.assertFalse(string_grad)
         return float_grad
 
@@ -427,7 +427,7 @@ class GradientsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     with ops.Graph().as_default():
       x = constant(1.0)
       y = constant(1.0)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
         gradients.gradients([y], [x], unconnected_gradients="nonsense")
 
@@ -573,7 +573,7 @@ class FunctionGradientsTest(test_util.TensorFlowTestCase):
       grad_func = framework_function.Defun(dtypes.float32, dtypes.float32,
                                            dtypes.float32)(
                                                self.XSquarePlusBGradient)
-      with self.assertRaisesRegexp(ValueError, "Gradient defined twice"):
+      with self.assertRaisesRegex(ValueError, "Gradient defined twice"):
         f = self._GetFunc(
             grad_func=grad_func, python_grad_func=self._PythonGradient)
         f.add_to_graph(ops.Graph())
@@ -704,7 +704,7 @@ class PreventGradientTest(test_util.TensorFlowTestCase):
     with ops.Graph().as_default():
       inp = constant(1.0, shape=[100, 32], name="in")
       out = array_ops.prevent_gradient(inp)
-      with self.assertRaisesRegexp(LookupError, "explicitly disabled"):
+      with self.assertRaisesRegex(LookupError, "explicitly disabled"):
         _ = gradients.gradients(out, inp)
 
 
@@ -920,9 +920,8 @@ class OnlyRealGradientsTest(test_util.TensorFlowTestCase):
   def testRealOnly(self):
     x = constant_op.constant(7+3j, dtype=dtypes.complex64)
     y = math_ops.square(x)
-    with self.assertRaisesRegexp(
-        TypeError,
-        r"Gradients of complex tensors must set grad_ys "
+    with self.assertRaisesRegex(
+        TypeError, r"Gradients of complex tensors must set grad_ys "
         r"\(y\.dtype = tf\.complex64\)"):
       gradients.gradients(y, x)
 
diff --git a/tensorflow/python/ops/histogram_ops_test.py b/tensorflow/python/ops/histogram_ops_test.py
index b48ef67196b..94217d931d8 100644
--- a/tensorflow/python/ops/histogram_ops_test.py
+++ b/tensorflow/python/ops/histogram_ops_test.py
@@ -88,20 +88,19 @@ class HistogramFixedWidthTest(test.TestCase):
   @test_util.run_deprecated_v1
   def test_with_invalid_value_range(self):
     values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
-    with self.assertRaisesRegexp(
-        ValueError, "Shape must be rank 1 but is rank 0"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 1 but is rank 0"):
       histogram_ops.histogram_fixed_width(values, 1.0)
-    with self.assertRaisesRegexp(ValueError, "Dimension must be 2 but is 3"):
+    with self.assertRaisesRegex(ValueError, "Dimension must be 2 but is 3"):
       histogram_ops.histogram_fixed_width(values, [1.0, 2.0, 3.0])
 
   @test_util.run_deprecated_v1
   def test_with_invalid_nbins(self):
     values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
-    with self.assertRaisesRegexp(
-        ValueError, "Shape must be rank 0 but is rank 1"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 0 but is rank 1"):
       histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=[1, 2])
-    with self.assertRaisesRegexp(
-        ValueError, "Requires nbins > 0"):
+    with self.assertRaisesRegex(ValueError, "Requires nbins > 0"):
       histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=-5)
 
   def test_empty_input_gives_all_zero_counts(self):
@@ -163,7 +162,7 @@ class HistogramFixedWidthTest(test.TestCase):
 
       hist = histogram_ops.histogram_fixed_width(
           values, value_range, nbins=placeholder)
-      self.assertEquals(hist.shape.ndims, 1)
+      self.assertEqual(hist.shape.ndims, 1)
       self.assertIs(hist.shape.dims[0].value, None)
       self.assertEqual(dtypes.int32, hist.dtype)
       self.assertAllClose(expected_bin_counts, hist.eval({placeholder: 5}))
diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py
index da0492e3a56..539a305995b 100644
--- a/tensorflow/python/ops/image_ops_test.py
+++ b/tensorflow/python/ops/image_ops_test.py
@@ -228,7 +228,7 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
 
       # this is the error message we expect the function to raise
       err_msg = "Last dimension of a grayscale image should be size 1"
-      with self.assertRaisesRegexp(ValueError, err_msg):
+      with self.assertRaisesRegex(ValueError, err_msg):
         image_ops.grayscale_to_rgb(x_tf)
 
     # tests if an exception is raised if a two dimensional
@@ -241,7 +241,7 @@ class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
 
       # this is the error message we expect the function to raise
       err_msg = "must be at least two-dimensional"
-      with self.assertRaisesRegexp(ValueError, err_msg):
+      with self.assertRaisesRegex(ValueError, err_msg):
         image_ops.grayscale_to_rgb(x_tf)
 
   @test_util.run_deprecated_v1
@@ -283,7 +283,7 @@ class AdjustGamma(test_util.TensorFlowTestCase):
       x = constant_op.constant(x_np, shape=x_np.shape)
 
       err_msg = "Gamma should be a non-negative real number"
-      with self.assertRaisesRegexp(ValueError, err_msg):
+      with self.assertRaisesRegex(ValueError, err_msg):
         image_ops.adjust_gamma(x, gamma=-1)
 
   @test_util.run_deprecated_v1
@@ -296,7 +296,7 @@ class AdjustGamma(test_util.TensorFlowTestCase):
       x = constant_op.constant(x_np, shape=x_np.shape)
 
       err_msg = "Gamma should be a non-negative real number"
-      with self.assertRaisesRegexp(ValueError, err_msg):
+      with self.assertRaisesRegex(ValueError, err_msg):
         image_ops.adjust_gamma(x, gamma=-1)
 
   @test_util.run_deprecated_v1
@@ -312,7 +312,7 @@ class AdjustGamma(test_util.TensorFlowTestCase):
       image = image_ops.adjust_gamma(x, gamma=y)
 
       err_msg = "Gamma should be a non-negative real number"
-      with self.assertRaisesRegexp(errors.InvalidArgumentError, err_msg):
+      with self.assertRaisesRegex(errors.InvalidArgumentError, err_msg):
         self.evaluate(image)
 
   def _test_adjust_gamma_uint8(self, gamma):
@@ -525,7 +525,7 @@ class AdjustHueTest(test_util.TensorFlowTestCase):
     x_np = np.random.rand(2, 3) * 255.
     delta_h = np.random.rand() * 2.0 - 1.0
     fused = False
-    with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
+    with self.assertRaisesRegex(ValueError, "Shape must be at least rank 3"):
       self._adjustHueTf(x_np, delta_h)
     x_np = np.random.rand(4, 2, 4) * 255.
     delta_h = np.random.rand() * 2.0 - 1.0
@@ -1311,7 +1311,7 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
       transformed_unknown_width = op(p_unknown_width)
       self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
 
-      with self.assertRaisesRegexp(ValueError, "must be > 0"):
+      with self.assertRaisesRegex(ValueError, "must be > 0"):
         op(p_zero_dim)
 
     # Ops that support 4D input
@@ -1324,8 +1324,8 @@ class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
       self.assertEqual(4, transformed_unknown_dims_4.get_shape().ndims)
       transformed_unknown_batch = op(p_unknown_batch)
       self.assertEqual(4, transformed_unknown_batch.get_shape().ndims)
-      with self.assertRaisesRegexp(ValueError,
-                                   "must be at least three-dimensional"):
+      with self.assertRaisesRegex(ValueError,
+                                  "must be at least three-dimensional"):
         op(p_wrong_rank)
 
   def testRot90GroupOrder(self):
@@ -1448,8 +1448,8 @@ class AdjustContrastTest(test_util.TensorFlowTestCase):
     x_shape = [1, 2, 2, 3]
     x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
     x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
-    with self.assertRaisesRegexp(
-        ValueError, 'Shape must be rank 0 but is rank 1'):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 0 but is rank 1"):
       image_ops.adjust_contrast(x_np, [2.0])
 
 
@@ -4169,8 +4169,8 @@ class ConvertImageTest(test_util.TensorFlowTestCase):
       image = constant_op.constant([1], dtype=dtypes.uint8)
       image_ops.convert_image_dtype(image, dtypes.uint8)
       y = image_ops.convert_image_dtype(image, dtypes.uint8)
-      self.assertEquals(y.op.type, "Identity")
-      self.assertEquals(y.op.inputs[0], image)
+      self.assertEqual(y.op.type, "Identity")
+      self.assertEqual(y.op.inputs[0], image)
 
   @test_util.run_deprecated_v1
   def testConvertBetweenInteger(self):
@@ -4445,42 +4445,42 @@ class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
   @test_util.run_deprecated_v1
   def testInvalidShape(self):
     # The boxes should be 2D of shape [num_boxes, 4].
-    with self.assertRaisesRegexp(ValueError,
-                                 "Shape must be rank 2 but is rank 1"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 2 but is rank 1"):
       boxes = constant_op.constant([0.0, 0.0, 1.0, 1.0])
       scores = constant_op.constant([0.9])
       image_ops.non_max_suppression(boxes, scores, 3, 0.5)
 
-    with self.assertRaisesRegexp(ValueError, "Dimension must be 4 but is 3"):
+    with self.assertRaisesRegex(ValueError, "Dimension must be 4 but is 3"):
       boxes = constant_op.constant([[0.0, 0.0, 1.0]])
       scores = constant_op.constant([0.9])
       image_ops.non_max_suppression(boxes, scores, 3, 0.5)
 
     # The boxes are of shape [num_boxes, 4], and the scores are
     # of shape [num_boxes]. So an error will be thrown.
-    with self.assertRaisesRegexp(ValueError,
-                                 "Dimensions must be equal, but are 1 and 2"):
+    with self.assertRaisesRegex(ValueError,
+                                "Dimensions must be equal, but are 1 and 2"):
       boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
       scores = constant_op.constant([0.9, 0.75])
       image_ops.non_max_suppression(boxes, scores, 3, 0.5)
 
     # The scores should be 1D of shape [num_boxes].
-    with self.assertRaisesRegexp(ValueError,
-                                 "Shape must be rank 1 but is rank 2"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 1 but is rank 2"):
       boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
       scores = constant_op.constant([[0.9]])
       image_ops.non_max_suppression(boxes, scores, 3, 0.5)
 
     # The max_output_size should be a scalar (0-D).
-    with self.assertRaisesRegexp(ValueError,
-                                 "Shape must be rank 0 but is rank 1"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 0 but is rank 1"):
       boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
       scores = constant_op.constant([0.9])
       image_ops.non_max_suppression(boxes, scores, [3], 0.5)
 
     # The iou_threshold should be a scalar (0-D).
-    with self.assertRaisesRegexp(ValueError,
-                                 "Shape must be rank 0 but is rank 2"):
+    with self.assertRaisesRegex(ValueError,
+                                "Shape must be rank 0 but is rank 2"):
       boxes = constant_op.constant([[0.0, 0.0, 1.0, 1.0]])
       scores = constant_op.constant([0.9])
       image_ops.non_max_suppression(boxes, scores, 3, [[0.5]])
diff --git a/tensorflow/python/ops/init_ops_v2_test.py b/tensorflow/python/ops/init_ops_v2_test.py
index 9a8865fbc35..d45d5f6f6b3 100644
--- a/tensorflow/python/ops/init_ops_v2_test.py
+++ b/tensorflow/python/ops/init_ops_v2_test.py
@@ -110,11 +110,11 @@ class ConstantInitializersTest(InitializersTest):
   @test_util.run_in_graph_and_eager_modes
   def testConstantInvalidValue(self):
     c = constant_op.constant([1.0, 2.0, 3.0])
-    with self.assertRaisesRegexp(
-        TypeError, r"Invalid type for initial value: .*Tensor.*"):
+    with self.assertRaisesRegex(TypeError,
+                                r"Invalid type for initial value: .*Tensor.*"):
       init_ops_v2.constant_initializer(c)
     v = variables.Variable([3.0, 2.0, 1.0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         TypeError, r"Invalid type for initial value: .*Variable.*"):
       init_ops_v2.constant_initializer(v)
 
diff --git a/tensorflow/python/ops/math_ops_test.py b/tensorflow/python/ops/math_ops_test.py
index 9699f6d2b78..c5448a39be4 100644
--- a/tensorflow/python/ops/math_ops_test.py
+++ b/tensorflow/python/ops/math_ops_test.py
@@ -73,7 +73,7 @@ class ReduceTest(test_util.TensorFlowTestCase):
       return
     x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
     axis = np.array([[0], [1]])
-    with self.assertRaisesRegexp(ValueError, "must be at most rank 1"):
+    with self.assertRaisesRegex(ValueError, "must be at most rank 1"):
       math_ops.reduce_sum(x, axis)
 
   def testReduceVar(self):
@@ -83,7 +83,7 @@ class ReduceTest(test_util.TensorFlowTestCase):
         self.evaluate(math_ops.reduce_variance(x, axis=0)), [0, 0, 0])
 
     x = [[1, 2, 1, 1], [1, 1, 0, 1]]
-    with self.assertRaisesRegexp(TypeError, "must be either real or complex"):
+    with self.assertRaisesRegex(TypeError, "must be either real or complex"):
       math_ops.reduce_variance(x)
 
     x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
@@ -110,7 +110,7 @@ class ReduceTest(test_util.TensorFlowTestCase):
         self.evaluate(math_ops.reduce_std(x, axis=0)), [0, 0, 0])
 
     x = [[1, 2, 1, 1], [1, 1, 0, 1]]
-    with self.assertRaisesRegexp(TypeError, "must be either real or complex"):
+    with self.assertRaisesRegex(TypeError, "must be either real or complex"):
       math_ops.reduce_std(x)
 
     x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
@@ -176,8 +176,8 @@ class LogSumExpTest(test_util.TensorFlowTestCase):
     for dtype in [np.float16, np.float32, np.double]:
       x_np = np.array(x, dtype=dtype)
       max_np = np.max(x_np)
-      with self.assertRaisesRegexp(RuntimeWarning,
-                                   "overflow encountered in exp"):
+      with self.assertRaisesRegex(RuntimeWarning,
+                                  "overflow encountered in exp"):
         out = np.log(np.sum(np.exp(x_np)))
         if out == np.inf:
           raise RuntimeWarning("overflow encountered in exp")
@@ -193,8 +193,8 @@ class LogSumExpTest(test_util.TensorFlowTestCase):
     for dtype in [np.float16, np.float32, np.double]:
       x_np = np.array(x, dtype=dtype)
       max_np = np.max(x_np)
-      with self.assertRaisesRegexp(RuntimeWarning,
-                                   "divide by zero encountered in log"):
+      with self.assertRaisesRegex(RuntimeWarning,
+                                  "divide by zero encountered in log"):
         out = np.log(np.sum(np.exp(x_np)))
         if out == -np.inf:
           raise RuntimeWarning("divide by zero encountered in log")
@@ -314,7 +314,7 @@ class ApproximateEqualTest(test_util.TensorFlowTestCase):
       x = np.array([1, 2], dtype=dtype)
       y = np.array([[1, 2]], dtype=dtype)
       # The inputs 'x' and 'y' must have the same shape.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           (ValueError, errors.InvalidArgumentError),
           "Shapes must be equal rank|must be of the same shape"):
         math_ops.approximate_equal(x, y)
@@ -761,7 +761,7 @@ class BinaryOpsTest(test_util.TensorFlowTestCase):
       error_message = (
           "Input 'y' of 'Add(V2)?' Op has type float32 that does not "
           "match type int32 of argument 'x'.")
-    with self.assertRaisesRegexp(error, error_message):
+    with self.assertRaisesRegex(error, error_message):
       a = array_ops.ones([1], dtype=dtypes.int32) + 1.0
       self.evaluate(a)
 
@@ -786,7 +786,8 @@ class BinaryOpsTest(test_util.TensorFlowTestCase):
 
       def __radd__(self, other):
         raise TypeError("RHS not implemented")
-    with self.assertRaisesRegexp(error, error_message):
+
+    with self.assertRaisesRegex(error, error_message):
       a = array_ops.ones([1], dtype=dtypes.int32) + RHSRaisesError()
       self.evaluate(a)
 
@@ -794,13 +795,15 @@ class BinaryOpsTest(test_util.TensorFlowTestCase):
 
       def __radd__(self, other):
         return NotImplemented
-    with self.assertRaisesRegexp(error, error_message):
+
+    with self.assertRaisesRegex(error, error_message):
       a = array_ops.ones([1], dtype=dtypes.int32) + RHSReturnsNotImplemented()
       self.evaluate(a)
 
     class RHSNotImplemented(object):
       pass
-    with self.assertRaisesRegexp(error, error_message):
+
+    with self.assertRaisesRegex(error, error_message):
       a = array_ops.ones([1], dtype=dtypes.int32) + RHSNotImplemented()
       self.evaluate(a)
 
diff --git a/tensorflow/python/ops/nccl_ops_test.py b/tensorflow/python/ops/nccl_ops_test.py
index d481bd3c2bd..5b3e3e68921 100644
--- a/tensorflow/python/ops/nccl_ops_test.py
+++ b/tensorflow/python/ops/nccl_ops_test.py
@@ -141,9 +141,9 @@ class AllReduceTest(NcclTestCase):
         partial(_NcclAllReduce, nccl_ops.all_sum), lambda x, y: x + y)
 
   def testErrors(self):
-    with self.assertRaisesRegexp(ValueError, 'Device assignment required'):
+    with self.assertRaisesRegex(ValueError, 'Device assignment required'):
       nccl_ops.all_sum([array_ops.identity(np.random.random_sample((3, 4)))])
-    with self.assertRaisesRegexp(ValueError, 'Must pass >0 tensors'):
+    with self.assertRaisesRegex(ValueError, 'Must pass >0 tensors'):
       nccl_ops.all_sum([])
 
 
@@ -173,7 +173,7 @@ class BroadcastTest(NcclTestCase):
       self._Test(_NcclBroadcast, lambda x, y: x,
                  (['/device:GPU:0', '/device:CPU:0'],))
     except errors.NotFoundError as e:
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(e), "No registered '_NcclBroadcastRecv' OpKernel for CPU devices")
     else:
       # Session isn't executed when no GPU is available.
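Similarly, assertRegexpMatches was renamed to assertRegex; the second argument is still treated as a regular expression searched within the string. A small sketch, not part of the patch; 'SomeOp' and the message are invented placeholders:

    import unittest

    class AssertRegexExampleTest(unittest.TestCase):

      def testRegexSpelling(self):
        # assertRegex replaces the deprecated assertRegexpMatches and uses
        # re.search, so the pattern only needs to match part of the string.
        message = "No registered 'SomeOp' OpKernel for CPU devices"
        self.assertRegex(message, r"No registered '\w+' OpKernel")

    if __name__ == "__main__":
      unittest.main()
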
diff --git a/tensorflow/python/ops/nn_batchnorm_test.py b/tensorflow/python/ops/nn_batchnorm_test.py
index 5f0616b384f..de936b68a80 100644
--- a/tensorflow/python/ops/nn_batchnorm_test.py
+++ b/tensorflow/python/ops/nn_batchnorm_test.py
@@ -295,8 +295,8 @@ class BatchNormalizationTest(test.TestCase):
                                                shift_after_normalization)
             tf_batch_norm, keep_dims_tf_batch_norm = sess.run(
                 [bn, keep_dims_bn])
-            self.assertEquals(x_shape, tf_batch_norm.shape)
-            self.assertEquals(x_shape, keep_dims_tf_batch_norm.shape)
+            self.assertEqual(x_shape, tf_batch_norm.shape)
+            self.assertEqual(x_shape, keep_dims_tf_batch_norm.shape)
             self.assertAllClose(
                 tf_batch_norm, keep_dims_tf_batch_norm, atol=0.000001)
 
@@ -328,8 +328,8 @@ class BatchNormalizationTest(test.TestCase):
                                               scale_after_normalization,
                                               shift_after_normalization)
             [tf_batch_norm] = self.evaluate([bn])
-            self.assertEquals(x_shape, np_batch_norm.shape)
-            self.assertEquals(x_shape, tf_batch_norm.shape)
+            self.assertEqual(x_shape, np_batch_norm.shape)
+            self.assertEqual(x_shape, tf_batch_norm.shape)
             self.assertAllClose(np_batch_norm, tf_batch_norm, atol=atol)
 
   def testBatchNormArbitraryShapes(self):
diff --git a/tensorflow/python/ops/nn_loss_scaling_utilities_test.py b/tensorflow/python/ops/nn_loss_scaling_utilities_test.py
index 9b1c8cc791a..4f96f9ba6a3 100644
--- a/tensorflow/python/ops/nn_loss_scaling_utilities_test.py
+++ b/tensorflow/python/ops/nn_loss_scaling_utilities_test.py
@@ -97,9 +97,9 @@ class LossUtilitiesTest(test_lib.TestCase, parameterized.TestCase):
           self.evaluate(loss), (2. * 0.3 + 0.5 * 0.7 + 4. * 0.2 + 1. * 0.8) / 2)
 
   def testComputeAverageLossInvalidSampleWeights(self):
-    with self.assertRaisesRegexp((ValueError, errors_impl.InvalidArgumentError),
-                                 (r"Incompatible shapes: \[3\] vs. \[2\]|"
-                                  "Dimensions must be equal")):
+    with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
+                                (r"Incompatible shapes: \[3\] vs. \[2\]|"
+                                 "Dimensions must be equal")):
       nn_impl.compute_average_loss([2.5, 6.2, 5.],
                                    sample_weight=[0.2, 0.8],
                                    global_batch_size=10)
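As the loss-scaling hunk above relies on, assertRaisesRegex still accepts a tuple of exception classes together with an alternation pattern, which is how checks that differ between graph and eager error types keep working after the rename. A hedged sketch with made-up exceptions and messages:

    import unittest

    class AssertRaisesRegexTupleExampleTest(unittest.TestCase):

      def testAcceptsExceptionTuple(self):
        # Any exception class in the tuple is accepted, and the pattern is
        # searched in str(exception), so alternation covers both messages.
        with self.assertRaisesRegex((KeyError, ValueError),
                                    r"missing|invalid literal"):
          int("not a number")

    if __name__ == "__main__":
      unittest.main()
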
diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py
index bfe11b63eea..b5bcc2c3099 100644
--- a/tensorflow/python/ops/nn_test.py
+++ b/tensorflow/python/ops/nn_test.py
@@ -1027,7 +1027,7 @@ class LeakyReluTest(test_lib.TestCase):
     inputs = constant_op.constant(inputs)
 
     outputs = nn_ops.leaky_relu(inputs)
-    self.assertEquals(inputs.shape, outputs.shape)
+    self.assertEqual(inputs.shape, outputs.shape)
 
     inputs, outputs = self.evaluate([inputs, outputs])
 
diff --git a/tensorflow/python/ops/nn_xent_test.py b/tensorflow/python/ops/nn_xent_test.py
index 3e5c198fc6a..81b25a396d7 100644
--- a/tensorflow/python/ops/nn_xent_test.py
+++ b/tensorflow/python/ops/nn_xent_test.py
@@ -106,7 +106,7 @@ class SigmoidCrossEntropyWithLogitsTest(test.TestCase):
     self.assertAllClose(grads, [0.5, -0.5])
 
   def testShapeError(self):
-    with self.assertRaisesRegexp(ValueError, "must have the same shape"):
+    with self.assertRaisesRegex(ValueError, "must have the same shape"):
       nn_impl.sigmoid_cross_entropy_with_logits(labels=[1, 2, 3],
                                                 logits=[[2, 1]])
 
@@ -174,7 +174,7 @@ class WeightedCrossEntropyTest(test.TestCase):
     self.assertLess(err, 1e-7)
 
   def testShapeError(self):
-    with self.assertRaisesRegexp(ValueError, "must have the same shape"):
+    with self.assertRaisesRegex(ValueError, "must have the same shape"):
       nn_impl.weighted_cross_entropy_with_logits(
           targets=[1, 2, 3], logits=[[2, 1]], pos_weight=2.0)
 
diff --git a/tensorflow/python/ops/numpy_ops/np_utils_test.py b/tensorflow/python/ops/numpy_ops/np_utils_test.py
index 38b51f05e6e..001991e244e 100644
--- a/tensorflow/python/ops/numpy_ops/np_utils_test.py
+++ b/tensorflow/python/ops/numpy_ops/np_utils_test.py
@@ -68,20 +68,20 @@ f docstring.
       return
 
     # pylint: disable=unused-variable
-    with self.assertRaisesRegexp(TypeError, 'Cannot find parameter'):
+    with self.assertRaisesRegex(TypeError, 'Cannot find parameter'):
 
       @np_utils.np_doc(None, np_fun=np_fun)
       def f1(a):
         return
 
-    with self.assertRaisesRegexp(TypeError, 'is of kind'):
+    with self.assertRaisesRegex(TypeError, 'is of kind'):
 
       @np_utils.np_doc(None, np_fun=np_fun)
       def f2(x, kwargs):
         return
 
-    with self.assertRaisesRegexp(TypeError,
-                                 'Parameter "y" should have a default value'):
+    with self.assertRaisesRegex(TypeError,
+                                'Parameter "y" should have a default value'):
 
       @np_utils.np_doc(None, np_fun=np_fun)
       def f3(x, y):
diff --git a/tensorflow/python/ops/parallel_for/array_test.py b/tensorflow/python/ops/parallel_for/array_test.py
index 93fc8be78cc..85a2f6c191b 100644
--- a/tensorflow/python/ops/parallel_for/array_test.py
+++ b/tensorflow/python/ops/parallel_for/array_test.py
@@ -209,7 +209,7 @@ class ArrayTest(PForTestCase):
       x1 = array_ops.gather(x, i)
       return array_ops.tile(x1, [i, 1])
 
-    with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
+    with self.assertRaisesRegex(ValueError, "expected to be loop invariant"):
       pfor_control_flow_ops.pfor(loop_fn, 2, fallback_to_while_loop=False)
 
   def test_pack(self):
@@ -458,7 +458,7 @@ class ArrayTest(PForTestCase):
     # handled.
     self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)
     # Without fallback, ValueError is thrown.
-    with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
+    with self.assertRaisesRegex(ValueError, "expected to be loop invariant"):
       self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
 
   def test_depth_to_space(self):
diff --git a/tensorflow/python/ops/parallel_for/control_flow_ops_test.py b/tensorflow/python/ops/parallel_for/control_flow_ops_test.py
index 605400aa45b..f8e4e4762ac 100644
--- a/tensorflow/python/ops/parallel_for/control_flow_ops_test.py
+++ b/tensorflow/python/ops/parallel_for/control_flow_ops_test.py
@@ -82,7 +82,7 @@ class PForTest(PForTestCase):
       x_i = array_ops.gather(x, i)
       return nn.top_k(x_i)
 
-    with self.assertRaisesRegexp(ValueError, "No pfor vectorization"):
+    with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
       self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
     self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)
 
@@ -103,14 +103,14 @@ class PForTest(PForTestCase):
           parallel_iterations=parallel_iterations)
 
   def test_parallel_iterations_zero(self):
-    with self.assertRaisesRegexp(ValueError, "positive integer"):
+    with self.assertRaisesRegex(ValueError, "positive integer"):
       pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
-    with self.assertRaisesRegexp(TypeError, "positive integer"):
+    with self.assertRaisesRegex(TypeError, "positive integer"):
       pfor_control_flow_ops.for_loop(
           lambda i: 1, dtypes.int32, 8, parallel_iterations=0)
 
   def test_parallel_iterations_one(self):
-    with self.assertRaisesRegexp(ValueError, "Use for_loop instead"):
+    with self.assertRaisesRegex(ValueError, "Use for_loop instead"):
       pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)
 
   def test_vectorized_map(self):
@@ -273,8 +273,8 @@ class ReductionTest(PForTestCase):
       x_i = array_ops.gather(x, i)
       return pfor_config.reduce_sum(x_i)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "parallel_iterations currently unsupported"):
+    with self.assertRaisesRegex(ValueError,
+                                "parallel_iterations currently unsupported"):
       pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)
 
 
@@ -1084,7 +1084,7 @@ class StackTest(PForTestCase):
     def loop_fn(_):
       return data_flow_ops.stack_push_v2(s, 7)
 
-    with self.assertRaisesRegexp(ValueError, "StackPushV2 not allowed.*"):
+    with self.assertRaisesRegex(ValueError, "StackPushV2 not allowed.*"):
       pfor_control_flow_ops.pfor(loop_fn, iters=2)
 
 
@@ -2092,7 +2092,7 @@ class VariableTest(PForTestCase):
       return math_ops.matmul(z, a_var / 16)
 
     # Note that this error is only raised under v2 behavior.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "tf.function-decorated function tried to create variables on non-first"
     ):
diff --git a/tensorflow/python/ops/parallel_for/gradients_test.py b/tensorflow/python/ops/parallel_for/gradients_test.py
index fdb70c52778..90cc2a0ef31 100644
--- a/tensorflow/python/ops/parallel_for/gradients_test.py
+++ b/tensorflow/python/ops/parallel_for/gradients_test.py
@@ -439,7 +439,7 @@ class GradientsTest(test.TestCase):
   def test_batch_jacobian_bad_shapes(self):
     x = random_ops.random_uniform([2, 2])
     y = random_ops.random_uniform([3, 2])
-    with self.assertRaisesRegexp(ValueError, "Need first dimension of output"):
+    with self.assertRaisesRegex(ValueError, "Need first dimension of output"):
       gradients.batch_jacobian(y, x, use_pfor=True)
 
   def test_batch_jacobian_bad_unknown_shapes(self):
@@ -447,8 +447,8 @@ class GradientsTest(test.TestCase):
       x = array_ops.placeholder(dtypes.float32)
       y = array_ops.concat([x, x], axis=0)
       jacobian = gradients.batch_jacobian(y, x)
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   "assertion failed"):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  "assertion failed"):
         sess.run(jacobian, feed_dict={x: [[1, 2], [3, 4]]})
 
   def test_batch_jacobian_fixed_shape(self):
diff --git a/tensorflow/python/ops/ragged/convert_to_tensor_or_ragged_tensor_op_test.py b/tensorflow/python/ops/ragged/convert_to_tensor_or_ragged_tensor_op_test.py
index 087f048befa..f63bae3fdee 100644
--- a/tensorflow/python/ops/ragged/convert_to_tensor_or_ragged_tensor_op_test.py
+++ b/tensorflow/python/ops/ragged/convert_to_tensor_or_ragged_tensor_op_test.py
@@ -82,7 +82,7 @@ class RaggedConvertToTensorOrRaggedTensorTest(test_util.TensorFlowTestCase,
                                    preferred_dtype=None):
     rt = ragged_factory_ops.constant(pylist)
 
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       ragged_tensor.convert_to_tensor_or_ragged_tensor(rt, dtype,
                                                        preferred_dtype)
 
@@ -139,7 +139,7 @@ class RaggedConvertToTensorOrRaggedTensorTest(test_util.TensorFlowTestCase,
                                         message,
                                         dtype=None,
                                         preferred_dtype=None):
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       ragged_tensor.convert_to_tensor_or_ragged_tensor(value, dtype,
                                                        preferred_dtype)
 
@@ -175,7 +175,7 @@ class RaggedConvertToTensorOrRaggedTensorTest(test_util.TensorFlowTestCase,
                              dtype=None,
                              preferred_dtype=None):
     tensor = constant_op.constant(pylist)
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       ragged_tensor.convert_to_tensor_or_ragged_tensor(tensor, dtype,
                                                        preferred_dtype)
 
@@ -225,7 +225,7 @@ class RaggedConvertToTensorOrRaggedTensorTest(test_util.TensorFlowTestCase,
                                  message,
                                  dtype=None,
                                  preferred_dtype=None):
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       ragged_tensor.convert_to_tensor_or_ragged_tensor(value, dtype,
                                                        preferred_dtype)
 
diff --git a/tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py b/tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py
index 549a660ee12..fd40764ddd4 100644
--- a/tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_batch_gather_op_test.py
@@ -476,12 +476,14 @@ class RaggedBatchGatherOpTest(test_util.TensorFlowTestCase,
     ragged_indices = ragged_tensor.RaggedTensor.from_row_splits(
         indices, [0, 2, 4])
 
-    with self.assertRaisesRegexp(ValueError, r'batch_dims may only be negative '
-                                 r'if rank\(indices\) is statically known.'):
+    with self.assertRaisesRegex(
+        ValueError, r'batch_dims may only be negative '
+        r'if rank\(indices\) is statically known.'):
       ragged_batch_gather_ops.batch_gather(params, indices)
 
-    with self.assertRaisesRegexp(ValueError, r'batch_dims may only be negative '
-                                 r'if rank\(indices\) is statically known.'):
+    with self.assertRaisesRegex(
+        ValueError, r'batch_dims may only be negative '
+        r'if rank\(indices\) is statically known.'):
       ragged_batch_gather_ops.batch_gather(params, ragged_indices)
 
   @parameterized.parameters(
@@ -527,7 +529,7 @@ class RaggedBatchGatherOpTest(test_util.TensorFlowTestCase,
                                        indices,
                                        message=None,
                                        error=ValueError):
-    with self.assertRaisesRegexp(error, message):
+    with self.assertRaisesRegex(error, message):
       ragged_batch_gather_ops.batch_gather(params, indices)
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py b/tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py
index aa3e5583c46..c06f280fe0c 100644
--- a/tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_boolean_mask_op_test.py
@@ -246,35 +246,36 @@ class RaggedBooleanMaskOpTest(test_util.TensorFlowTestCase,
 
   def testErrors(self):
     if not context.executing_eagerly():
-      self.assertRaisesRegexp(ValueError,
-                              r'mask\.shape\.ndims must be known statically',
-                              ragged_array_ops.boolean_mask, [[1, 2]],
-                              array_ops.placeholder(dtypes.bool))
+      self.assertRaisesRegex(ValueError,
+                             r'mask\.shape\.ndims must be known statically',
+                             ragged_array_ops.boolean_mask, [[1, 2]],
+                             array_ops.placeholder(dtypes.bool))
 
     self.assertRaises(TypeError, ragged_array_ops.boolean_mask, [[1, 2]],
                       [[0, 1]])
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         ValueError, 'Tensor conversion requested dtype bool for '
         'RaggedTensor with dtype int32', ragged_array_ops.boolean_mask,
         ragged_factory_ops.constant([[1, 2]]),
         ragged_factory_ops.constant([[0, 0]]))
 
-    self.assertRaisesRegexp(
-        ValueError, r'Shapes \(1, 2\) and \(1, 3\) are incompatible',
-        ragged_array_ops.boolean_mask, [[1, 2]], [[True, False, True]])
+    self.assertRaisesRegex(ValueError,
+                           r'Shapes \(1, 2\) and \(1, 3\) are incompatible',
+                           ragged_array_ops.boolean_mask, [[1, 2]],
+                           [[True, False, True]])
 
-    self.assertRaisesRegexp(errors.InvalidArgumentError,
-                            r'Inputs must have identical ragged splits',
-                            ragged_array_ops.boolean_mask,
-                            ragged_factory_ops.constant([[1, 2]]),
-                            ragged_factory_ops.constant([[True, False, True]]))
+    self.assertRaisesRegex(errors.InvalidArgumentError,
+                           r'Inputs must have identical ragged splits',
+                           ragged_array_ops.boolean_mask,
+                           ragged_factory_ops.constant([[1, 2]]),
+                           ragged_factory_ops.constant([[True, False, True]]))
 
-    self.assertRaisesRegexp(ValueError, 'mask cannot be scalar',
-                            ragged_array_ops.boolean_mask, [[1, 2]], True)
+    self.assertRaisesRegex(ValueError, 'mask cannot be scalar',
+                           ragged_array_ops.boolean_mask, [[1, 2]], True)
 
-    self.assertRaisesRegexp(ValueError, 'mask cannot be scalar',
-                            ragged_array_ops.boolean_mask,
-                            ragged_factory_ops.constant([[1, 2]]), True)
+    self.assertRaisesRegex(ValueError, 'mask cannot be scalar',
+                           ragged_array_ops.boolean_mask,
+                           ragged_factory_ops.constant([[1, 2]]), True)
 
 
 if __name__ == '__main__':
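
Aside (illustration only, not part of the patch): the hunks above swap the deprecated assertRaisesRegexp for assertRaisesRegex, which can be used either as a context manager or called directly with the callable and its arguments. A minimal, self-contained sketch of both forms, assuming only the standard-library unittest API (the class name and sample values below are invented):

    import unittest


    class RaisesRegexSketch(unittest.TestCase):
      """Illustrates the two assertRaisesRegex call styles used in these tests."""

      def test_context_manager_form(self):
        # The second argument is a regex searched against str(exception).
        with self.assertRaisesRegex(ValueError, r'invalid literal .* base 10'):
          int('not a number')

      def test_callable_form(self):
        # The callable and its arguments follow the pattern argument.
        self.assertRaisesRegex(ValueError, r'invalid literal .* base 10',
                               int, 'not a number')


    if __name__ == '__main__':
      unittest.main()
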
diff --git a/tensorflow/python/ops/ragged/ragged_concat_op_test.py b/tensorflow/python/ops/ragged/ragged_concat_op_test.py
index 4661061de33..1ce07f2acd3 100644
--- a/tensorflow/python/ops/ragged/ragged_concat_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_concat_op_test.py
@@ -275,8 +275,8 @@ class RaggedConcatOpTest(test_util.TensorFlowTestCase,
                       message=None,
                       ragged_ranks=None):
     rt_inputs = self._rt_inputs_to_tensors(rt_inputs, ragged_ranks)
-    self.assertRaisesRegexp(error, message, ragged_concat_ops.concat, rt_inputs,
-                            axis)
+    self.assertRaisesRegex(error, message, ragged_concat_ops.concat, rt_inputs,
+                           axis)
 
   @parameterized.parameters([
       dict(
@@ -294,7 +294,7 @@ class RaggedConcatOpTest(test_util.TensorFlowTestCase,
         array_ops.placeholder_with_default(rt, shape=None) for rt in rt_inputs
     ]
     concatenated = ragged_concat_ops.concat(rt_inputs, axis)
-    with self.assertRaisesRegexp(error, message):
+    with self.assertRaisesRegex(error, message):
       self.evaluate(concatenated)
 
   def testNegativeAxisWithUnknownRankError(self):
@@ -304,7 +304,7 @@ class RaggedConcatOpTest(test_util.TensorFlowTestCase,
         array_ops.placeholder(dtypes.int64),
         array_ops.placeholder(dtypes.int64)
     ]
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         ValueError, r'axis may only be negative if ndims is statically known.',
         ragged_concat_ops.concat, rt_inputs, -1)
 
diff --git a/tensorflow/python/ops/ragged/ragged_const_op_test.py b/tensorflow/python/ops/ragged/ragged_const_op_test.py
index 883a4a55d76..62e7a7087aa 100644
--- a/tensorflow/python/ops/ragged/ragged_const_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_const_op_test.py
@@ -301,7 +301,7 @@ class RaggedConstOpTest(test_util.TensorFlowTestCase,
                            exception=None,
                            message=None):
     """Tests that `ragged_const()` raises an expected exception."""
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         exception,
         message,
         ragged_factory_ops.constant,
@@ -341,9 +341,9 @@ class RaggedConstOpTest(test_util.TensorFlowTestCase,
                                   message=None):
     """Tests for the _find_scalar_and_max_depth helper function."""
     if exception is not None:
-      self.assertRaisesRegexp(exception, message,
-                              ragged_factory_ops._find_scalar_and_max_depth,
-                              pylist)
+      self.assertRaisesRegex(exception, message,
+                             ragged_factory_ops._find_scalar_and_max_depth,
+                             pylist)
     else:
       self.assertEqual(
           ragged_factory_ops._find_scalar_and_max_depth(pylist),
@@ -391,7 +391,7 @@ class RaggedConstOpTest(test_util.TensorFlowTestCase,
                                            message=None):
     """Tests for the _default_inner_shape_for_pylist helper function."""
     if exception is not None:
-      self.assertRaisesRegexp(
+      self.assertRaisesRegex(
           exception, message,
           ragged.ragged_factory_ops._default_inner_shape_for_pylist, pylist,
           ragged_rank)
diff --git a/tensorflow/python/ops/ragged/ragged_constant_value_op_test.py b/tensorflow/python/ops/ragged/ragged_constant_value_op_test.py
index 94df6617a74..9cc395a0bb9 100644
--- a/tensorflow/python/ops/ragged/ragged_constant_value_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_constant_value_op_test.py
@@ -306,7 +306,7 @@ class RaggedConstantValueOpTest(test_util.TensorFlowTestCase,
                             exception=None,
                             message=None):
     """Tests that `constant_value()` raises an expected exception."""
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         exception,
         message,
         ragged_factory_ops.constant_value,
diff --git a/tensorflow/python/ops/ragged/ragged_cross_op_test.py b/tensorflow/python/ops/ragged/ragged_cross_op_test.py
index 1b089868c73..07e5964ba83 100644
--- a/tensorflow/python/ops/ragged/ragged_cross_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_cross_op_test.py
@@ -364,7 +364,7 @@ class RaggedCrossOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
           message='inputs must all have the same batch dimension size'),
   ])
   def testStaticError(self, inputs, exception=ValueError, message=None):
-    with self.assertRaisesRegexp(exception, message):
+    with self.assertRaisesRegex(exception, message):
       ragged_array_ops.cross(inputs)
 
   @parameterized.named_parameters([
@@ -381,7 +381,7 @@ class RaggedCrossOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
                        inputs,
                        exception=errors.InvalidArgumentError,
                        message=None):
-    with self.assertRaisesRegexp(exception, message):
+    with self.assertRaisesRegex(exception, message):
       self.evaluate(ragged_array_ops.cross(inputs))
 
   def _ragged_to_sparse(self, t):
diff --git a/tensorflow/python/ops/ragged/ragged_dispatch_test.py b/tensorflow/python/ops/ragged/ragged_dispatch_test.py
index 193e329e18a..7ef0d9fd0b8 100644
--- a/tensorflow/python/ops/ragged/ragged_dispatch_test.py
+++ b/tensorflow/python/ops/ragged/ragged_dispatch_test.py
@@ -445,8 +445,8 @@ class RaggedDispatchTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     x = ragged_factory_ops.constant([[1, 2], [3]])
     y = ragged_tensor.RaggedTensor.from_row_splits(
         array_ops.placeholder_with_default([1, 2, 3], shape=None), x.row_splits)
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Unable to broadcast: unknown rank'):
+    with self.assertRaisesRegex(ValueError,
+                                r'Unable to broadcast: unknown rank'):
       math_ops.add(x, y)
 
   @parameterized.parameters([
diff --git a/tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py b/tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py
index 790cabdaf6f..7c92be76171 100644
--- a/tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_dynamic_partition_op_test.py
@@ -215,8 +215,8 @@ class RaggedSegmentStackOpTest(test_util.TensorFlowTestCase,
   def testRuntimeError(self, data, partitions, num_partitions, error):
     data = ragged_factory_ops.constant(data)
     partitions = ragged_factory_ops.constant(partitions, dtype=dtypes.int64)
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 error):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                error):
       self.evaluate(
           ragged_array_ops.stack_dynamic_partitions(data, partitions,
                                                     num_partitions))
@@ -239,8 +239,8 @@ class RaggedSegmentStackOpTest(test_util.TensorFlowTestCase,
           error='must have rank 0'),
   ])
   def testStaticError(self, data, partitions, num_partitions, error):
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 error):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                error):
       ragged_array_ops.stack_dynamic_partitions(data, partitions,
                                                 num_partitions)
 
@@ -248,8 +248,8 @@ class RaggedSegmentStackOpTest(test_util.TensorFlowTestCase,
     if context.executing_eagerly():
       return
     partitions = array_ops.placeholder(dtypes.int32, None)
-    with self.assertRaisesRegexp((ValueError, errors.InvalidArgumentError),
-                                 'partitions must have known rank'):
+    with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
+                                'partitions must have known rank'):
       ragged_array_ops.stack_dynamic_partitions(['a', 'b', 'c'], partitions, 10)
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py b/tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py
index aac9052d044..7076fbce682 100644
--- a/tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_from_sparse_op_test.py
@@ -51,21 +51,21 @@ class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
 
   def testBadSparseTensorRank(self):
     st1 = sparse_tensor.SparseTensor(indices=[[0]], values=[0], dense_shape=[3])
-    self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
-                            RaggedTensor.from_sparse, st1)
+    self.assertRaisesRegex(ValueError, r'rank\(st_input\) must be 2',
+                           RaggedTensor.from_sparse, st1)
 
     st2 = sparse_tensor.SparseTensor(
         indices=[[0, 0, 0]], values=[0], dense_shape=[3, 3, 3])
-    self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
-                            RaggedTensor.from_sparse, st2)
+    self.assertRaisesRegex(ValueError, r'rank\(st_input\) must be 2',
+                           RaggedTensor.from_sparse, st2)
 
     if not context.executing_eagerly():
       st3 = sparse_tensor.SparseTensor(
           indices=array_ops.placeholder(dtypes.int64),
           values=[0],
           dense_shape=array_ops.placeholder(dtypes.int64))
-      self.assertRaisesRegexp(ValueError, r'rank\(st_input\) must be 2',
-                              RaggedTensor.from_sparse, st3)
+      self.assertRaisesRegex(ValueError, r'rank\(st_input\) must be 2',
+                             RaggedTensor.from_sparse, st3)
 
   def testGoodPartialSparseTensorRank(self):
     if not context.executing_eagerly():
@@ -91,20 +91,20 @@ class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
     # index_suffix of first index is not zero.
     st1 = sparse_tensor.SparseTensor(
         indices=[[0, 1], [0, 2], [2, 0]], values=[1, 2, 3], dense_shape=[3, 3])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'.*SparseTensor is not right-ragged'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'.*SparseTensor is not right-ragged'):
       self.evaluate(RaggedTensor.from_sparse(st1))
     # index_suffix of an index that starts a new row is not zero.
     st2 = sparse_tensor.SparseTensor(
         indices=[[0, 0], [0, 1], [2, 1]], values=[1, 2, 3], dense_shape=[3, 3])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'.*SparseTensor is not right-ragged'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'.*SparseTensor is not right-ragged'):
       self.evaluate(RaggedTensor.from_sparse(st2))
     # index_suffix of an index that continues a row skips a cell.
     st3 = sparse_tensor.SparseTensor(
         indices=[[0, 1], [0, 1], [0, 3]], values=[1, 2, 3], dense_shape=[3, 3])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'.*SparseTensor is not right-ragged'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'.*SparseTensor is not right-ragged'):
       self.evaluate(RaggedTensor.from_sparse(st3))
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py b/tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py
index 110caa28b59..7395bf52ba8 100644
--- a/tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_from_tensor_op_test.py
@@ -628,8 +628,8 @@ class RaggedTensorFromTensorOpTest(test_util.TensorFlowTestCase,
                  ragged_rank=1,
                  error=None):
     dt = constant_op.constant(tensor)
-    self.assertRaisesRegexp(error[0], error[1], RaggedTensor.from_tensor, dt,
-                            lengths, padding, ragged_rank)
+    self.assertRaisesRegex(error[0], error[1], RaggedTensor.from_tensor, dt,
+                           lengths, padding, ragged_rank)
 
 
 if __name__ == '__main__':
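
Aside (illustration only, not part of the patch): the stack_dynamic_partitions hunks above pass a tuple of exception classes, such as (ValueError, errors.InvalidArgumentError), as the expected-exception argument. assertRaisesRegex accepts such tuples just like assertRaises does; a small sketch under standard-library assumptions (the helper and messages below are invented):

    import unittest


    class RaisesTupleSketch(unittest.TestCase):
      """Illustrates passing a tuple of exception types to assertRaisesRegex."""

      def _fail(self, kind):
        if kind == 'value':
          raise ValueError('must have known rank')
        raise TypeError('must have known rank')

      def test_either_type_satisfies_the_assertion(self):
        # Raising any type in the tuple, with a matching message, passes.
        for kind in ('value', 'type'):
          with self.assertRaisesRegex((ValueError, TypeError),
                                      'must have known rank'):
            self._fail(kind)


    if __name__ == '__main__':
      unittest.main()
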
diff --git a/tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py b/tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py
index 19977b2f88b..a627d8848da 100644
--- a/tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_gather_nd_op_test.py
@@ -210,10 +210,10 @@ class RaggedGatherNdOpTest(test_util.TensorFlowTestCase,
     indices1 = array_ops.placeholder(dtypes.int32, shape=None)
     indices2 = array_ops.placeholder(dtypes.int32, shape=[None])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'indices.rank be statically known.'):
+    with self.assertRaisesRegex(ValueError,
+                                'indices.rank be statically known.'):
       ragged_gather_ops.gather_nd(params, indices1)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'indices.shape\[-1\] must be statically known.'):
       ragged_gather_ops.gather_nd(params, indices2)
 
@@ -236,7 +236,7 @@ class RaggedGatherNdOpTest(test_util.TensorFlowTestCase,
                                     indices,
                                     message=None,
                                     error=ValueError):
-    with self.assertRaisesRegexp(error, message):
+    with self.assertRaisesRegex(error, message):
       ragged_gather_ops.gather_nd(params, indices)
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_gather_op_test.py b/tensorflow/python/ops/ragged/ragged_gather_op_test.py
index 928e634989c..96ade51aaba 100644
--- a/tensorflow/python/ops/ragged/ragged_gather_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_gather_op_test.py
@@ -174,14 +174,14 @@ class RaggedGatherOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     tensor_indices = [0, 1, 2]
     ragged_params = ragged_factory_ops.constant([['a', 'b'], ['c']])
     ragged_indices = ragged_factory_ops.constant([[0, 3]])
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'indices\[1\] = 3 is not in \[0, 3\)'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'indices\[1\] = 3 is not in \[0, 3\)'):
       self.evaluate(ragged_gather_ops.gather(tensor_params, ragged_indices))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'indices\[2\] = 2 is not in \[0, 2\)'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'indices\[2\] = 2 is not in \[0, 2\)'):
       self.evaluate(ragged_gather_ops.gather(ragged_params, tensor_indices))
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'indices\[1\] = 3 is not in \[0, 2\)'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'indices\[1\] = 3 is not in \[0, 2\)'):
       self.evaluate(ragged_gather_ops.gather(ragged_params, ragged_indices))
 
   def testUnknownIndicesRankError(self):
@@ -190,9 +190,9 @@ class RaggedGatherOpTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     params = ragged_factory_ops.constant([], ragged_rank=1)
     indices = constant_op.constant([0], dtype=dtypes.int64)
     indices = array_ops.placeholder_with_default(indices, None)
-    self.assertRaisesRegexp(ValueError,
-                            r'rank\(indices\) must be known statically',
-                            ragged_gather_ops.gather, params, indices)
+    self.assertRaisesRegex(ValueError,
+                           r'rank\(indices\) must be known statically',
+                           ragged_gather_ops.gather, params, indices)
 
   # pylint: disable=bad-whitespace
   @parameterized.parameters([
diff --git a/tensorflow/python/ops/ragged/ragged_getitem_test.py b/tensorflow/python/ops/ragged/ragged_getitem_test.py
index f02e308a29d..f5ed69b6853 100644
--- a/tensorflow/python/ops/ragged/ragged_getitem_test.py
+++ b/tensorflow/python/ops/ragged/ragged_getitem_test.py
@@ -158,9 +158,9 @@ class RaggedGetItemTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def _TestGetItemException(self, rt, slice_spec, expected, message):
     """Helper function for testing RaggedTensor.__getitem__ exceptions."""
     tensor_slice_spec = _make_tensor_slice_spec(slice_spec, True)
-    with self.assertRaisesRegexp(expected, message):
+    with self.assertRaisesRegex(expected, message):
       self.evaluate(rt.__getitem__(slice_spec))
-    with self.assertRaisesRegexp(expected, message):
+    with self.assertRaisesRegex(expected, message):
       self.evaluate(rt.__getitem__(tensor_slice_spec))
 
   @parameterized.parameters(
diff --git a/tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py b/tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py
index 3fb4887f38c..588a5473741 100644
--- a/tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_map_flat_values_op_test.py
@@ -178,17 +178,18 @@ class RaggedMapInnerValuesOpTest(test_util.TensorFlowTestCase):
   def testRaggedTensorSplitsRaggedRankMismatchError(self):
     x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
     y = ragged_factory_ops.constant([[[3, 1, 4], []], [], [[1, 5]]])
-    self.assertRaisesRegexp(
-        ValueError, r'Inputs must have identical ragged splits.*',
-        ragged_functional_ops.map_flat_values, math_ops.add, x, y)
+    self.assertRaisesRegex(ValueError,
+                           r'Inputs must have identical ragged splits.*',
+                           ragged_functional_ops.map_flat_values, math_ops.add,
+                           x, y)
 
   def testRaggedTensorSplitsValueMismatchError(self):
     x = ragged_factory_ops.constant([[3, 1, 4], [], [1, 5]])
     y = ragged_factory_ops.constant([[1], [2, 3], [4, 5]])
-    self.assertRaisesRegexp(errors.InvalidArgumentError,
-                            r'Inputs must have identical ragged splits.*',
-                            ragged_functional_ops.map_flat_values, math_ops.add,
-                            x, y)
+    self.assertRaisesRegex(errors.InvalidArgumentError,
+                           r'Inputs must have identical ragged splits.*',
+                           ragged_functional_ops.map_flat_values, math_ops.add,
+                           x, y)
 
   def testRaggedTensorSplitsMismatchErrorAtRuntime(self):
     splits1 = array_ops.placeholder_with_default(
@@ -197,8 +198,8 @@ class RaggedMapInnerValuesOpTest(test_util.TensorFlowTestCase):
         constant_op.constant([0, 1, 3, 5], dtypes.int64), None)
     x = ragged_tensor.RaggedTensor.from_row_splits([3, 1, 4, 1, 5], splits1)
     y = ragged_tensor.RaggedTensor.from_row_splits([1, 2, 3, 4, 5], splits2)
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'.*Inputs must have identical ragged splits'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'.*Inputs must have identical ragged splits'):
       self.evaluate(ragged_functional_ops.map_flat_values(math_ops.add, x, y))
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py
index 9e74de4bc35..8a40e396a68 100644
--- a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py
@@ -263,7 +263,7 @@ class RaggedMapOpTest(test_util.TensorFlowTestCase,
   def testMismatchRaggedRank(self):
     elems = ragged_factory_ops.constant([[[1, 2, 3]], [[4, 5], [6, 7]]])
     fn = lambda x: ragged_math_ops.reduce_sum(x, axis=0)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'(?s)Expected `fn` to return.*But it returned.*'):
       _ = ragged_map_ops.map_fn(
           fn,
@@ -274,7 +274,7 @@ class RaggedMapOpTest(test_util.TensorFlowTestCase,
   def testMismatchRaggedRank2(self):
     elems = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [6, 7]])
     fn = lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'(?s)Expected `fn` to return.*But it returned.*'):
       _ = ragged_map_ops.map_fn(
           fn,
diff --git a/tensorflow/python/ops/ragged/ragged_merge_dims_op_test.py b/tensorflow/python/ops/ragged/ragged_merge_dims_op_test.py
index 0d81d926de9..5e810e1b49c 100644
--- a/tensorflow/python/ops/ragged/ragged_merge_dims_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_merge_dims_op_test.py
@@ -264,7 +264,7 @@ class RaggedMergeDimsOpTest(test_util.TensorFlowTestCase,
                                message=None,
                                ragged_rank=None):
     x = ragged_factory_ops.constant(rt, ragged_rank=ragged_rank)
-    with self.assertRaisesRegexp(exception, message):
+    with self.assertRaisesRegex(exception, message):
       self.evaluate(x.merge_dims(outer_axis, inner_axis))
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_one_hot_op_test.py b/tensorflow/python/ops/ragged/ragged_one_hot_op_test.py
index 83a4cf3605c..c65f4b16870 100644
--- a/tensorflow/python/ops/ragged/ragged_one_hot_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_one_hot_op_test.py
@@ -127,7 +127,7 @@ class RaggedOneHotTest(test_util.TensorFlowTestCase, parameterized.TestCase):
                  ragged_rank=None):
     ragged_indices = ragged_factory_ops.constant(
         indices, ragged_rank=ragged_rank)
-    with self.assertRaisesRegexp(exception, message):
+    with self.assertRaisesRegex(exception, message):
       array_ops.one_hot(
           ragged_indices,
           depth,
diff --git a/tensorflow/python/ops/ragged/ragged_operators_test.py b/tensorflow/python/ops/ragged/ragged_operators_test.py
index 8c7adf76b94..ff936d630e6 100644
--- a/tensorflow/python/ops/ragged/ragged_operators_test.py
+++ b/tensorflow/python/ops/ragged/ragged_operators_test.py
@@ -87,11 +87,11 @@ class RaggedElementwiseOpsTest(test_util.TensorFlowTestCase):
 
   def testDummyOperators(self):
     a = ragged_factory_ops.constant([[True, True], [False]])
-    with self.assertRaisesRegexp(TypeError,
-                                 'RaggedTensor may not be used as a boolean.'):
+    with self.assertRaisesRegex(TypeError,
+                                'RaggedTensor may not be used as a boolean.'):
       bool(a)
-    with self.assertRaisesRegexp(TypeError,
-                                 'RaggedTensor may not be used as a boolean.'):
+    with self.assertRaisesRegex(TypeError,
+                                'RaggedTensor may not be used as a boolean.'):
       if a:
         pass
 
diff --git a/tensorflow/python/ops/ragged/ragged_range_op_test.py b/tensorflow/python/ops/ragged/ragged_range_op_test.py
index d01a15fde83..b655fd1ea84 100644
--- a/tensorflow/python/ops/ragged/ragged_range_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_range_op_test.py
@@ -108,8 +108,8 @@ class RaggedRangeOpTest(test_util.TensorFlowTestCase):
                       ragged_math_ops.range, [0], [1, 2])
 
   def testKernelErrors(self):
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 r'Requires delta != 0'):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                r'Requires delta != 0'):
       self.evaluate(ragged_math_ops.range(0, 0, 0))
 
   def testShape(self):
diff --git a/tensorflow/python/ops/ragged/ragged_reduce_op_test.py b/tensorflow/python/ops/ragged/ragged_reduce_op_test.py
index 9331bc6e14f..a39090fa3a2 100644
--- a/tensorflow/python/ops/ragged/ragged_reduce_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_reduce_op_test.py
@@ -353,11 +353,11 @@ class RaggedReduceOpsTest(test_util.TensorFlowTestCase,
     axis = array_ops.placeholder_with_default(constant_op.constant([0]), None)
 
     if not context.executing_eagerly():
-      self.assertRaisesRegexp(
-          ValueError, r'axis must be known at graph construction time.',
-          ragged_math_ops.reduce_sum, rt_input, axis)
-    self.assertRaisesRegexp(TypeError, r'axis must be an int; got str.*',
-                            ragged_math_ops.reduce_sum, rt_input, ['x'])
+      self.assertRaisesRegex(ValueError,
+                             r'axis must be known at graph construction time.',
+                             ragged_math_ops.reduce_sum, rt_input, axis)
+    self.assertRaisesRegex(TypeError, r'axis must be an int; got str.*',
+                           ragged_math_ops.reduce_sum, rt_input, ['x'])
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/python/ops/ragged/ragged_reverse_op_test.py b/tensorflow/python/ops/ragged/ragged_reverse_op_test.py
index c0bd40941ab..b4dafd15a2f 100644
--- a/tensorflow/python/ops/ragged/ragged_reverse_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_reverse_op_test.py
@@ -79,11 +79,10 @@ class RaggedReverseOpTest(test_util.TensorFlowTestCase,
     self.assertAllClose(result, expected)
 
   def testErrors(self):
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         TypeError, '`axis` must be a list of int or a constant tensor *',
         ragged_array_ops.reverse,
-        ragged_factory_ops.constant([[1], [2, 3]], ragged_rank=1),
-        [0, None])
+        ragged_factory_ops.constant([[1], [2, 3]], ragged_rank=1), [0, None])
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py b/tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py
index b2e26e7e856..9cb064e749f 100644
--- a/tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_row_lengths_op_test.py
@@ -138,7 +138,7 @@ class RaggedRowLengthsOp(test_util.TensorFlowTestCase,
   ])
   def testErrors(self, rt_input, exception, message=None, axis=1):
     rt = ragged_factory_ops.constant(rt_input)
-    with self.assertRaisesRegexp(exception, message):
+    with self.assertRaisesRegex(exception, message):
       rt.row_lengths(axis)
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py b/tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py
index de09f667977..c53ed3be32b 100644
--- a/tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_row_splits_to_segment_ids_op_test.py
@@ -39,16 +39,15 @@ class RaggedSplitsToSegmentIdsOpTest(test_util.TensorFlowTestCase):
     self.assertAllEqual(segment_ids, [])
 
   def testErrors(self):
-    self.assertRaisesRegexp(ValueError, r'Invalid row_splits: \[\]',
-                            segment_id_ops.row_splits_to_segment_ids, [])
-    self.assertRaisesRegexp(
-        ValueError, r'splits must have dtype int32 or int64',
-        segment_id_ops.row_splits_to_segment_ids,
-        constant_op.constant([0.5]))
-    self.assertRaisesRegexp(ValueError, r'Shape \(\) must have rank 1',
-                            segment_id_ops.row_splits_to_segment_ids, 0)
-    self.assertRaisesRegexp(ValueError, r'Shape \(1, 1\) must have rank 1',
-                            segment_id_ops.row_splits_to_segment_ids, [[0]])
+    self.assertRaisesRegex(ValueError, r'Invalid row_splits: \[\]',
+                           segment_id_ops.row_splits_to_segment_ids, [])
+    self.assertRaisesRegex(ValueError, r'splits must have dtype int32 or int64',
+                           segment_id_ops.row_splits_to_segment_ids,
+                           constant_op.constant([0.5]))
+    self.assertRaisesRegex(ValueError, r'Shape \(\) must have rank 1',
+                           segment_id_ops.row_splits_to_segment_ids, 0)
+    self.assertRaisesRegex(ValueError, r'Shape \(1, 1\) must have rank 1',
+                           segment_id_ops.row_splits_to_segment_ids, [[0]])
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/python/ops/ragged/ragged_segment_ids_to_row_splits_op_test.py b/tensorflow/python/ops/ragged/ragged_segment_ids_to_row_splits_op_test.py
index 717e401693e..16559a256d9 100644
--- a/tensorflow/python/ops/ragged/ragged_segment_ids_to_row_splits_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_segment_ids_to_row_splits_op_test.py
@@ -39,14 +39,14 @@ class RaggedSplitsToSegmentIdsOpTest(test_util.TensorFlowTestCase):
     self.assertAllEqual(segment_ids, [0])
 
   def testErrors(self):
-    self.assertRaisesRegexp(TypeError,
-                            r'segment_ids must be an integer tensor.*',
-                            segment_id_ops.segment_ids_to_row_splits,
-                            constant_op.constant([0.5]))
-    self.assertRaisesRegexp(ValueError, r'Shape \(\) must have rank 1',
-                            segment_id_ops.segment_ids_to_row_splits, 0)
-    self.assertRaisesRegexp(ValueError, r'Shape \(1, 1\) must have rank 1',
-                            segment_id_ops.segment_ids_to_row_splits, [[0]])
+    self.assertRaisesRegex(TypeError,
+                           r'segment_ids must be an integer tensor.*',
+                           segment_id_ops.segment_ids_to_row_splits,
+                           constant_op.constant([0.5]))
+    self.assertRaisesRegex(ValueError, r'Shape \(\) must have rank 1',
+                           segment_id_ops.segment_ids_to_row_splits, 0)
+    self.assertRaisesRegex(ValueError, r'Shape \(1, 1\) must have rank 1',
+                           segment_id_ops.segment_ids_to_row_splits, [[0]])
 
   def testNumSegments(self):
     segment_ids = [0, 0, 0, 2, 2, 3, 4, 4, 4]
diff --git a/tensorflow/python/ops/ragged/ragged_segment_op_test.py b/tensorflow/python/ops/ragged/ragged_segment_op_test.py
index d29708a5f5d..953e1076c2b 100644
--- a/tensorflow/python/ops/ragged/ragged_segment_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_segment_op_test.py
@@ -187,7 +187,7 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
   def testShapeMismatchError1(self):
     dt = constant_op.constant([1, 2, 3, 4, 5, 6])
     segment_ids = ragged_factory_ops.constant([[1, 2], []])
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         ValueError, 'segment_ids.shape must be a prefix of data.shape, '
         'but segment_ids is ragged and data is not.',
         ragged_math_ops.segment_sum, dt, segment_ids, 3)
@@ -202,7 +202,7 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
     segment_ids = ragged_factory_ops.constant([[1, 2], [1], [1, 1, 2], [2]])
 
     # Error is raised at graph-building time if we can detect it then.
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         errors.InvalidArgumentError,
         'segment_ids.shape must be a prefix of data.shape.*',
         ragged_math_ops.segment_sum, rt, segment_ids, 3)
@@ -211,7 +211,7 @@ class RaggedSegmentOpsTest(test_util.TensorFlowTestCase,
     segment_ids2 = ragged_tensor.RaggedTensor.from_row_splits(
         array_ops.placeholder_with_default(segment_ids.values, None),
         array_ops.placeholder_with_default(segment_ids.row_splits, None))
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         errors.InvalidArgumentError,
         'segment_ids.shape must be a prefix of data.shape.*'):
       self.evaluate(ragged_math_ops.segment_sum(rt, segment_ids2, 3))
diff --git a/tensorflow/python/ops/ragged/ragged_stack_op_test.py b/tensorflow/python/ops/ragged/ragged_stack_op_test.py
index e6931c41904..6e1db50e180 100644
--- a/tensorflow/python/ops/ragged/ragged_stack_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_stack_op_test.py
@@ -365,8 +365,8 @@ class RaggedStackOpTest(test_util.TensorFlowTestCase,
           message='axis=3 out of bounds: expected -3<=axis<3'),
   )
   def testError(self, rt_inputs, axis, error, message):
-    self.assertRaisesRegexp(error, message, ragged_concat_ops.stack, rt_inputs,
-                            axis)
+    self.assertRaisesRegex(error, message, ragged_concat_ops.stack, rt_inputs,
+                           axis)
 
   def testSingleTensorInput(self):
     """Tests ragged_stack with a single tensor input.
diff --git a/tensorflow/python/ops/ragged/ragged_tensor_shape_test.py b/tensorflow/python/ops/ragged/ragged_tensor_shape_test.py
index afe3390ce6d..7014226dc99 100644
--- a/tensorflow/python/ops/ragged/ragged_tensor_shape_test.py
+++ b/tensorflow/python/ops/ragged/ragged_tensor_shape_test.py
@@ -373,9 +373,8 @@ class RaggedTensorShapeTest(test_util.TensorFlowTestCase,
 
   def testRepr(self):
     shape = RaggedTensorDynamicShape.from_dim_sizes([2, (2, 1), 2, 1])
-    self.assertRegexpMatches(
-        repr(shape),
-        r'RaggedTensorDynamicShape\('
+    self.assertRegex(
+        repr(shape), r'RaggedTensorDynamicShape\('
         r'partitioned_dim_sizes=\(<[^>]+>, <[^>]+>\), '
         r'inner_dim_sizes=<[^>]+>\)')
 
diff --git a/tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py b/tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py
index 4804184b7ff..a69d2426966 100644
--- a/tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_to_sparse_op_test.py
@@ -146,7 +146,7 @@ class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
     bad_rt1 = ragged_tensor.RaggedTensor.from_row_splits(
         row_splits=[2, 3], values=[1, 2, 3], validate=False)
     bad_split0 = r'First value of ragged splits must be 0.*'
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, bad_split0):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, bad_split0):
       self.evaluate(bad_rt1.to_sparse())
 
     bad_rt2 = ragged_tensor.RaggedTensor.from_row_splits(
@@ -158,8 +158,8 @@ class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
         validate=False)
     split_mismatch1_error = r'Final value of ragged splits must match.*'
     for rt in [bad_rt2, bad_rt3]:
-      with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                   split_mismatch1_error):
+      with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                  split_mismatch1_error):
         self.evaluate(rt.to_sparse())
 
     bad_rt4 = ragged_tensor.RaggedTensor.from_row_splits(
@@ -168,15 +168,15 @@ class RaggedTensorToSparseOpTest(test_util.TensorFlowTestCase):
             row_splits=[0], values=empty_vector, validate=False),
         validate=False)
     split_mismatch2_error = r'Final value of ragged splits must match.*'
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 split_mismatch2_error):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                split_mismatch2_error):
       self.evaluate(bad_rt4.to_sparse())
 
     bad_rt5 = ragged_tensor.RaggedTensor.from_row_splits(
         row_splits=empty_vector, values=[], validate=False)
     empty_splits_error = (r'ragged splits may not be empty.*')
-    with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                 empty_splits_error):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                empty_splits_error):
       self.evaluate(bad_rt5.to_sparse())
 
   def testGradient(self):
diff --git a/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py b/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
index 83b36394cc4..a4fa2dc292f 100644
--- a/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
@@ -396,11 +396,11 @@ class RaggedTensorToTensorOpTest(test_util.TensorFlowTestCase,
                 shape=None):
 
     rt = ragged_factory_ops.constant(rt_input, ragged_rank=ragged_rank)
-    with self.assertRaisesRegexp(error_type, error):
+    with self.assertRaisesRegex(error_type, error):
       self.evaluate(rt.to_tensor(default_value=default, shape=shape))
     rt_placeholder = nest.map_structure(
         make_placeholder, rt, expand_composites=True)
-    with self.assertRaisesRegexp(error_type, error):
+    with self.assertRaisesRegex(error_type, error):
       self.evaluate(
           rt_placeholder.to_tensor(default_value=default, shape=shape))
 
diff --git a/tensorflow/python/ops/ragged/ragged_util_test.py b/tensorflow/python/ops/ragged/ragged_util_test.py
index b4af8ef2c78..c2a4b3d7af1 100644
--- a/tensorflow/python/ops/ragged/ragged_util_test.py
+++ b/tensorflow/python/ops/ragged/ragged_util_test.py
@@ -220,7 +220,7 @@ class RaggedUtilTest(test_util.TensorFlowTestCase,
       data = array_ops.placeholder_with_default(data, None)
       repeats = array_ops.placeholder_with_default(repeats, None)
 
-    with self.assertRaisesRegexp(exception, error):
+    with self.assertRaisesRegex(exception, error):
       ragged_util.repeat(data, repeats, axis)
 
 
diff --git a/tensorflow/python/ops/ragged/ragged_where_op_test.py b/tensorflow/python/ops/ragged/ragged_where_op_test.py
index a0c6cfa5bed..4d5d6cd666a 100644
--- a/tensorflow/python/ops/ragged/ragged_where_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_where_op_test.py
@@ -205,7 +205,7 @@ class RaggedWhereOpTest(test_util.TensorFlowTestCase,
           message='Input shapes do not match.'),
   ])
   def testRaggedWhereErrors(self, condition, error, message, x=None, y=None):
-    with self.assertRaisesRegexp(error, message):
+    with self.assertRaisesRegex(error, message):
       ragged_where_op.where(condition, x, y)
 
 
diff --git a/tensorflow/python/ops/ragged/row_partition_test.py b/tensorflow/python/ops/ragged/row_partition_test.py
index d3662a53ca9..25f8f0d2e99 100644
--- a/tensorflow/python/ops/ragged/row_partition_test.py
+++ b/tensorflow/python/ops/ragged/row_partition_test.py
@@ -77,24 +77,23 @@ class RowPartitionTest(test_util.TensorFlowTestCase, parameterized.TestCase):
   def testRaggedTensorConstructionErrors(self):
     row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 'RaggedTensor constructor is private'):
+    with self.assertRaisesRegex(ValueError,
+                                'RaggedTensor constructor is private'):
       RowPartition(row_splits=row_splits)
 
-    with self.assertRaisesRegexp(TypeError,
-                                 'Row-partitioning argument must be a Tensor'):
+    with self.assertRaisesRegex(TypeError,
+                                'Row-partitioning argument must be a Tensor'):
       RowPartition(
           row_splits=[0, 2, 2, 5, 6, 7],
           internal=row_partition._row_partition_factory_key)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Shape \(6, 1\) must have rank 1'):
+    with self.assertRaisesRegex(ValueError, r'Shape \(6, 1\) must have rank 1'):
       RowPartition(
           row_splits=array_ops.expand_dims(row_splits, 1),
           internal=row_partition._row_partition_factory_key)
 
-    with self.assertRaisesRegexp(TypeError,
-                                 'Cached value must be a Tensor or None.'):
+    with self.assertRaisesRegex(TypeError,
+                                'Cached value must be a Tensor or None.'):
       RowPartition(
           row_splits=row_splits,
           row_lengths=[2, 3, 4],
@@ -202,7 +201,7 @@ class RowPartitionTest(test_util.TensorFlowTestCase, parameterized.TestCase):
 
   def testFromRowSplitsWithEmptySplits(self):
     err_msg = 'row_splits tensor may not be empty'
-    with self.assertRaisesRegexp(ValueError, err_msg):
+    with self.assertRaisesRegex(ValueError, err_msg):
       RowPartition.from_row_splits([])
 
   def testFromRowStarts(self):
@@ -280,27 +279,26 @@ class RowPartitionTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
     nrows = constant_op.constant(5, dtypes.int64)
 
-    with self.assertRaisesRegexp(ValueError, r'Expected nrows >= 0; got -2'):
+    with self.assertRaisesRegex(ValueError, r'Expected nrows >= 0; got -2'):
       RowPartition.from_value_rowids(
           value_rowids=array_ops.placeholder_with_default(value_rowids, None),
           nrows=-2)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=2, '
         r'value_rowids\[-1\]=4'):
       RowPartition.from_value_rowids(value_rowids=value_rowids, nrows=2)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=4, '
         r'value_rowids\[-1\]=4'):
       RowPartition.from_value_rowids(value_rowids=value_rowids, nrows=4)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 r'Shape \(7, 1\) must have rank 1'):
+    with self.assertRaisesRegex(ValueError, r'Shape \(7, 1\) must have rank 1'):
       RowPartition.from_value_rowids(
           value_rowids=array_ops.expand_dims(value_rowids, 1), nrows=nrows)
 
-    with self.assertRaisesRegexp(ValueError, r'Shape \(1,\) must have rank 0'):
+    with self.assertRaisesRegex(ValueError, r'Shape \(1,\) must have rank 0'):
       RowPartition.from_value_rowids(
           value_rowids=value_rowids, nrows=array_ops.expand_dims(nrows, 0))
 
@@ -626,9 +624,9 @@ class RowPartitionTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     # Errors that are caught by static shape checks.
     x = x()
     y = y()
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       x.merge_precomputed_encodings(y).row_splits()
-    with self.assertRaisesRegexp(ValueError, message):
+    with self.assertRaisesRegex(ValueError, message):
       y.merge_precomputed_encodings(x).row_splits()
 
   @parameterized.named_parameters([
@@ -662,9 +660,9 @@ class RowPartitionTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     # Errors that are caught by runtime value checks.
     x = x()
     y = y()
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, message):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, message):
       self.evaluate(x.merge_precomputed_encodings(y).row_splits())
-    with self.assertRaisesRegexp(errors.InvalidArgumentError, message):
+    with self.assertRaisesRegex(errors.InvalidArgumentError, message):
       self.evaluate(y.merge_precomputed_encodings(x).row_splits())
 
 
@@ -713,7 +711,7 @@ class RowPartitionSpecTest(test_util.TensorFlowTestCase,
                             uniform_row_length=None,
                             dtype=dtypes.int64,
                             error=None):
-    with self.assertRaisesRegexp(ValueError, error):
+    with self.assertRaisesRegex(ValueError, error):
       RowPartitionSpec(nrows, nvals, uniform_row_length, dtype)
 
   def testValueType(self):
@@ -841,7 +839,7 @@ class RowPartitionSpecTest(test_util.TensorFlowTestCase,
       (RowPartitionSpec(), RowPartitionSpec(dtype=dtypes.int32)),
   ])
   def testMostSpecificCompatibleTypeError(self, spec1, spec2):
-    with self.assertRaisesRegexp(ValueError, 'not compatible'):
+    with self.assertRaisesRegex(ValueError, 'not compatible'):
       spec1.most_specific_compatible_type(spec2)
 
   def testFromValue(self):
diff --git a/tensorflow/python/ops/ragged/string_ngrams_op_test.py b/tensorflow/python/ops/ragged/string_ngrams_op_test.py
index e0e22c4b384..fed4b1441d5 100644
--- a/tensorflow/python/ops/ragged/string_ngrams_op_test.py
+++ b/tensorflow/python/ops/ragged/string_ngrams_op_test.py
@@ -332,7 +332,7 @@ class StringNgramsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
                  preserve_short_sequences=False,
                  error=None,
                  exception=ValueError):
-    with self.assertRaisesRegexp(exception, error):
+    with self.assertRaisesRegex(exception, error):
       ragged_string_ops.ngrams(data, ngram_width, separator, pad_values,
                                padding_width, preserve_short_sequences)
 
@@ -343,7 +343,7 @@ class StringNgramsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
     def f(v):
       return ragged_string_ops.ngrams(v, 2)
 
-    with self.assertRaisesRegexp(ValueError, "Rank of data must be known."):
+    with self.assertRaisesRegex(ValueError, "Rank of data must be known."):
       f([b"foo", b"bar"])
 
 
diff --git a/tensorflow/python/ops/raw_ops_test.py b/tensorflow/python/ops/raw_ops_test.py
index fff94f5c25a..850e96bb9ed 100644
--- a/tensorflow/python/ops/raw_ops_test.py
+++ b/tensorflow/python/ops/raw_ops_test.py
@@ -34,12 +34,12 @@ class RawOpsTest(test.TestCase):
     self.assertEqual([2], self.evaluate(gen_math_ops.Add(x=x, y=x)))
 
   def testRequiresKwargs(self):
-    with self.assertRaisesRegexp(TypeError, "only takes keyword args"):
+    with self.assertRaisesRegex(TypeError, "only takes keyword args"):
       gen_math_ops.Add(1., 1.)
 
   def testRequiresKwargs_providesSuggestion(self):
     msg = "possible keys: \\['x', 'y', 'name'\\]"
-    with self.assertRaisesRegexp(TypeError, msg):
+    with self.assertRaisesRegex(TypeError, msg):
       gen_math_ops.Add(1., y=2.)
 
   def testName(self):
diff --git a/tensorflow/python/ops/structured/structured_tensor_slice_test.py b/tensorflow/python/ops/structured/structured_tensor_slice_test.py
index 0eaef216a01..7be99600fd2 100644
--- a/tensorflow/python/ops/structured/structured_tensor_slice_test.py
+++ b/tensorflow/python/ops/structured/structured_tensor_slice_test.py
@@ -256,7 +256,7 @@ class StructuredTensorSliceTest(test_util.TensorFlowTestCase,
   ])
   def testGetItemError(self, slice_spec, error, exception=ValueError):
     struct = structured_tensor.StructuredTensor.from_pyval(EXAMPLE_STRUCT)
-    with self.assertRaisesRegexp(exception, error):
+    with self.assertRaisesRegex(exception, error):
       struct.__getitem__(slice_spec)
 
   @parameterized.parameters([
@@ -266,7 +266,7 @@ class StructuredTensorSliceTest(test_util.TensorFlowTestCase,
   def testGetItemFromVectorError(self, slice_spec, error, exception=ValueError):
     struct = structured_tensor.StructuredTensor.from_pyval(
         EXAMPLE_STRUCT_VECTOR)
-    with self.assertRaisesRegexp(exception, error):
+    with self.assertRaisesRegex(exception, error):
       struct.__getitem__(slice_spec)
 
 
diff --git a/tensorflow/python/ops/structured/structured_tensor_spec_test.py b/tensorflow/python/ops/structured/structured_tensor_spec_test.py
index 3684c84d8f5..4637a1a51e5 100644
--- a/tensorflow/python/ops/structured/structured_tensor_spec_test.py
+++ b/tensorflow/python/ops/structured/structured_tensor_spec_test.py
@@ -90,7 +90,7 @@ class StructuredTensorSpecTest(test_util.TensorFlowTestCase,
        r'field_specs must be a dictionary with TypeSpec values\.'),
   ])
   def testConstructionErrors(self, shape, field_specs, error):
-    with self.assertRaisesRegexp(TypeError, error):
+    with self.assertRaisesRegex(TypeError, error):
       structured_tensor.StructuredTensorSpec(shape, field_specs)
 
   def testValueType(self):
diff --git a/tensorflow/python/ops/structured/structured_tensor_test.py b/tensorflow/python/ops/structured/structured_tensor_test.py
index 896bfff1296..75aa5a872a6 100644
--- a/tensorflow/python/ops/structured/structured_tensor_test.py
+++ b/tensorflow/python/ops/structured/structured_tensor_test.py
@@ -73,8 +73,8 @@ class StructuredTensorTest(test_util.TensorFlowTestCase,
         self.assertAllEqual(a_value, b_value, msg)
 
   def testConstructorIsPrivate(self):
-    with self.assertRaisesRegexp(ValueError,
-                                 "StructuredTensor constructor is private"):
+    with self.assertRaisesRegex(ValueError,
+                                "StructuredTensor constructor is private"):
       structured_tensor.StructuredTensor({}, (), None, ())
 
   @parameterized.named_parameters([
@@ -453,7 +453,7 @@ class StructuredTensorTest(test_util.TensorFlowTestCase,
       nrows = nrows()  # deferred construction.
     if callable(row_partitions):
       row_partitions = row_partitions()  # deferred construction.
-    with self.assertRaisesRegexp(err, msg):
+    with self.assertRaisesRegex(err, msg):
       struct = StructuredTensor.from_fields(
           fields=fields,
           shape=shape,
@@ -468,7 +468,7 @@ class StructuredTensorTest(test_util.TensorFlowTestCase,
     nrows = constant_op.constant(5)
     static_nrows = tensor_shape.Dimension(5)
     value = constant_op.constant([1, 2, 3])
-    with self.assertRaisesRegexp(ValueError, "fields have incompatible nrows"):
+    with self.assertRaisesRegex(ValueError, "fields have incompatible nrows"):
       structured_tensor._merge_nrows(nrows, static_nrows, value, dtypes.int32,
                                      validate=False)
 
@@ -538,12 +538,12 @@ class StructuredTensorTest(test_util.TensorFlowTestCase,
   def testPartitionOuterDimsErrors(self):
     st = StructuredTensor.from_fields({})
     partition = row_partition.RowPartition.from_row_splits([0])
-    with self.assertRaisesRegexp(ValueError,
-                                 r"Shape \(\) must have rank at least 1"):
+    with self.assertRaisesRegex(ValueError,
+                                r"Shape \(\) must have rank at least 1"):
       st.partition_outer_dimension(partition)
 
-    with self.assertRaisesRegexp(TypeError,
-                                 "row_partition must be a RowPartition"):
+    with self.assertRaisesRegex(TypeError,
+                                "row_partition must be a RowPartition"):
       st.partition_outer_dimension(10)
 
   @parameterized.named_parameters([
@@ -728,13 +728,13 @@ class StructuredTensorTest(test_util.TensorFlowTestCase,
 
   ])  # pyformat: disable
   def testFromPyvalError(self, pyval, err=ValueError, type_spec=None, msg=None):
-    with self.assertRaisesRegexp(err, msg):
+    with self.assertRaisesRegex(err, msg):
       structured_tensor.StructuredTensor.from_pyval(pyval, type_spec)
 
   def testToPyvalRequiresEagerMode(self):
     st = structured_tensor.StructuredTensor.from_pyval({"a": 5})
     if not context.executing_eagerly():
-      with self.assertRaisesRegexp(ValueError, "only supported in eager mode."):
+      with self.assertRaisesRegex(ValueError, "only supported in eager mode."):
         st.to_pyval()
 
   @parameterized.named_parameters([
@@ -915,7 +915,7 @@ class StructuredTensorTest(test_util.TensorFlowTestCase,
 
   def testMergeDimsError(self):
     st = StructuredTensor.from_pyval([[[{"a": 5}]]])
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r"Expected outer_axis \(2\) to be less than inner_axis \(1\)"):
       st.merge_dims(2, 1)
@@ -925,7 +925,7 @@ class StructuredTensorTest(test_util.TensorFlowTestCase,
     self.assertAllEqual(st.field_value(("a",)), 5)
     self.assertAllEqual(st.field_value(("b", "c")), [1, 2, 3])
     expected = "Field path \(.*a.*,.*b.*\) not found in .*"
-    with self.assertRaisesRegexp(KeyError, expected):
+    with self.assertRaisesRegex(KeyError, expected):
       st.field_value(("a", "b"))
 
   def testRepr(self):
diff --git a/tensorflow/python/profiler/pprof_profiler_test.py b/tensorflow/python/profiler/pprof_profiler_test.py
index 3f5bd9e79be..4ad3eb02469 100644
--- a/tensorflow/python/profiler/pprof_profiler_test.py
+++ b/tensorflow/python/profiler/pprof_profiler_test.py
@@ -40,10 +40,10 @@ class PprofProfilerTest(test.TestCase):
     graph.get_operations.return_value = []
 
     profiles = pprof_profiler.get_profiles(graph, run_metadata)
-    self.assertEquals(0, len(profiles))
+    self.assertEqual(0, len(profiles))
     profile_files = pprof_profiler.profile(
         graph, run_metadata, output_dir)
-    self.assertEquals(0, len(profile_files))
+    self.assertEqual(0, len(profile_files))
 
   def testRunMetadataEmpty(self):
     output_dir = test.get_temp_dir()
@@ -56,10 +56,10 @@ class PprofProfilerTest(test.TestCase):
     graph.get_operations.return_value = [op1]
 
     profiles = pprof_profiler.get_profiles(graph, run_metadata)
-    self.assertEquals(0, len(profiles))
+    self.assertEqual(0, len(profiles))
     profile_files = pprof_profiler.profile(
         graph, run_metadata, output_dir)
-    self.assertEquals(0, len(profile_files))
+    self.assertEqual(0, len(profile_files))
 
   def testValidProfile(self):
     output_dir = test.get_temp_dir()
@@ -123,18 +123,18 @@ comment: 9
 """
     # Test with protos
     profiles = pprof_profiler.get_profiles(graph, run_metadata)
-    self.assertEquals(1, len(profiles))
+    self.assertEqual(1, len(profiles))
     self.assertTrue('deviceA' in profiles)
-    self.assertEquals(expected_proto, str(profiles['deviceA']))
+    self.assertEqual(expected_proto, str(profiles['deviceA']))
     # Test with files
     profile_files = pprof_profiler.profile(
         graph, run_metadata, output_dir)
-    self.assertEquals(1, len(profile_files))
+    self.assertEqual(1, len(profile_files))
     with gzip.open(profile_files[0]) as profile_file:
       profile_contents = profile_file.read()
       profile = profile_pb2.Profile()
       profile.ParseFromString(profile_contents)
-      self.assertEquals(expected_proto, str(profile))
+      self.assertEqual(expected_proto, str(profile))
 
   @test_util.run_v1_only('b/120545219')
   def testProfileWithWhileLoop(self):
@@ -150,16 +150,16 @@ comment: 9
       r = control_flow_ops.while_loop(c, b, [i])
       sess.run(r, options=options, run_metadata=run_metadata)
       profiles = pprof_profiler.get_profiles(sess.graph, run_metadata)
-      self.assertEquals(1, len(profiles))
+      self.assertEqual(1, len(profiles))
       profile = next(iter(profiles.values()))
       add_samples = []  # Samples for the while/Add node
       for sample in profile.sample:
         if profile.string_table[sample.label[0].str] == 'while/Add':
           add_samples.append(sample)
       # Values for same nodes are aggregated.
-      self.assertEquals(1, len(add_samples))
+      self.assertEqual(1, len(add_samples))
       # Value of "count" should be equal to number of iterations.
-      self.assertEquals(num_iters, add_samples[0].value[0])
+      self.assertEqual(num_iters, add_samples[0].value[0])
 
 
 if __name__ == '__main__':
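
Aside (illustration only, not part of the patch): the profiler test hunks in this area replace assertEquals with assertEqual, and an earlier ragged_tensor_shape hunk replaces assertRegexpMatches with assertRegex. A minimal sketch of the non-deprecated spellings, assuming only the standard-library unittest API (the sample strings below are invented):

    import unittest


    class EqualAndRegexSketch(unittest.TestCase):
      """Illustrates the non-deprecated equality and regex assertions."""

      def test_assert_equal(self):
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(3, len([1, 2, 3]))

      def test_assert_regex(self):
        # assertRegex replaces the deprecated assertRegexpMatches alias;
        # the pattern is searched within the first argument.
        self.assertRegex('RaggedTensorDynamicShape(2, (2, 1), 2, 1)',
                         r'RaggedTensorDynamicShape\(.*\)')


    if __name__ == '__main__':
      unittest.main()
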
diff --git a/tensorflow/python/profiler/tfprof_logger_test.py b/tensorflow/python/profiler/tfprof_logger_test.py
index caf3869f56d..9afdc7af131 100644
--- a/tensorflow/python/profiler/tfprof_logger_test.py
+++ b/tensorflow/python/profiler/tfprof_logger_test.py
@@ -54,10 +54,10 @@ class TFProfLoggerTest(test.TestCase):
     graph2 = ops.Graph()
     # Use copy_op_to_graph to remove shape information.
     y2 = copy_elements.copy_op_to_graph(y, graph2, [])
-    self.assertEquals('<unknown>', str(y2.get_shape()))
+    self.assertEqual('<unknown>', str(y2.get_shape()))
 
     tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
-    self.assertEquals('(2, 2)', str(y2.get_shape()))
+    self.assertEqual('(2, 2)', str(y2.get_shape()))
 
   def testFailedFillMissingShape(self):
     y = self._BuildSmallModel()
@@ -69,10 +69,10 @@ class TFProfLoggerTest(test.TestCase):
 
     graph2 = ops.Graph()
     y2 = copy_elements.copy_op_to_graph(y, graph2, [])
-    self.assertEquals('<unknown>', str(y2.get_shape()))
+    self.assertEqual('<unknown>', str(y2.get_shape()))
     # run_metadata has special name for MatMul, hence failed to fill shape.
     tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
-    self.assertEquals('<unknown>', str(y2.get_shape()))
+    self.assertEqual('<unknown>', str(y2.get_shape()))
   """
 
 
diff --git a/tensorflow/python/saved_model/load_test.py b/tensorflow/python/saved_model/load_test.py
index c392c7feb31..320182385f8 100644
--- a/tensorflow/python/saved_model/load_test.py
+++ b/tensorflow/python/saved_model/load_test.py
@@ -471,8 +471,8 @@ class LoadTest(test.TestCase, parameterized.TestCase):
 
     imported = cycle(root, cycles)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Could not find matching function to call"):
+    with self.assertRaisesRegex(ValueError,
+                                "Could not find matching function to call"):
       imported.f(input2)
 
     self.assertEqual(31, imported.f(input1).numpy())
@@ -547,8 +547,8 @@ class LoadTest(test.TestCase, parameterized.TestCase):
 
     imported = cycle(root, cycles)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Could not find matching function to call.*"):
+    with self.assertRaisesRegex(ValueError,
+                                "Could not find matching function to call.*"):
       imported.f(x, learning_rate=0.5, epochs=4)
 
     self.assertEqual(7, imported.f(x, learning_rate=0.5, epochs=3).numpy())
@@ -840,7 +840,7 @@ class LoadTest(test.TestCase, parameterized.TestCase):
 
     imported = cycle(root, cycles)
 
-    with self.assertRaisesRegexp(ValueError, "Python inputs incompatible"):
+    with self.assertRaisesRegex(ValueError, "Python inputs incompatible"):
       # We cannot call the function with a constant of shape ().
       imported.f(constant_op.constant(2)).numpy()
 
@@ -875,8 +875,8 @@ class LoadTest(test.TestCase, parameterized.TestCase):
 
     self.assertAllEqual([2, 4, 6, 8],
                         concrete(x=constant_op.constant([1, 2, 3, 4])).numpy())
-    with self.assertRaisesRegexp(ValueError,
-                                 "Could not find matching function to call"):
+    with self.assertRaisesRegex(ValueError,
+                                "Could not find matching function to call"):
       imported.f.get_concrete_function(
           tensor_spec.TensorSpec([None], dtypes.int32))
     imported.f.get_concrete_function(
@@ -1183,7 +1183,7 @@ class LoadTest(test.TestCase, parameterized.TestCase):
         signatures={"key": exported.f.get_concrete_function()})
     self.assertEqual(1., imported.signatures["key"]()["output_0"].numpy())
     imported.signatures = {"key1": imported.signatures["key"]}
-    with self.assertRaisesRegexp(ValueError, "signatures"):
+    with self.assertRaisesRegex(ValueError, "signatures"):
       save.save(imported, tempfile.mkdtemp(prefix=self.get_temp_dir()))
 
   def test_signature_loading(self, cycles):
@@ -1346,8 +1346,7 @@ class LoadTest(test.TestCase, parameterized.TestCase):
     self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True])
     self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False])
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Could not find matching function"):
+    with self.assertRaisesRegex(ValueError, "Could not find matching function"):
       root.f(["hello", 1.0])
 
   def test_prefer_specific_trace(self, cycles):
@@ -1568,8 +1567,8 @@ class LoadTest(test.TestCase, parameterized.TestCase):
 
     self.assertEqual(4.0, imported({"a": 3.0}).numpy())
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Could not find matching function to call"):
+    with self.assertRaisesRegex(ValueError,
+                                "Could not find matching function to call"):
       imported({"a": 2.0, "b": 3.0})
 
   def test_shapes_available(self, cycles):
@@ -1741,8 +1740,8 @@ class LoadTest(test.TestCase, parameterized.TestCase):
     del imported
 
     # Try to destroy the resource again, should fail.
-    with self.assertRaisesRegexp(errors.NotFoundError,
-                                 r"Resource .* does not exist."):
+    with self.assertRaisesRegex(errors.NotFoundError,
+                                r"Resource .* does not exist."):
       resource_variable_ops.destroy_resource_op(
           handle, ignore_lookup_error=False)
 
@@ -1859,9 +1858,8 @@ class SingleCycleTests(test.TestCase, parameterized.TestCase):
     self.assertEqual(5, self.evaluate(imported.a))
 
     root.a = variables.Variable(3.)
-    with self.assertRaisesRegexp(
-        ValueError,
-        "object has an attribute named a, which is reserved."):
+    with self.assertRaisesRegex(
+        ValueError, "object has an attribute named a, which is reserved."):
       save.save(root, path)
 
   def test_save_cached_variable(self):
diff --git a/tensorflow/python/saved_model/load_v1_in_v2_test.py b/tensorflow/python/saved_model/load_v1_in_v2_test.py
index 37b439fe649..bafeea128ed 100644
--- a/tensorflow/python/saved_model/load_v1_in_v2_test.py
+++ b/tensorflow/python/saved_model/load_v1_in_v2_test.py
@@ -182,7 +182,7 @@ class LoadTest(test.TestCase):
     return path
 
   def test_multi_meta_graph_loading(self):
-    with self.assertRaisesRegexp(ValueError, "2 MetaGraphs"):
+    with self.assertRaisesRegex(ValueError, "2 MetaGraphs"):
       load.load(self._v1_multi_metagraph_saved_model())
     first_imported = load.load(self._v1_multi_metagraph_saved_model(),
                                tags=["first"])
@@ -191,9 +191,9 @@ class LoadTest(test.TestCase):
                          first_start=constant_op.constant(2.))))
     second_imported = load.load(self._v1_multi_metagraph_saved_model(),
                                 tags=set(["second"]))
-    with self.assertRaisesRegexp(TypeError, "second_start"):
+    with self.assertRaisesRegex(TypeError, "second_start"):
       second_imported.signatures["second_key"](x=constant_op.constant(2.))
-    with self.assertRaisesRegexp(TypeError, "second_start"):
+    with self.assertRaisesRegex(TypeError, "second_start"):
       second_imported.signatures["second_key"](
           second_start=constant_op.constant(2.),
           x=constant_op.constant(2.))
@@ -424,7 +424,7 @@ class LoadTest(test.TestCase):
 
   def test_unfed_placeholder_exception(self):
     path = self._unfed_placeholder_signature()
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         lift_to_graph.UnliftableError,
         "signature needs an input for each placeholder.*\n\nUnable to lift"):
       load.load(path)
diff --git a/tensorflow/python/saved_model/loader_test.py b/tensorflow/python/saved_model/loader_test.py
index 3e27c0801cd..c1072664c3d 100644
--- a/tensorflow/python/saved_model/loader_test.py
+++ b/tensorflow/python/saved_model/loader_test.py
@@ -266,7 +266,7 @@ class SavedModelLoaderTest(test.TestCase, parameterized.TestCase):
     self.assertEqual(graph.get_tensor_by_name("y:0"), ret[0])
     self.assertEqual(graph.get_tensor_by_name("x:0"), ret[1])
 
-    with self.assertRaisesRegexp(ValueError, "not found in graph"):
+    with self.assertRaisesRegex(ValueError, "not found in graph"):
       loader.load_graph(graph, ["foo_graph"], return_elements=["z:0"])
 
 
diff --git a/tensorflow/python/saved_model/model_utils/export_output_test.py b/tensorflow/python/saved_model/model_utils/export_output_test.py
index 13bbeec38b5..8a3f107ce6c 100644
--- a/tensorflow/python/saved_model/model_utils/export_output_test.py
+++ b/tensorflow/python/saved_model/model_utils/export_output_test.py
@@ -39,26 +39,26 @@ class ExportOutputTest(test.TestCase):
   def test_regress_value_must_be_float(self):
     with context.graph_mode():
       value = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1')
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'Regression output value must be a float32 Tensor'):
         export_output_lib.RegressionOutput(value)
 
   def test_classify_classes_must_be_strings(self):
     with context.graph_mode():
       classes = array_ops.placeholder(dtypes.float32, 1, name='output-tensor-1')
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'Classification classes must be a string Tensor'):
         export_output_lib.ClassificationOutput(classes=classes)
 
   def test_classify_scores_must_be_float(self):
     with context.graph_mode():
       scores = array_ops.placeholder(dtypes.string, 1, name='output-tensor-1')
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, 'Classification scores must be a float32 Tensor'):
         export_output_lib.ClassificationOutput(scores=scores)
 
   def test_classify_requires_classes_or_scores(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, 'At least one of scores and classes must be set.'):
       export_output_lib.ClassificationOutput()
 
@@ -216,14 +216,12 @@ class ExportOutputTest(test.TestCase):
     export_output_lib.PredictOutput(constant_op.constant([0]))
 
   def test_predict_outputs_invalid(self):
-    with self.assertRaisesRegexp(
-        ValueError,
-        'Prediction output key must be a string'):
+    with self.assertRaisesRegex(ValueError,
+                                'Prediction output key must be a string'):
       export_output_lib.PredictOutput({1: constant_op.constant([0])})
 
-    with self.assertRaisesRegexp(
-        ValueError,
-        'Prediction output value must be a Tensor'):
+    with self.assertRaisesRegex(ValueError,
+                                'Prediction output value must be a Tensor'):
       export_output_lib.PredictOutput({
           'prediction1': sparse_tensor.SparseTensor(
               indices=[[0, 0]], values=[1], dense_shape=[1, 1]),
@@ -276,13 +274,13 @@ class SupervisedOutputTest(test.TestCase):
     self.assertIsNone(outputter.metrics)
 
   def test_supervised_outputs_invalid(self):
-    with self.assertRaisesRegexp(ValueError, 'predictions output value must'):
+    with self.assertRaisesRegex(ValueError, 'predictions output value must'):
       MockSupervisedOutput(constant_op.constant([0]), [3], None)
-    with self.assertRaisesRegexp(ValueError, 'loss output value must'):
+    with self.assertRaisesRegex(ValueError, 'loss output value must'):
       MockSupervisedOutput('str', None, None)
-    with self.assertRaisesRegexp(ValueError, 'metrics output value must'):
+    with self.assertRaisesRegex(ValueError, 'metrics output value must'):
       MockSupervisedOutput(None, None, (15.3, 4))
-    with self.assertRaisesRegexp(ValueError, 'loss output key must'):
+    with self.assertRaisesRegex(ValueError, 'loss output key must'):
       MockSupervisedOutput({25: 'Tensor'}, None, None)
 
   def test_supervised_outputs_tuples(self):
diff --git a/tensorflow/python/saved_model/model_utils/mode_keys_test.py b/tensorflow/python/saved_model/model_utils/mode_keys_test.py
index 26795ef8b16..b0777e6f0c0 100644
--- a/tensorflow/python/saved_model/model_utils/mode_keys_test.py
+++ b/tensorflow/python/saved_model/model_utils/mode_keys_test.py
@@ -39,7 +39,7 @@ class ModeKeyMapTest(test.TestCase):
       _ = mode_map[mode_keys.KerasModeKeys.TRAIN]
     with self.assertRaises(KeyError):
       _ = mode_map[mode_keys.EstimatorModeKeys.TRAIN]
-    with self.assertRaisesRegexp(ValueError, 'Invalid mode'):
+    with self.assertRaisesRegex(ValueError, 'Invalid mode'):
       _ = mode_map['serve']
 
     # Test common dictionary methods
@@ -54,7 +54,7 @@ class ModeKeyMapTest(test.TestCase):
       mode_map[mode_keys.KerasModeKeys.TEST] = 1
 
   def test_invalid_init(self):
-    with self.assertRaisesRegexp(ValueError, 'Multiple keys/values found'):
+    with self.assertRaisesRegex(ValueError, 'Multiple keys/values found'):
       _ = mode_keys.ModeKeyMap(**{
           mode_keys.KerasModeKeys.PREDICT: 3,
           mode_keys.EstimatorModeKeys.PREDICT: 1
diff --git a/tensorflow/python/saved_model/nested_structure_coder_test.py b/tensorflow/python/saved_model/nested_structure_coder_test.py
index c68bc1017ee..9951ea64a49 100644
--- a/tensorflow/python/saved_model/nested_structure_coder_test.py
+++ b/tensorflow/python/saved_model/nested_structure_coder_test.py
@@ -269,8 +269,8 @@ class NestedStructureTest(test.TestCase):
     encoded = struct_pb2.StructuredValue()
     encoded.type_spec_value.type_spec_class = 0
     encoded.type_spec_value.type_spec_class_name = "FutureTensorSpec"
-    with self.assertRaisesRegexp(
-        ValueError, "The type 'FutureTensorSpec' is not supported"):
+    with self.assertRaisesRegex(ValueError,
+                                "The type 'FutureTensorSpec' is not supported"):
       self._coder.decode_proto(encoded)
 
   def testEncodeDecodeBoundedTensorSpec(self):
diff --git a/tensorflow/python/saved_model/save_test.py b/tensorflow/python/saved_model/save_test.py
index 2b846923dfc..0755f11ff71 100644
--- a/tensorflow/python/saved_model/save_test.py
+++ b/tensorflow/python/saved_model/save_test.py
@@ -176,7 +176,7 @@ class SaveTest(test.TestCase):
       return nested_f()
 
     root.f = f
-    with self.assertRaisesRegexp(ValueError, "ERROR MSG"):
+    with self.assertRaisesRegex(ValueError, "ERROR MSG"):
       save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))
 
   def test_version_information_included(self):
@@ -196,8 +196,7 @@ class SaveTest(test.TestCase):
     root.f = def_function.function(lambda x: 2. * x)
     root.f(constant_op.constant(1.))
     save_dir = os.path.join(self.get_temp_dir(), "saved_model")
-    with self.assertRaisesRegexp(
-        ValueError, "Expected a TensorFlow function"):
+    with self.assertRaisesRegex(ValueError, "Expected a TensorFlow function"):
       save.save(root, save_dir, root.f)
 
   def test_captures_unreachable_variable(self):
@@ -216,7 +215,7 @@ class SaveTest(test.TestCase):
 
     save_dir = os.path.join(self.get_temp_dir(), "saved_model")
 
-    with self.assertRaisesRegexp(KeyError, "not reachable from root"):
+    with self.assertRaisesRegex(KeyError, "not reachable from root"):
       save.save(root, save_dir)
 
   def test_nested_inputs(self):
@@ -233,8 +232,7 @@ class SaveTest(test.TestCase):
     root.f(constant_op.constant(1.))
     to_save = root.f.get_concrete_function(constant_op.constant(1.))
     save_dir = os.path.join(self.get_temp_dir(), "saved_model")
-    with self.assertRaisesRegexp(
-        ValueError, "non-flat outputs"):
+    with self.assertRaisesRegex(ValueError, "non-flat outputs"):
       save.save(root, save_dir, to_save)
 
   def test_nested_dict_outputs(self):
@@ -244,8 +242,8 @@ class SaveTest(test.TestCase):
     root.f(constant_op.constant(1.))
     to_save = root.f.get_concrete_function(constant_op.constant(1.))
     save_dir = os.path.join(self.get_temp_dir(), "saved_model")
-    with self.assertRaisesRegexp(
-        ValueError, "dictionary containing non-Tensor value"):
+    with self.assertRaisesRegex(ValueError,
+                                "dictionary containing non-Tensor value"):
       save.save(root, save_dir, to_save)
 
   def test_variable(self):
@@ -355,7 +353,7 @@ class SaveTest(test.TestCase):
   def test_signature_attribute_reserved(self):
     root = util.Checkpoint(signatures=variables.Variable(1.))
     save_dir = os.path.join(self.get_temp_dir(), "saved_model")
-    with self.assertRaisesRegexp(ValueError, "del obj.signatures"):
+    with self.assertRaisesRegex(ValueError, "del obj.signatures"):
       save.save(root, save_dir)
     del root.signatures
     save.save(root, save_dir)
@@ -395,8 +393,8 @@ class SaveTest(test.TestCase):
       return 1
     root = tracking.AutoTrackable()
     root.f = f.get_concrete_function()
-    with self.assertRaisesRegexp(ValueError,
-                                 "tf.Variable inputs cannot be exported"):
+    with self.assertRaisesRegex(ValueError,
+                                "tf.Variable inputs cannot be exported"):
       save.save(root, os.path.join(self.get_temp_dir(), "saved_model"),
                 signatures=root.f)
 
@@ -472,7 +470,7 @@ class SavingOptionsTest(test.TestCase):
     graph_def = graph_pb2.GraphDef()
     text_format.Merge("node { name: 'A' op: 'Test>CustomOp' }",
                       graph_def)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Attempted to save ops from non-whitelisted namespaces"):
       save._verify_ops(graph_def, [])
     save._verify_ops(graph_def, ["Test"])
@@ -480,7 +478,7 @@ class SavingOptionsTest(test.TestCase):
     # Test with multiple carrots in op name.
     text_format.Merge("node { name: 'A' op: 'Test>>A>CustomOp' }",
                       graph_def)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Attempted to save ops from non-whitelisted namespaces"):
       save._verify_ops(graph_def, [])
     save._verify_ops(graph_def, ["Test"])
@@ -622,7 +620,8 @@ class AssetTests(test.TestCase):
     @def_function.function
     def _calls_save():
       save.save(root, export_dir)
-    with self.assertRaisesRegexp(AssertionError, "tf.function"):
+
+    with self.assertRaisesRegex(AssertionError, "tf.function"):
       _calls_save()
 
 
diff --git a/tensorflow/python/saved_model/saved_model_test.py b/tensorflow/python/saved_model/saved_model_test.py
index c1662af607f..f998bbfce38 100644
--- a/tensorflow/python/saved_model/saved_model_test.py
+++ b/tensorflow/python/saved_model/saved_model_test.py
@@ -172,9 +172,8 @@ class SavedModelTest(SavedModelTestBase):
     export_dir = self._get_export_dir("test_bad_saved_model_file_format")
     # Attempt to load a SavedModel from an export directory that does not exist.
     with self.session(graph=ops.Graph()) as sess:
-      with self.assertRaisesRegexp(IOError,
-                                   "SavedModel file does not exist at: %s" %
-                                   export_dir):
+      with self.assertRaisesRegex(
+          IOError, "SavedModel file does not exist at: %s" % export_dir):
         loader.load(sess, ["foo"], export_dir)
 
     os.makedirs(export_dir)
@@ -183,8 +182,8 @@ class SavedModelTest(SavedModelTestBase):
     with open(path_to_pb, "w") as f:
       f.write("invalid content")
     with self.session(graph=ops.Graph()) as sess:
-      with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
-                                   constants.SAVED_MODEL_FILENAME_PB):
+      with self.assertRaisesRegex(
+          IOError, "Cannot parse file.*%s" % constants.SAVED_MODEL_FILENAME_PB):
         loader.load(sess, ["foo"], export_dir)
 
     # Cleanup the directory and start again.
@@ -197,8 +196,9 @@ class SavedModelTest(SavedModelTestBase):
     with open(path_to_pbtxt, "w") as f:
       f.write("invalid content")
     with self.session(graph=ops.Graph()) as sess:
-      with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
-                                   constants.SAVED_MODEL_FILENAME_PBTXT):
+      with self.assertRaisesRegex(
+          IOError,
+          "Cannot parse file.*%s" % constants.SAVED_MODEL_FILENAME_PBTXT):
         loader.load(sess, ["foo"], export_dir)
 
   @test_util.run_deprecated_v1
@@ -1310,7 +1310,7 @@ class SavedModelTest(SavedModelTestBase):
     # does not have any attr values for the "TestAttr" node, and there is no
     # default specified in the TestAttr OpDef.
     sess = session.Session(graph=ops.Graph())
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"):
       loader.load(sess, ["foo"], export_dir)
 
@@ -1334,9 +1334,8 @@ class SavedModelTest(SavedModelTestBase):
     # Loading the SavedModel via the loader must fail because there is no
     # OpKernel registered to handle T = double.
     sess = session.Session(graph=ops.Graph())
-    with self.assertRaisesRegexp(
-        errors.InvalidArgumentError,
-        "No OpKernel was registered.*DOUBLE"):
+    with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                "No OpKernel was registered.*DOUBLE"):
       loader.load(sess, ["foo"], export_dir)
 
 
@@ -1429,11 +1428,11 @@ class SavedModelV1Test(SavedModelTestBase):
       ops.add_to_collection(key, control_flow_ops.no_op())
       # ValueError should be raised since the LEGACY_INIT_OP_KEY collection
       # is not empty and we don't support multiple init ops.
-      with self.assertRaisesRegexp(ValueError, "Graph already contains"):
+      with self.assertRaisesRegex(ValueError, "Graph already contains"):
         builder.add_meta_graph_and_variables(
             sess, ["foo"], legacy_init_op=init_op)
       # We shouldn't be able to add as MAIN_OP, either.
-      with self.assertRaisesRegexp(ValueError, "Graph already contains"):
+      with self.assertRaisesRegex(ValueError, "Graph already contains"):
         builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op)
 
   def testStripDefaultAttrs(self):
diff --git a/tensorflow/python/saved_model/utils_test.py b/tensorflow/python/saved_model/utils_test.py
index 11d74b7c002..abc93c3455e 100644
--- a/tensorflow/python/saved_model/utils_test.py
+++ b/tensorflow/python/saved_model/utils_test.py
@@ -108,7 +108,7 @@ class UtilsTest(test.TestCase):
 
   def testBuildTensorInfoEager(self):
     x = constant_op.constant(1, name="x")
-    with context.eager_mode(), self.assertRaisesRegexp(
+    with context.eager_mode(), self.assertRaisesRegex(
         RuntimeError, "build_tensor_info is not supported in Eager mode"):
       utils.build_tensor_info(x)
 
diff --git a/tensorflow/python/summary/summary_test.py b/tensorflow/python/summary/summary_test.py
index 64f0f315c58..6dcafed721d 100644
--- a/tensorflow/python/summary/summary_test.py
+++ b/tensorflow/python/summary/summary_test.py
@@ -59,9 +59,9 @@ class SummaryTest(test.TestCase):
       i = constant_op.constant(7)
       with ops.name_scope('outer'):
         im1 = summary_lib.scalar('inner', i, family='family')
-        self.assertEquals(im1.op.name, 'outer/family/inner')
+        self.assertEqual(im1.op.name, 'outer/family/inner')
         im2 = summary_lib.scalar('inner', i, family='family')
-        self.assertEquals(im2.op.name, 'outer/family/inner_1')
+        self.assertEqual(im2.op.name, 'outer/family/inner_1')
       sm1, sm2 = s.run([im1, im2])
     summary = summary_pb2.Summary()
 
@@ -114,7 +114,7 @@ class SummaryTest(test.TestCase):
       i = array_ops.ones((5, 2, 3, 1))
       with ops.name_scope('outer'):
         im = summary_lib.image('inner', i, max_outputs=3, family='family')
-        self.assertEquals(im.op.name, 'outer/family/inner')
+        self.assertEqual(im.op.name, 'outer/family/inner')
       summary_str = s.run(im)
     summary = summary_pb2.Summary()
     summary.ParseFromString(summary_str)
@@ -143,7 +143,7 @@ class SummaryTest(test.TestCase):
       i = array_ops.ones((5, 4, 4, 3))
       with ops.name_scope('outer'):
         summ_op = summary_lib.histogram('inner', i, family='family')
-        self.assertEquals(summ_op.op.name, 'outer/family/inner')
+        self.assertEqual(summ_op.op.name, 'outer/family/inner')
       summary_str = s.run(summ_op)
     summary = summary_pb2.Summary()
     summary.ParseFromString(summary_str)
@@ -177,7 +177,7 @@ class SummaryTest(test.TestCase):
       i = array_ops.ones((5, 3, 4))
       with ops.name_scope('outer'):
         aud = summary_lib.audio('inner', i, 0.2, max_outputs=3, family='family')
-        self.assertEquals(aud.op.name, 'outer/family/inner')
+        self.assertEqual(aud.op.name, 'outer/family/inner')
       summary_str = s.run(aud)
     summary = summary_pb2.Summary()
     summary.ParseFromString(summary_str)
@@ -221,9 +221,9 @@ class SummaryTest(test.TestCase):
     with ops.name_scope('outer'):
       i = constant_op.constant(11)
       summ = summary_lib.scalar('inner', i)
-      self.assertEquals(summ.op.name, 'outer/inner')
+      self.assertEqual(summ.op.name, 'outer/inner')
       summ_f = summary_lib.scalar('inner', i, family='family')
-      self.assertEquals(summ_f.op.name, 'outer/family/inner')
+      self.assertEqual(summ_f.op.name, 'outer/family/inner')
 
     metagraph_def, _ = meta_graph.export_scoped_meta_graph(export_scope='outer')
 
@@ -239,11 +239,11 @@ class SummaryTest(test.TestCase):
         new_summ_str, new_summ_f_str = s.run([new_summ, new_summ_f])
         new_summ_pb = summary_pb2.Summary()
         new_summ_pb.ParseFromString(new_summ_str)
-        self.assertEquals('outer/inner', new_summ_pb.value[0].tag)
+        self.assertEqual('outer/inner', new_summ_pb.value[0].tag)
         new_summ_f_pb = summary_pb2.Summary()
         new_summ_f_pb.ParseFromString(new_summ_f_str)
-        self.assertEquals('family/outer/family/inner',
-                          new_summ_f_pb.value[0].tag)
+        self.assertEqual('family/outer/family/inner',
+                         new_summ_f_pb.value[0].tag)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/python/summary/writer/writer_test.py b/tensorflow/python/summary/writer/writer_test.py
index 2fec2c446f9..19138b1372d 100644
--- a/tensorflow/python/summary/writer/writer_test.py
+++ b/tensorflow/python/summary/writer/writer_test.py
@@ -81,12 +81,12 @@ class FileWriterTestBase(object):
     # The first event should list the file_version.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals("brain.Event:2", ev.file_version)
+    self.assertEqual("brain.Event:2", ev.file_version)
 
     # The next event should have the graph.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(0, ev.step)
+    self.assertEqual(0, ev.step)
     ev_graph = graph_pb2.GraphDef()
     ev_graph.ParseFromString(ev.graph_def)
     self.assertProtoEquals(g.as_graph_def(add_shapes=has_shapes), ev_graph)
@@ -94,7 +94,7 @@ class FileWriterTestBase(object):
     # The next event should have the metagraph.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(0, ev.step)
+    self.assertEqual(0, ev.step)
     ev_meta_graph = meta_graph_pb2.MetaGraphDef()
     ev_meta_graph.ParseFromString(ev.meta_graph_def)
     self.assertProtoEquals(meta_graph_def, ev_meta_graph)
@@ -132,18 +132,18 @@ class FileWriterTestBase(object):
     # The first event should list the file_version.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals("brain.Event:2", ev.file_version)
+    self.assertEqual("brain.Event:2", ev.file_version)
 
     # The next event should be the START message.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(1, ev.step)
-    self.assertEquals(SessionLog.START, ev.session_log.status)
+    self.assertEqual(1, ev.step)
+    self.assertEqual(SessionLog.START, ev.session_log.status)
 
     # The next event should have the value 'mee=10.0'.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(10, ev.step)
+    self.assertEqual(10, ev.step)
     self.assertProtoEquals("""
       value { tag: 'mee' simple_value: 10.0 }
       """, ev.summary)
@@ -151,7 +151,7 @@ class FileWriterTestBase(object):
     # The next event should have the value 'boo=20.0'.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(20, ev.step)
+    self.assertEqual(20, ev.step)
     self.assertProtoEquals("""
       value { tag: 'boo' simple_value: 20.0 }
       """, ev.summary)
@@ -159,7 +159,7 @@ class FileWriterTestBase(object):
     # The next event should have the graph_def.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(30, ev.step)
+    self.assertEqual(30, ev.step)
     ev_graph = graph_pb2.GraphDef()
     ev_graph.ParseFromString(ev.graph_def)
     self.assertProtoEquals(g.as_graph_def(add_shapes=True), ev_graph)
@@ -167,8 +167,8 @@ class FileWriterTestBase(object):
     # The next event should have metadata for the run.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(40, ev.step)
-    self.assertEquals("test run", ev.tagged_run_metadata.tag)
+    self.assertEqual(40, ev.step)
+    self.assertEqual("test run", ev.tagged_run_metadata.tag)
     parsed_run_metadata = config_pb2.RunMetadata()
     parsed_run_metadata.ParseFromString(ev.tagged_run_metadata.run_metadata)
     self.assertProtoEquals(run_metadata, parsed_run_metadata)
@@ -245,19 +245,19 @@ class FileWriterTestBase(object):
 
     # We should now have 2 events files.
     event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
-    self.assertEquals(2, len(event_paths))
+    self.assertEqual(2, len(event_paths))
 
     # Check the first file contents.
     rr = summary_iterator.summary_iterator(event_paths[0])
     # The first event should list the file_version.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals("brain.Event:2", ev.file_version)
+    self.assertEqual("brain.Event:2", ev.file_version)
     # The next event should be the START message.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(1, ev.step)
-    self.assertEquals(SessionLog.START, ev.session_log.status)
+    self.assertEqual(1, ev.step)
+    self.assertEqual(SessionLog.START, ev.session_log.status)
     # We should be done.
     self.assertRaises(StopIteration, lambda: next(rr))
 
@@ -266,12 +266,12 @@ class FileWriterTestBase(object):
     # The first event should list the file_version.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals("brain.Event:2", ev.file_version)
+    self.assertEqual("brain.Event:2", ev.file_version)
     # The next event should be the START message.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(2, ev.step)
-    self.assertEquals(SessionLog.START, ev.session_log.status)
+    self.assertEqual(2, ev.step)
+    self.assertEqual(SessionLog.START, ev.session_log.status)
     # We should be done.
     self.assertRaises(StopIteration, lambda: next(rr))
 
@@ -307,7 +307,7 @@ class FileWriterTestBase(object):
     with self._FileWriter(test_dir) as sw:
       sw.add_session_log(event_pb2.SessionLog(status=SessionLog.START), 1)
     event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
-    self.assertEquals(1, len(event_paths))
+    self.assertEqual(1, len(event_paths))
 
   # Checks that values returned from session Run() calls are added correctly to
   # summaries.  These are numpy types so we need to check they fit in the
@@ -336,13 +336,13 @@ class FileWriterTestBase(object):
     ev = next(rr)
     self.assertTrue(ev)
     self._assertRecent(ev.wall_time)
-    self.assertEquals("brain.Event:2", ev.file_version)
+    self.assertEqual("brain.Event:2", ev.file_version)
 
     # Summary passed serialized.
     ev = next(rr)
     self.assertTrue(ev)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(1, ev.step)
+    self.assertEqual(1, ev.step)
     self.assertProtoEquals("""
       value { tag: 'i' simple_value: 1.0 }
       """, ev.summary)
@@ -351,7 +351,7 @@ class FileWriterTestBase(object):
     ev = next(rr)
     self.assertTrue(ev)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(2, ev.step)
+    self.assertEqual(2, ev.step)
     self.assertProtoEquals("""
       value { tag: 'l' simple_value: 2.0 }
       """, ev.summary)
@@ -383,13 +383,13 @@ class FileWriterTestBase(object):
     # The first event should list the file_version.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals("brain.Event:2", ev.file_version)
+    self.assertEqual("brain.Event:2", ev.file_version)
 
     # The next event should be the START message.
     ev = next(rr)
     self._assertRecent(ev.wall_time)
-    self.assertEquals(1, ev.step)
-    self.assertEquals(SessionLog.START, ev.session_log.status)
+    self.assertEqual(1, ev.step)
+    self.assertEqual(SessionLog.START, ev.session_log.status)
 
     # This is the first event with tag foo. It should contain SummaryMetadata.
     ev = next(rr)
diff --git a/tensorflow/python/tools/saved_model_cli_test.py b/tensorflow/python/tools/saved_model_cli_test.py
index cc9e2f21ddc..0baca7fef55 100644
--- a/tensorflow/python/tools/saved_model_cli_test.py
+++ b/tensorflow/python/tools/saved_model_cli_test.py
@@ -605,7 +605,7 @@ Defined Functions:
         'regress_x_to_y', '--input_examples', 'inputs={"x":8.0,"x2":5.0}',
         '--outdir', output_dir
     ])
-    with self.assertRaisesRegexp(ValueError, 'must be a list'):
+    with self.assertRaisesRegex(ValueError, 'must be a list'):
       saved_model_cli.run(args)
 
   def testRunCommandInputExamplesFeatureValueNotListError(self):
@@ -617,7 +617,7 @@ Defined Functions:
         'regress_x_to_y', '--input_examples', 'inputs=[{"x":8.0,"x2":5.0}]',
         '--outdir', output_dir
     ])
-    with self.assertRaisesRegexp(ValueError, 'feature value must be a list'):
+    with self.assertRaisesRegex(ValueError, 'feature value must be a list'):
       saved_model_cli.run(args)
 
   def testRunCommandInputExamplesFeatureBadType(self):
@@ -629,7 +629,7 @@ Defined Functions:
         'regress_x_to_y', '--input_examples', 'inputs=[{"x":[[1],[2]]}]',
         '--outdir', output_dir
     ])
-    with self.assertRaisesRegexp(ValueError, 'is not supported'):
+    with self.assertRaisesRegex(ValueError, 'is not supported'):
       saved_model_cli.run(args)
 
   def testRunCommandOutputFileExistError(self):
@@ -725,7 +725,7 @@ Defined Functions:
          '--output_prefix', output_dir,
          '--cpp_class', 'Compiled',
          '--signature_def_key', 'MISSING'])
-    with self.assertRaisesRegexp(ValueError, 'Unable to find signature_def'):
+    with self.assertRaisesRegex(ValueError, 'Unable to find signature_def'):
       saved_model_cli.aot_compile_cpu(args)
 
   class AOTCompileDummyModel(tracking.AutoTrackable):
@@ -791,7 +791,7 @@ Defined Functions:
     ])  # Use the default serving signature_key.
     with test.mock.patch.object(logging, 'warn') as captured_warn:
       saved_model_cli.aot_compile_cpu(args)
-    self.assertRegexpMatches(
+    self.assertRegex(
         str(captured_warn.call_args),
         'Signature input key \'y\'.*has been pruned while freezing the graph.')
     self.assertTrue(file_io.file_exists('{}.o'.format(output_prefix)))
diff --git a/tensorflow/python/tools/saved_model_utils_test.py b/tensorflow/python/tools/saved_model_utils_test.py
index 5512dea1f74..b36862dfe44 100644
--- a/tensorflow/python/tools/saved_model_utils_test.py
+++ b/tensorflow/python/tools/saved_model_utils_test.py
@@ -59,7 +59,7 @@ class SavedModelUtilTest(test.TestCase):
 
   def testReadSavedModelInvalid(self):
     saved_model_dir = os.path.join(test.get_temp_dir(), "invalid_saved_model")
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         IOError, "SavedModel file does not exist at: %s" % saved_model_dir):
       saved_model_utils.read_saved_model(saved_model_dir)
 
diff --git a/tensorflow/python/tpu/feature_column_v2_test.py b/tensorflow/python/tpu/feature_column_v2_test.py
index 932fe4e5a0a..ba5ea41754e 100644
--- a/tensorflow/python/tpu/feature_column_v2_test.py
+++ b/tensorflow/python/tpu/feature_column_v2_test.py
@@ -411,7 +411,7 @@ class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
           embedding_lookup_device='cpu',
           tensor_core_shape=[None, 3])
     dense_features = fc_lib.DenseFeatures(embedding_column)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'.*embedding_lookup_device=\"cpu\" during training is not'):
       dense_features(input_features)
@@ -432,7 +432,7 @@ class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
     context = tpu._TPUInferenceContext('tpu_inference')
     context.Enter()
     dense_features = fc_lib.DenseFeatures(embedding_column)
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         r'Using embedding_lookup_device=tpu_embedding_core during inference is '
     ):
@@ -522,7 +522,7 @@ class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
       dense_features = fc_lib.DenseFeatures(embedding_column)
       # Sqrtn combiner not supported for now.
       if combiner == 'sqrtn':
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, 'Dense TPU Embedding does not support combiner'):
           embedding_lookup = dense_features(input_features)
         return
@@ -633,8 +633,7 @@ class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
   def test_error_dense_shape_invalid(self):
     categorical_column_input = fc_lib.categorical_column_with_identity(
         key='inp', num_buckets=5)
-    with self.assertRaisesRegexp(ValueError,
-                                 'tensor_core_shape must be size 2'):
+    with self.assertRaisesRegex(ValueError, 'tensor_core_shape must be size 2'):
       tpu_fc.shared_embedding_columns_v2([categorical_column_input],
                                          dimension=20,
                                          tensor_core_shape=[None, 20, 15])
diff --git a/tensorflow/python/tpu/tpu_test.py b/tensorflow/python/tpu/tpu_test.py
index beaa17715ed..c1a7e4dae92 100644
--- a/tensorflow/python/tpu/tpu_test.py
+++ b/tensorflow/python/tpu/tpu_test.py
@@ -130,12 +130,12 @@ class TPUGraphPruneTest(test.TestCase):
           tpu._TPU_REPLICATE_ATTR)
       self.assertEqual(b"0", x)
       # Verify that ops "b" and "y" have TPU_REPLICATE_ATTR removed.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "Operation \'import/b\' has no attr named \'_tpu_replicate\'"):
         graph.get_operation_by_name("import/b").get_attr(
             tpu._TPU_REPLICATE_ATTR)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError,
           "Operation \'import/y\' has no attr named \'_tpu_replicate\'"):
         graph.get_operation_by_name("import/y").get_attr(
diff --git a/tensorflow/python/training/adadelta_test.py b/tensorflow/python/training/adadelta_test.py
index 5bc2937e144..9b84a2efb65 100644
--- a/tensorflow/python/training/adadelta_test.py
+++ b/tensorflow/python/training/adadelta_test.py
@@ -86,19 +86,19 @@ class AdadeltaOptimizerTest(test.TestCase):
             self.assertEqual(["accum", "accum_update"],
                              adadelta_opt.get_slot_names())
             slot[0] = adadelta_opt.get_slot(var0, "accum")
-            self.assertEquals(slot[0].get_shape(), var0.get_shape())
+            self.assertEqual(slot[0].get_shape(), var0.get_shape())
             self.assertFalse(slot[0] in variables.trainable_variables())
 
             slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
-            self.assertEquals(slot_update[0].get_shape(), var0.get_shape())
+            self.assertEqual(slot_update[0].get_shape(), var0.get_shape())
             self.assertFalse(slot_update[0] in variables.trainable_variables())
 
             slot[1] = adadelta_opt.get_slot(var1, "accum")
-            self.assertEquals(slot[1].get_shape(), var1.get_shape())
+            self.assertEqual(slot[1].get_shape(), var1.get_shape())
             self.assertFalse(slot[1] in variables.trainable_variables())
 
             slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
-            self.assertEquals(slot_update[1].get_shape(), var1.get_shape())
+            self.assertEqual(slot_update[1].get_shape(), var1.get_shape())
             self.assertFalse(slot_update[1] in variables.trainable_variables())
 
           # Fetch params to validate initial values
diff --git a/tensorflow/python/training/adagrad_test.py b/tensorflow/python/training/adagrad_test.py
index 4c0ee1c66f5..60cef8a17bb 100644
--- a/tensorflow/python/training/adagrad_test.py
+++ b/tensorflow/python/training/adagrad_test.py
@@ -286,9 +286,9 @@ class AdagradOptimizerTest(test.TestCase):
             zip([grads0, grads1], [var0, var1]))
         self.assertEqual(["accumulator"], ada_opt.get_slot_names())
         slot0 = ada_opt.get_slot(var0, "accumulator")
-        self.assertEquals(slot0.get_shape(), var0.get_shape())
+        self.assertEqual(slot0.get_shape(), var0.get_shape())
         slot1 = ada_opt.get_slot(var1, "accumulator")
-        self.assertEquals(slot1.get_shape(), var1.get_shape())
+        self.assertEqual(slot1.get_shape(), var1.get_shape())
         self.evaluate(variables.global_variables_initializer())
 
         # Fetch params to validate initial values.
diff --git a/tensorflow/python/training/basic_loops_test.py b/tensorflow/python/training/basic_loops_test.py
index 511a8334d56..748116331dc 100644
--- a/tensorflow/python/training/basic_loops_test.py
+++ b/tensorflow/python/training/basic_loops_test.py
@@ -71,7 +71,7 @@ class BasicTrainLoopTest(test.TestCase):
     train_fn.counter = 0
 
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(RuntimeError, "Failed"):
+      with self.assertRaisesRegex(RuntimeError, "Failed"):
         basic_loops.basic_train_loop(sv, train_fn)
 
   @test_util.run_deprecated_v1
@@ -96,9 +96,9 @@ class BasicTrainLoopTest(test.TestCase):
 
     with ops.Graph().as_default():
       aar = AbortAndRetry()
-      with self.assertRaisesRegexp(RuntimeError, "Failed Again"):
+      with self.assertRaisesRegex(RuntimeError, "Failed Again"):
         basic_loops.basic_train_loop(sv, aar.train_fn)
-      self.assertEquals(0, aar.retries_left)
+      self.assertEqual(0, aar.retries_left)
 
 
 if __name__ == "__main__":
diff --git a/tensorflow/python/training/basic_session_run_hooks_test.py b/tensorflow/python/training/basic_session_run_hooks_test.py
index 678fea89f9e..9acce5e61aa 100644
--- a/tensorflow/python/training/basic_session_run_hooks_test.py
+++ b/tensorflow/python/training/basic_session_run_hooks_test.py
@@ -233,14 +233,14 @@ class LoggingTensorHookTest(test.TestCase):
     tf_logging.info = self._actual_log
 
   def test_illegal_args(self):
-    with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
+    with self.assertRaisesRegex(ValueError, 'nvalid every_n_iter'):
       basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=0)
-    with self.assertRaisesRegexp(ValueError, 'nvalid every_n_iter'):
+    with self.assertRaisesRegex(ValueError, 'nvalid every_n_iter'):
       basic_session_run_hooks.LoggingTensorHook(tensors=['t'], every_n_iter=-10)
-    with self.assertRaisesRegexp(ValueError, 'xactly one of'):
+    with self.assertRaisesRegex(ValueError, 'xactly one of'):
       basic_session_run_hooks.LoggingTensorHook(
           tensors=['t'], every_n_iter=5, every_n_secs=5)
-    with self.assertRaisesRegexp(ValueError, 'xactly one of'):
+    with self.assertRaisesRegex(ValueError, 'xactly one of'):
       basic_session_run_hooks.LoggingTensorHook(tensors=['t'])
 
   def test_print_at_end_only(self):
@@ -259,7 +259,7 @@ class LoggingTensorHookTest(test.TestCase):
         self.assertEqual(str(self.logged_message).find(t.name), -1)
 
       hook.end(sess)
-      self.assertRegexpMatches(str(self.logged_message), t.name)
+      self.assertRegex(str(self.logged_message), t.name)
 
   def _validate_print_every_n_steps(self, sess, at_end):
     t = constant_op.constant(42.0, name='foo')
@@ -271,7 +271,7 @@ class LoggingTensorHookTest(test.TestCase):
     mon_sess = monitored_session._HookedSession(sess, [hook])
     self.evaluate(variables_lib.global_variables_initializer())
     mon_sess.run(train_op)
-    self.assertRegexpMatches(str(self.logged_message), t.name)
+    self.assertRegex(str(self.logged_message), t.name)
     for _ in range(3):
       self.logged_message = ''
       for _ in range(9):
@@ -279,7 +279,7 @@ class LoggingTensorHookTest(test.TestCase):
         # assertNotRegexpMatches is not supported by python 3.1 and later
         self.assertEqual(str(self.logged_message).find(t.name), -1)
       mon_sess.run(train_op)
-      self.assertRegexpMatches(str(self.logged_message), t.name)
+      self.assertRegex(str(self.logged_message), t.name)
 
     # Add additional run to verify proper reset when called multiple times.
     self.logged_message = ''
@@ -290,7 +290,7 @@ class LoggingTensorHookTest(test.TestCase):
     self.logged_message = ''
     hook.end(sess)
     if at_end:
-      self.assertRegexpMatches(str(self.logged_message), t.name)
+      self.assertRegex(str(self.logged_message), t.name)
     else:
       # assertNotRegexpMatches is not supported by python 3.1 and later
       self.assertEqual(str(self.logged_message).find(t.name), -1)
@@ -318,7 +318,7 @@ class LoggingTensorHookTest(test.TestCase):
       mon_sess = monitored_session._HookedSession(sess, [hook])
       self.evaluate(variables_lib.global_variables_initializer())
       mon_sess.run(train_op)
-      self.assertRegexpMatches(str(self.logged_message), 'foo')
+      self.assertRegex(str(self.logged_message), 'foo')
       # in first run, elapsed time is None.
       self.assertEqual(str(self.logged_message).find('sec'), -1)
 
@@ -333,7 +333,7 @@ class LoggingTensorHookTest(test.TestCase):
     self.evaluate(variables_lib.global_variables_initializer())
 
     mon_sess.run(train_op)
-    self.assertRegexpMatches(str(self.logged_message), t.name)
+    self.assertRegex(str(self.logged_message), t.name)
 
     # assertNotRegexpMatches is not supported by python 3.1 and later
     self.logged_message = ''
@@ -343,12 +343,12 @@ class LoggingTensorHookTest(test.TestCase):
 
     self.logged_message = ''
     mon_sess.run(train_op)
-    self.assertRegexpMatches(str(self.logged_message), t.name)
+    self.assertRegex(str(self.logged_message), t.name)
 
     self.logged_message = ''
     hook.end(sess)
     if at_end:
-      self.assertRegexpMatches(str(self.logged_message), t.name)
+      self.assertRegex(str(self.logged_message), t.name)
     else:
       # assertNotRegexpMatches is not supported by python 3.1 and later
       self.assertEqual(str(self.logged_message).find(t.name), -1)
@@ -1070,9 +1070,8 @@ class StepCounterHookTest(test.TestCase):
       with test.mock.patch.object(tf_logging, 'log_first_n') as mock_log:
         for _ in range(30):
           mon_sess.run(train_op)
-        self.assertRegexpMatches(
-            str(mock_log.call_args),
-            'global step.*has not been increased')
+        self.assertRegex(
+            str(mock_log.call_args), 'global step.*has not been increased')
       hook.end(sess)
 
   def _setup_steps_per_run_test(self,
@@ -1422,12 +1421,11 @@ class FinalOpsHookTest(test.TestCase):
       with session_lib.Session() as session:
         session.run(read_ops)
         with test.mock.patch.object(tf_logging, 'warning') as mock_log:
-          with self.assertRaisesRegexp(errors.OutOfRangeError,
-                                       'End of sequence'):
+          with self.assertRaisesRegex(errors.OutOfRangeError,
+                                      'End of sequence'):
             hook.end(session)
-          self.assertRegexpMatches(
-              str(mock_log.call_args),
-              'dependency back to some input source')
+          self.assertRegex(
+              str(mock_log.call_args), 'dependency back to some input source')
 
   def test_final_ops_with_dictionary(self):
     with ops.Graph().as_default():
diff --git a/tensorflow/python/training/checkpoint_management_test.py b/tensorflow/python/training/checkpoint_management_test.py
index 34666e32ab6..f8c45306168 100644
--- a/tensorflow/python/training/checkpoint_management_test.py
+++ b/tensorflow/python/training/checkpoint_management_test.py
@@ -80,7 +80,7 @@ class LatestCheckpointWithRelativePaths(test.TestCase):
 
           # Should fail.
           saver = saver_module.Saver(sharded=False)
-          with self.assertRaisesRegexp(ValueError, "collides with"):
+          with self.assertRaisesRegex(ValueError, "collides with"):
             saver.save(sess, filepath)
 
           # Succeeds: the file will be named "checkpoint-<step>".
@@ -507,7 +507,7 @@ class CheckpointManagerTest(test.TestCase):
     with test.mock.patch.object(logging, "warning") as mock_log:
       second_manager = checkpoint_management.CheckpointManager(
           checkpoint, directory, max_to_keep=1)
-      self.assertRegexpMatches(
+      self.assertRegex(
           str(mock_log.call_args),
           "behind the last preserved checkpoint timestamp")
     # We should err on the side of keeping checkpoints around when we're not
diff --git a/tensorflow/python/training/coordinator_test.py b/tensorflow/python/training/coordinator_test.py
index f294df97b4b..4795ae5a7a4 100644
--- a/tensorflow/python/training/coordinator_test.py
+++ b/tensorflow/python/training/coordinator_test.py
@@ -154,7 +154,7 @@ class CoordinatorTest(test.TestCase):
         t.start()
       wait_for_stop_ev.set()
       has_stopped_ev.wait()
-      with self.assertRaisesRegexp(RuntimeError, "threads still running"):
+      with self.assertRaisesRegex(RuntimeError, "threads still running"):
         coord.join(threads, stop_grace_period_secs=stop_grace_period)
 
     TestWithGracePeriod(1e-10)
@@ -194,7 +194,7 @@ class CoordinatorTest(test.TestCase):
 
     ev_1.set()
 
-    with self.assertRaisesRegexp(RuntimeError, "First"):
+    with self.assertRaisesRegex(RuntimeError, "First"):
       coord.join(threads)
 
   def testJoinRaiseReportException(self):
@@ -213,7 +213,7 @@ class CoordinatorTest(test.TestCase):
       t.start()
 
     ev_1.set()
-    with self.assertRaisesRegexp(RuntimeError, "First"):
+    with self.assertRaisesRegex(RuntimeError, "First"):
       coord.join(threads)
 
   def testJoinIgnoresOutOfRange(self):
@@ -261,7 +261,7 @@ class CoordinatorTest(test.TestCase):
       t.start()
 
     ev_1.set()
-    with self.assertRaisesRegexp(RuntimeError, "First"):
+    with self.assertRaisesRegex(RuntimeError, "First"):
       coord.join(threads)
 
   def testClearStopClearsExceptionToo(self):
@@ -275,7 +275,7 @@ class CoordinatorTest(test.TestCase):
     for t in threads:
       t.start()
 
-    with self.assertRaisesRegexp(RuntimeError, "First"):
+    with self.assertRaisesRegex(RuntimeError, "First"):
       ev_1.set()
       coord.join(threads)
     coord.clear_stop()
@@ -286,7 +286,7 @@ class CoordinatorTest(test.TestCase):
     ]
     for t in threads:
       t.start()
-    with self.assertRaisesRegexp(RuntimeError, "Second"):
+    with self.assertRaisesRegex(RuntimeError, "Second"):
       ev_1.set()
       coord.join(threads)
 
@@ -295,7 +295,7 @@ class CoordinatorTest(test.TestCase):
     # Join the coordinator right away.
     coord.join([])
     reported = False
-    with self.assertRaisesRegexp(RuntimeError, "Too late"):
+    with self.assertRaisesRegex(RuntimeError, "Too late"):
       try:
         raise RuntimeError("Too late")
       except RuntimeError as e:
@@ -308,7 +308,7 @@ class CoordinatorTest(test.TestCase):
       raise RuntimeError("After clear")
     except RuntimeError as e:
       coord.request_stop(e)
-    with self.assertRaisesRegexp(RuntimeError, "After clear"):
+    with self.assertRaisesRegex(RuntimeError, "After clear"):
       coord.join([])
 
   def testRequestStopRaisesIfJoined_ExcInfo(self):
@@ -317,7 +317,7 @@ class CoordinatorTest(test.TestCase):
     # Join the coordinator right away.
     coord.join([])
     reported = False
-    with self.assertRaisesRegexp(RuntimeError, "Too late"):
+    with self.assertRaisesRegex(RuntimeError, "Too late"):
       try:
         raise RuntimeError("Too late")
       except RuntimeError:
@@ -330,7 +330,7 @@ class CoordinatorTest(test.TestCase):
       raise RuntimeError("After clear")
     except RuntimeError:
       coord.request_stop(sys.exc_info())
-    with self.assertRaisesRegexp(RuntimeError, "After clear"):
+    with self.assertRaisesRegex(RuntimeError, "After clear"):
       coord.join([])
 
 
diff --git a/tensorflow/python/training/experimental/loss_scale_optimizer_test.py b/tensorflow/python/training/experimental/loss_scale_optimizer_test.py
index 7e4e6983905..46d71fd8cbb 100644
--- a/tensorflow/python/training/experimental/loss_scale_optimizer_test.py
+++ b/tensorflow/python/training/experimental/loss_scale_optimizer_test.py
@@ -310,7 +310,7 @@ class MixedPrecisionLossScaleOptimizerTest(test.TestCase,
 
   def testPassingNoneToLossScale(self):
     opt = gradient_descent.GradientDescentOptimizer(1.0)
-    with self.assertRaisesRegexp(ValueError, r'loss_scale cannot be None'):
+    with self.assertRaisesRegex(ValueError, r'loss_scale cannot be None'):
       loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(opt, None)
 
 
diff --git a/tensorflow/python/training/experimental/loss_scaling_gradient_tape_test.py b/tensorflow/python/training/experimental/loss_scaling_gradient_tape_test.py
index bdac125ee82..5c6b4d71649 100644
--- a/tensorflow/python/training/experimental/loss_scaling_gradient_tape_test.py
+++ b/tensorflow/python/training/experimental/loss_scaling_gradient_tape_test.py
@@ -214,7 +214,7 @@ class LossScaleGradientTapeTest(test.TestCase, parameterized.TestCase):
       y = x * x
       z = y * y
     g.gradient(z, x)
-    with self.assertRaisesRegexp(RuntimeError, 'persistent'):
+    with self.assertRaisesRegex(RuntimeError, 'persistent'):
       g.gradient(y, x)
 
   @test_combinations.generate(test_combinations.combine(
@@ -512,7 +512,7 @@ class LossScaleGradientTapeTest(test.TestCase, parameterized.TestCase):
       self.assertAllEqual(dy_dx, np.full((2, 3), 2.))
 
   def test_passing_non_loss_scale_raises_error(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         '`loss_scale` must be an instance of LossScale, but got: 2.0'):
       lsgt.LossScaleGradientTape(2.0)
@@ -522,7 +522,7 @@ class LossScaleGradientTapeTest(test.TestCase, parameterized.TestCase):
     x = variables.Variable([1.0, 2.0])
     with lsgt.LossScaleGradientTape(loss_scale) as g:
       y = x * 2
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         NotImplementedError,
         'LossScaleGradientTape.jacobian is not yet implemented'):
       g.jacobian(y, x)
@@ -530,7 +530,7 @@ class LossScaleGradientTapeTest(test.TestCase, parameterized.TestCase):
     x = variables.Variable([[1.0, 2.0], [3.0, 4.0]])
     with lsgt.LossScaleGradientTape(loss_scale) as g:
       y = x * 2
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         NotImplementedError,
         'LossScaleGradientTape.batch_jacobian is not yet implemented'):
       g.batch_jacobian(y, x)
diff --git a/tensorflow/python/training/experimental/mixed_precision_test.py b/tensorflow/python/training/experimental/mixed_precision_test.py
index 2ce93245413..c3b7b94b8c8 100644
--- a/tensorflow/python/training/experimental/mixed_precision_test.py
+++ b/tensorflow/python/training/experimental/mixed_precision_test.py
@@ -85,7 +85,7 @@ class MixedPrecisionTest(test.TestCase, parameterized.TestCase):
     else:
       expected_regex = ('"opt" must be an instance of a tf.train.Optimizer or '
                         'a tf.keras.optimizers.Optimizer, but got')
-    with self.assertRaisesRegexp(ValueError, expected_regex):
+    with self.assertRaisesRegex(ValueError, expected_regex):
       enable_mixed_precision_graph_rewrite(opt)
     self.assertFalse(config.get_optimizer_experimental_options()
                      .get('auto_mixed_precision', False))
@@ -93,9 +93,9 @@ class MixedPrecisionTest(test.TestCase, parameterized.TestCase):
     opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
     opt = loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
                                                                    'dynamic')
-    with self.assertRaisesRegexp(ValueError,
-                                 '"opt" must not already be an instance of a '
-                                 'MixedPrecisionLossScaleOptimizer.'):
+    with self.assertRaisesRegex(
+        ValueError, '"opt" must not already be an instance of a '
+        'MixedPrecisionLossScaleOptimizer.'):
       enable_mixed_precision_graph_rewrite(opt)
     self.assertFalse(config.get_optimizer_experimental_options()
                      .get('auto_mixed_precision', False))
diff --git a/tensorflow/python/training/input_test.py b/tensorflow/python/training/input_test.py
index e874aaa3fa8..3dc889a7895 100644
--- a/tensorflow/python/training/input_test.py
+++ b/tensorflow/python/training/input_test.py
@@ -148,7 +148,7 @@ class InputProducerTest(test_lib.TestCase):
   @test_util.run_deprecated_v1
   def testShapeError(self):
     input_tensor = array_ops.placeholder(dtypes.float32, None)
-    with self.assertRaisesRegexp(ValueError, "fully defined shape"):
+    with self.assertRaisesRegex(ValueError, "fully defined shape"):
       _ = inp.input_producer(input_tensor)
 
 
@@ -268,7 +268,7 @@ class StringInputProducerTest(test_lib.TestCase):
           # writing of the `tf.Graph` object. However, many users
           # write code this way, so we include this test to ensure
           # that we can support it.
-          self.assertEquals(string, self.evaluate(queue.dequeue()))
+          self.assertEqual(string, self.evaluate(queue.dequeue()))
       coord.request_stop()
       coord.join(threads)
 
@@ -440,23 +440,23 @@ class DictHelperTest(test_lib.TestCase):
   def testListInputs(self):
     l = [1, 2, 3, 11, 22, 33]
     l2 = inp._as_tensor_list(l)
-    self.assertEquals(l, l2)
+    self.assertEqual(l, l2)
     l3 = inp._as_original_type(l, l2)
-    self.assertEquals(l, l3)
+    self.assertEqual(l, l3)
 
   def testDictInputs(self):
     d = {"a": 1, "b": 2, "c": 3, "aa": 11, "bb": 22, "cc": 33}
     l = inp._as_tensor_list(d)
-    self.assertEquals([1, 11, 2, 22, 3, 33], l)
+    self.assertEqual([1, 11, 2, 22, 3, 33], l)
     d2 = inp._as_original_type(d, l)
-    self.assertEquals(d, d2)
+    self.assertEqual(d, d2)
 
   def testHeterogeneousKeysDictInputs(self):
     d = {"z": 1, 1: 42, ("a", "b"): 100}
     l = inp._as_tensor_list(d)
-    self.assertEquals([100, 42, 1], l)
+    self.assertEqual([100, 42, 1], l)
     d2 = inp._as_original_type(d, l)
-    self.assertEquals(d, d2)
+    self.assertEqual(d, d2)
 
 
 class BatchTest(test_lib.TestCase):
@@ -790,7 +790,7 @@ class BatchTest(test_lib.TestCase):
   def testCannotInferRankError(self):
     with self.cached_session():
       x = array_ops.placeholder(dtype=dtypes.int64)
-      with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
+      with self.assertRaisesRegex(ValueError, "Cannot infer Tensor's rank"):
         inp.batch([x], batch_size=2)
 
   @test_util.run_deprecated_v1
@@ -900,20 +900,20 @@ class BatchTest(test_lib.TestCase):
   @test_util.run_deprecated_v1
   def testInvalidKeepInputVector(self):
     # Can't have vector `keep_input` with `enqueue_many=False`.
-    with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
+    with self.assertRaisesRegex(ValueError, "`keep_input` cannot be a vector"):
       inp.maybe_batch([array_ops.zeros(5)],
                       keep_input=constant_op.constant([True, False]),
                       batch_size=1,
                       enqueue_many=False)
     # Can't have `keep_input` with more than one dimension.
-    with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
+    with self.assertRaisesRegex(ValueError, "must be 0 or 1 dimensions"):
       inp.maybe_batch([array_ops.zeros(5)],
                       keep_input=constant_op.constant([[True], [False]]),
                       batch_size=1,
                       enqueue_many=True)
     # `keep_input` must have dimensions determined at graph construction.
-    with self.assertRaisesRegexp(ValueError,
-                                 "must be known at graph construction"):
+    with self.assertRaisesRegex(ValueError,
+                                "must be known at graph construction"):
       inp.maybe_batch([array_ops.zeros(5)],
                       keep_input=array_ops.placeholder(dtypes.bool),
                       batch_size=1,
@@ -1114,7 +1114,7 @@ class BatchJoinTest(test_lib.TestCase):
 
   @test_util.run_deprecated_v1
   def testMismatchedDictKeys(self):
-    with self.assertRaisesRegexp(ValueError, "must have the same keys"):
+    with self.assertRaisesRegex(ValueError, "must have the same keys"):
       inp.batch_join(
           [{
               "c": 12,
@@ -1437,7 +1437,7 @@ class BatchJoinTest(test_lib.TestCase):
   def testCannotInferRankError(self):
     with self.cached_session():
       x = array_ops.placeholder(dtype=dtypes.int64)
-      with self.assertRaisesRegexp(ValueError, "Cannot infer Tensor's rank"):
+      with self.assertRaisesRegex(ValueError, "Cannot infer Tensor's rank"):
         inp.batch_join([[x]], batch_size=2)
 
   @test_util.run_deprecated_v1
@@ -1514,20 +1514,20 @@ class BatchJoinTest(test_lib.TestCase):
   @test_util.run_deprecated_v1
   def testInvalidKeepInputVector(self):
     # Can't have vector `keep_input` with `enqueue_many=False`.
-    with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
+    with self.assertRaisesRegex(ValueError, "`keep_input` cannot be a vector"):
       inp.maybe_batch_join([[array_ops.zeros(5)]],
                            keep_input=constant_op.constant([True, False]),
                            batch_size=1,
                            enqueue_many=False)
     # Can't have `keep_input` with more than one dimension.
-    with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
+    with self.assertRaisesRegex(ValueError, "must be 0 or 1 dimensions"):
       inp.maybe_batch_join([[array_ops.zeros(5)]],
                            keep_input=constant_op.constant([[True], [False]]),
                            batch_size=1,
                            enqueue_many=True)
     # `keep_input` must have dimensions determined at graph construction.
-    with self.assertRaisesRegexp(ValueError,
-                                 "must be known at graph construction"):
+    with self.assertRaisesRegex(ValueError,
+                                "must be known at graph construction"):
       inp.maybe_batch_join([[array_ops.zeros(5)]],
                            keep_input=array_ops.placeholder(dtypes.bool),
                            batch_size=1,
@@ -1937,18 +1937,18 @@ class ShuffleBatchTest(test_lib.TestCase):
   @test_util.run_deprecated_v1
   def testInvalidKeepInputVector(self):
     # Can't have vector `keep_input` with `enqueue_many=False`.
-    with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
+    with self.assertRaisesRegex(ValueError, "`keep_input` cannot be a vector"):
       inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
                               keep_input=constant_op.constant([True, False]),
                               enqueue_many=False)
     # Can't have `keep_input` with more than one dimension.
-    with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
+    with self.assertRaisesRegex(ValueError, "must be 0 or 1 dimensions"):
       inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
                               keep_input=constant_op.constant([[True]]),
                               enqueue_many=True)
     # `keep_input` must have dimensions determined at graph construction.
-    with self.assertRaisesRegexp(ValueError,
-                                 "must be known at graph construction"):
+    with self.assertRaisesRegex(ValueError,
+                                "must be known at graph construction"):
       inp.maybe_shuffle_batch([array_ops.zeros(5)], 1, 10, 1,
                               keep_input=array_ops.placeholder(dtypes.bool),
                               enqueue_many=True)
@@ -2233,7 +2233,7 @@ class ShuffleBatchJoinTest(test_lib.TestCase):
 
   @test_util.run_deprecated_v1
   def testMismatchedDictKeys(self):
-    with self.assertRaisesRegexp(ValueError, "must have the same keys"):
+    with self.assertRaisesRegex(ValueError, "must have the same keys"):
       inp.shuffle_batch_join(
           [{
               "c": 12,
@@ -2341,20 +2341,20 @@ class ShuffleBatchJoinTest(test_lib.TestCase):
   @test_util.run_deprecated_v1
   def testInvalidKeepInputVector(self):
     # Can't have vector `keep_input` with `enqueue_many=False`.
-    with self.assertRaisesRegexp(ValueError, "`keep_input` cannot be a vector"):
+    with self.assertRaisesRegex(ValueError, "`keep_input` cannot be a vector"):
       inp.maybe_shuffle_batch_join(
           [[array_ops.zeros(5)]], 1, 10, 1,
           keep_input=constant_op.constant([True, False]),
           enqueue_many=False)
     # Can't have `keep_input` with more than one dimension.
-    with self.assertRaisesRegexp(ValueError, "must be 0 or 1 dimensions"):
+    with self.assertRaisesRegex(ValueError, "must be 0 or 1 dimensions"):
       inp.maybe_shuffle_batch_join(
           [[array_ops.zeros(5)]], 1, 10, 1,
           keep_input=constant_op.constant([[True]]),
           enqueue_many=True)
     # `keep_input` must have dimensions determined at graph construction.
-    with self.assertRaisesRegexp(ValueError,
-                                 "must be known at graph construction"):
+    with self.assertRaisesRegex(ValueError,
+                                "must be known at graph construction"):
       inp.maybe_shuffle_batch_join(
           [[array_ops.zeros(5)]], 1, 10, 1,
           keep_input=array_ops.placeholder(dtypes.bool),
diff --git a/tensorflow/python/training/momentum_test.py b/tensorflow/python/training/momentum_test.py
index 639276988a1..b69c828f84b 100644
--- a/tensorflow/python/training/momentum_test.py
+++ b/tensorflow/python/training/momentum_test.py
@@ -75,9 +75,9 @@ class MomentumOptimizerTest(test.TestCase):
       # Check we have slots
       self.assertEqual(["momentum"], mom_opt.get_slot_names())
       slot0 = mom_opt.get_slot(var0, "momentum")
-      self.assertEquals(slot0.get_shape(), var0.get_shape())
+      self.assertEqual(slot0.get_shape(), var0.get_shape())
       slot1 = mom_opt.get_slot(var1, "momentum")
-      self.assertEquals(slot1.get_shape(), var1.get_shape())
+      self.assertEqual(slot1.get_shape(), var1.get_shape())
       if not context.executing_eagerly():
         self.assertFalse(slot0 in variables.trainable_variables())
         self.assertFalse(slot1 in variables.trainable_variables())
@@ -146,7 +146,7 @@ class MomentumOptimizerTest(test.TestCase):
       optimizer_variables = optimizer.variables()
       self.assertStartsWith(optimizer_variables[0].name, "var0")
       self.assertStartsWith(optimizer_variables[1].name, "var1")
-      self.assertEquals(2, len(optimizer_variables))
+      self.assertEqual(2, len(optimizer_variables))
 
     with ops.Graph().as_default():
       var2 = resource_variable_ops.ResourceVariable(
@@ -158,7 +158,7 @@ class MomentumOptimizerTest(test.TestCase):
       optimizer_variables = optimizer.variables()
       self.assertStartsWith(optimizer_variables[0].name, "var2")
       self.assertStartsWith(optimizer_variables[1].name, "var3")
-      self.assertEquals(2, len(optimizer_variables))
+      self.assertEqual(2, len(optimizer_variables))
 
   @test_util.run_deprecated_v1
   def testNesterovMomentum(self):
@@ -299,10 +299,10 @@ class MomentumOptimizerTest(test.TestCase):
         # Check we have slots
         self.assertEqual(["momentum"], mom_opt.get_slot_names())
         slot0 = mom_opt.get_slot(var0, "momentum")
-        self.assertEquals(slot0.get_shape(), var0.get_shape())
+        self.assertEqual(slot0.get_shape(), var0.get_shape())
         self.assertFalse(slot0 in variables.trainable_variables())
         slot1 = mom_opt.get_slot(var1, "momentum")
-        self.assertEquals(slot1.get_shape(), var1.get_shape())
+        self.assertEqual(slot1.get_shape(), var1.get_shape())
         self.assertFalse(slot1 in variables.trainable_variables())
 
         # Fetch params to validate initial values
@@ -482,9 +482,9 @@ class MomentumOptimizerTest(test.TestCase):
         # Check we have slots
         self.assertEqual(["momentum"], mom_opt.get_slot_names())
         slot0 = mom_opt.get_slot(var0, "momentum")
-        self.assertEquals(slot0.get_shape(), var0.get_shape())
+        self.assertEqual(slot0.get_shape(), var0.get_shape())
         slot1 = mom_opt.get_slot(var1, "momentum")
-        self.assertEquals(slot1.get_shape(), var1.get_shape())
+        self.assertEqual(slot1.get_shape(), var1.get_shape())
 
         # Fetch params to validate initial values
         self.assertAllClose([0, 0], self.evaluate(var0)[0])
@@ -557,9 +557,9 @@ class MomentumOptimizerTest(test.TestCase):
 
         self.assertEqual(["momentum"], mom_opt.get_slot_names())
         slot0 = mom_opt.get_slot(var0, "momentum")
-        self.assertEquals(slot0.get_shape(), var0.get_shape())
+        self.assertEqual(slot0.get_shape(), var0.get_shape())
         slot1 = mom_opt.get_slot(var1, "momentum")
-        self.assertEquals(slot1.get_shape(), var1.get_shape())
+        self.assertEqual(slot1.get_shape(), var1.get_shape())
 
         # Fetch params to validate initial values
         self.assertAllClose([1.0, 2.0], self.evaluate(var0))
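
The hunks above and below apply the same mechanical rename: the deprecated unittest aliases are swapped for their canonical Python 3 spellings, which behave identically. A minimal, self-contained sketch of the replacement spellings (the test case and values here are illustrative only, not taken from the patched files):

import unittest


class AliasMigrationSketch(unittest.TestCase):

  def test_canonical_assert_names(self):
    # assertEquals -> assertEqual: plain equality check.
    self.assertEqual([1, 11, 2, 22, 3, 33], [1, 11, 2, 22, 3, 33])
    # assertRaisesRegexp -> assertRaisesRegex: expected exception type plus a
    # regular expression searched against str(exception).
    with self.assertRaisesRegex(ValueError, "invalid literal"):
      int("not a number")
    # assertRegexpMatches -> assertRegex: regex search over a string.
    self.assertRegex("deprecation_test.py:39", r"deprecation_test\.py:")


if __name__ == "__main__":
  unittest.main()
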
diff --git a/tensorflow/python/training/monitored_session_test.py b/tensorflow/python/training/monitored_session_test.py
index a78674a3d7c..3c4d5c781bd 100644
--- a/tensorflow/python/training/monitored_session_test.py
+++ b/tensorflow/python/training/monitored_session_test.py
@@ -133,7 +133,7 @@ class ScaffoldTest(test.TestCase):
       variables.VariableV1([1])
       ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
       ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
-      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
+      with self.assertRaisesRegex(RuntimeError, 'More than one item'):
         monitored_session.Scaffold().finalize()
 
   def test_uses_passed_values(self):
@@ -163,8 +163,8 @@ class ScaffoldTest(test.TestCase):
     with ops.Graph().as_default():
       variables.VariableV1([1])
       monitored_session.Scaffold().finalize()
-      with self.assertRaisesRegexp(RuntimeError,
-                                   'Graph is finalized and cannot be modified'):
+      with self.assertRaisesRegex(RuntimeError,
+                                  'Graph is finalized and cannot be modified'):
         constant_op.constant([0])
 
   def test_new_scaffold_from_default_scaffold(self):
@@ -230,7 +230,7 @@ class ScaffoldTest(test.TestCase):
 
   def test_copy_from_scaffold_is_scaffold(self):
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           TypeError, 'copy_from_scaffold is not a Scaffold instance'):
         monitored_session.Scaffold(copy_from_scaffold=1)
 
@@ -583,8 +583,8 @@ class WrappedSessionTest(test.TestCase):
     with self.cached_session() as sess:
       constant_op.constant(0.0)
       wrapped_sess = monitored_session._WrappedSession(sess)
-      self.assertEquals(sess.graph, wrapped_sess.graph)
-      self.assertEquals(sess.sess_str, wrapped_sess.sess_str)
+      self.assertEqual(sess.graph, wrapped_sess.graph)
+      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)
 
   @test_util.run_deprecated_v1
   def test_should_stop_on_close(self):
@@ -647,8 +647,8 @@ class CoordinatedSessionTest(test.TestCase):
       constant_op.constant(0.0)
       coord = coordinator.Coordinator()
       coord_sess = monitored_session._CoordinatedSession(sess, coord)
-      self.assertEquals(sess.graph, coord_sess.graph)
-      self.assertEquals(sess.sess_str, coord_sess.sess_str)
+      self.assertEqual(sess.graph, coord_sess.graph)
+      self.assertEqual(sess.sess_str, coord_sess.sess_str)
 
   @test_util.run_deprecated_v1
   def test_run(self):
@@ -687,7 +687,7 @@ class CoordinatedSessionTest(test.TestCase):
       self.assertFalse(coord_sess.should_stop())
       self.assertEqual(0, coord_sess.run(c))
       self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
-      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
+      with self.assertRaisesRegex(TypeError, 'None has invalid type'):
         coord_sess.run([None], feed_dict={c: 2})
       self.assertFalse(coord.should_stop())
       self.assertFalse(coord_sess.should_stop())
@@ -715,7 +715,7 @@ class CoordinatedSessionTest(test.TestCase):
       self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
       for t in threads:
         self.assertTrue(t.is_alive())
-      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
+      with self.assertRaisesRegex(TypeError, 'None has invalid type'):
         coord_sess.run([None], feed_dict={c: 2})
       coord_sess.close()
       for t in threads:
@@ -894,8 +894,8 @@ class RecoverableSessionTest(test.TestCase):
       constant_op.constant(0.0)
       recoverable_sess = monitored_session._RecoverableSession(
           self._SessionReturner(sess))
-      self.assertEquals(sess.graph, recoverable_sess.graph)
-      self.assertEquals(sess.sess_str, recoverable_sess.sess_str)
+      self.assertEqual(sess.graph, recoverable_sess.graph)
+      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)
 
   @test_util.run_deprecated_v1
   def test_run(self):
@@ -950,7 +950,7 @@ class RecoverableSessionTest(test.TestCase):
       self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
       self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
       # This will fail and throw a real error as the pop() will fail.
-      with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
+      with self.assertRaisesRegex(IndexError, 'pop from empty list'):
         recoverable_sess.run(v, feed_dict={c: -12})
 
   @test_util.run_deprecated_v1
@@ -1394,7 +1394,7 @@ class HookedSessionTest(test.TestCase):
           None, feed_dict={a_tensor: [10]})
       self.evaluate(variables.global_variables_initializer())
 
-      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
+      with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
         mon_sess.run(fetches=add_tensor)
 
   def testHooksAndUserFeedConflicts(self):
@@ -1412,7 +1412,7 @@ class HookedSessionTest(test.TestCase):
           None, feed_dict={b_tensor: [10]})
       self.evaluate(variables.global_variables_initializer())
 
-      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
+      with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
         mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
 
 
@@ -1703,7 +1703,7 @@ class MonitoredSessionTest(test.TestCase):
       do_step = state_ops.assign_add(gstep, 1)
       hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
       session = monitored_session.MonitoredSession(hooks=[hook])
-      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
+      with self.assertRaisesRegex(RuntimeError, 'regular exception'):
         with session:
           self.assertEqual(0, session.run(gstep))
           self.assertEqual(1, session.run(do_step))
@@ -1724,7 +1724,7 @@ class MonitoredSessionTest(test.TestCase):
       gstep = training_util.get_or_create_global_step()
       session = monitored_session.MonitoredSession()
       run_performed_without_error = False
-      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
+      with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
         with session:
           self.assertEqual(0, session.run(gstep))
           # Report an exception through the coordinator.
@@ -1744,7 +1744,7 @@ class MonitoredSessionTest(test.TestCase):
     with ops.Graph().as_default():
       gstep = training_util.get_or_create_global_step()
       session = monitored_session.MonitoredSession()
-      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
+      with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
         with session:
           self.assertEqual(0, session.run(gstep))
           # Report an exception through the coordinator.
@@ -1778,7 +1778,7 @@ class MonitoredSessionTest(test.TestCase):
       do_step = state_ops.assign_add(gstep, 1)
       session = monitored_session.MonitoredSession()
       # We should see that exception.
-      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
+      with self.assertRaisesRegex(RuntimeError, 'regular exception'):
         with session:
           self.assertEqual(1, session.run(do_step))
           self.assertEqual(2, session.run(do_step))
@@ -1904,7 +1904,7 @@ class MonitoredSessionTest(test.TestCase):
   def test_with_statement_and_close(self):
     # Test case for https://github.com/tensorflow/tensorflow/issues/12224
     # where close() inside the with should have a better error message.
-    with self.assertRaisesRegexp(RuntimeError, 'Session is already closed'):
+    with self.assertRaisesRegex(RuntimeError, 'Session is already closed'):
       with monitored_session.MonitoredSession() as session:
         session.close()
 
@@ -1973,7 +1973,7 @@ class MonitoredSessionTest(test.TestCase):
         del step_context, extra_foo
 
       with monitored_session.MonitoredSession() as session:
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             '`step_fn` may either have one `step_context` argument'):
           self.assertEqual(None, session.run_step_fn(step_fn))
@@ -2001,7 +2001,7 @@ class MonitoredSessionTest(test.TestCase):
           del step_context, extra_foo
 
       with monitored_session.MonitoredSession() as session:
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             '`step_fn` may either have one `step_context` argument'):
           model = Model()
@@ -2173,7 +2173,7 @@ class MonitoredSessionTest(test.TestCase):
         return value
 
       with monitored_session.SingularMonitoredSession() as session:
-        with self.assertRaisesRegexp(errors_impl.AbortedError, 'Abort'):
+        with self.assertRaisesRegex(errors_impl.AbortedError, 'Abort'):
           self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
           self.fail()
 
@@ -2274,7 +2274,7 @@ class SingularMonitoredSessionTest(test.TestCase):
       gstep = training_util.get_or_create_global_step()
       session = monitored_session.SingularMonitoredSession()
       run_performed_without_error = False
-      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
+      with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
         with session:
           self.assertEqual(0, session.run(gstep))
           # Report an exception through the coordinator.
diff --git a/tensorflow/python/training/optimizer_test.py b/tensorflow/python/training/optimizer_test.py
index 5775d0b8091..a0e07c5618f 100644
--- a/tensorflow/python/training/optimizer_test.py
+++ b/tensorflow/python/training/optimizer_test.py
@@ -126,7 +126,7 @@ class OptimizerTest(test.TestCase):
         return 5 * var0 + var1
       # pylint: enable=cell-var-from-loop
       sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
-      with self.assertRaisesRegexp(ValueError, 'No.*variables'):
+      with self.assertRaisesRegex(ValueError, 'No.*variables'):
         sgd_op.minimize(loss)
 
   @test_util.run_in_graph_and_eager_modes
@@ -143,7 +143,7 @@ class OptimizerTest(test.TestCase):
         return 5 * var0
       # pylint: enable=cell-var-from-loop
       sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
-      with self.assertRaisesRegexp(ValueError, 'No gradients'):
+      with self.assertRaisesRegex(ValueError, 'No gradients'):
         # var1 has no gradient
         sgd_op.minimize(loss, var_list=[var1])
 
@@ -159,8 +159,8 @@ class OptimizerTest(test.TestCase):
       def loss():
         return constant_op.constant(5.0)
       sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
-      with self.assertRaisesRegexp(ValueError,
-                                   'No gradients provided for any variable'):
+      with self.assertRaisesRegex(ValueError,
+                                  'No gradients provided for any variable'):
         sgd_op.minimize(loss, var_list=[var0, var1])
 
   @test_util.run_in_graph_and_eager_modes
@@ -173,8 +173,8 @@ class OptimizerTest(test.TestCase):
       var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype,
                                                     name='b_%d' % i)
       sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
-      with self.assertRaisesRegexp(ValueError,
-                                   'No gradients provided for any variable'):
+      with self.assertRaisesRegex(ValueError,
+                                  'No gradients provided for any variable'):
         sgd_op.apply_gradients([(None, var0), (None, var1)])
 
   @test_util.run_in_graph_and_eager_modes
diff --git a/tensorflow/python/training/queue_runner_test.py b/tensorflow/python/training/queue_runner_test.py
index 2868e7bcc69..2dbeb944776 100644
--- a/tensorflow/python/training/queue_runner_test.py
+++ b/tensorflow/python/training/queue_runner_test.py
@@ -126,7 +126,7 @@ class QueueRunnerTest(test.TestCase):
       self.assertEqual(10.0, self.evaluate(dequeue1))
       self.assertEqual(10.0, self.evaluate(dequeue1))
       # And queue1 should now be closed.
-      with self.assertRaisesRegexp(errors_impl.OutOfRangeError, "is closed"):
+      with self.assertRaisesRegex(errors_impl.OutOfRangeError, "is closed"):
         self.evaluate(dequeue1)
 
   def testRespectCoordShouldStop(self):
@@ -162,7 +162,7 @@ class QueueRunnerTest(test.TestCase):
       for t in threads:
         t.start()
       # The exception should be re-raised when joining.
-      with self.assertRaisesRegexp(ValueError, "Operation not in the graph"):
+      with self.assertRaisesRegex(ValueError, "Operation not in the graph"):
         coord.join()
 
   def testGracePeriod(self):
@@ -277,7 +277,7 @@ class QueueRunnerTest(test.TestCase):
     queue_runner_impl.add_queue_runner(qr)
     with self.cached_session():
       init_op.run()
-      with self.assertRaisesRegexp(TypeError, "tf.Session"):
+      with self.assertRaisesRegex(TypeError, "tf.Session"):
         queue_runner_impl.start_queue_runners("NotASession")
 
   def testStartQueueRunnersIgnoresMonitoredSession(self):
diff --git a/tensorflow/python/training/saver_large_variable_test.py b/tensorflow/python/training/saver_large_variable_test.py
index 9d171ea5684..0b5cbf60510 100644
--- a/tensorflow/python/training/saver_large_variable_test.py
+++ b/tensorflow/python/training/saver_large_variable_test.py
@@ -51,8 +51,8 @@ class SaverLargeVariableTest(test.TestCase):
               var.op.name: var
           }, write_version=saver_pb2.SaverDef.V1)
       var.initializer.run()
-      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
-                                   "Tensor slice is too large to serialize"):
+      with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
+                                  "Tensor slice is too large to serialize"):
         save.save(sess, save_path)
 
 
diff --git a/tensorflow/python/training/saver_test.py b/tensorflow/python/training/saver_test.py
index 5c87be37e4c..0095f3fa269 100644
--- a/tensorflow/python/training/saver_test.py
+++ b/tensorflow/python/training/saver_test.py
@@ -277,7 +277,7 @@ class SaverTest(test.TestCase):
 
       save2 = saver_module.Saver([v])
       save2.restore(sess, save_path)
-      self.assertEquals(self.evaluate(v), [1])
+      self.assertEqual(self.evaluate(v), [1])
 
   def testNoAdditionalOpsAddedBySaverForResourceVariablesOutsideSaveScope(self):
     with ops_lib.Graph().as_default() as g:
@@ -382,7 +382,7 @@ class SaverTest(test.TestCase):
     for ver in (saver_pb2.SaverDef.V1, saver_pb2.SaverDef.V2):
       with self.cached_session() as sess:
         save = saver_module.Saver({"v0": v0}, write_version=ver)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "The passed save_path is not a valid checkpoint:"):
           save.restore(sess, "invalid path")
 
@@ -423,7 +423,7 @@ class SaverTest(test.TestCase):
           variables.Variable.SaveSliceInfo("v1", [1], [0], [1]))
 
       # By default the name used for "v2" will be "v1" and raise an error.
-      with self.assertRaisesRegexp(ValueError, "same name: v1"):
+      with self.assertRaisesRegex(ValueError, "same name: v1"):
         saver_module.Saver([v0, v1, v2])
 
       # The names are different and will work.
@@ -441,7 +441,7 @@ class SaverTest(test.TestCase):
           partitioner=partitioned_variables.fixed_size_partitioner(
               num_shards=2))
       p_v2._name = "p_v1"
-      with self.assertRaisesRegexp(ValueError, "same name: p_v1"):
+      with self.assertRaisesRegex(ValueError, "same name: p_v1"):
         saver_module.Saver([p_v1, p_v2])
 
   def testSameName(self):
@@ -450,12 +450,12 @@ class SaverTest(test.TestCase):
       v2 = saver_test_utils.CheckpointedOp(name="v2")
 
       # Saving one variable under two names raises an error.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "The same saveable will be restored with two names: v0"):
         saver_module.Saver({"v0": v0, "v0too": v0})
 
       # Ditto for custom saveables.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           ValueError, "The same saveable will be restored with two names: v2"):
         saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
 
@@ -631,7 +631,7 @@ class SaverTest(test.TestCase):
   def testVarListShouldBeEmptyInDeferredBuild(self):
     with ops_lib.Graph().as_default():
       v = variables.VariableV1(1.0)
-      with self.assertRaisesRegexp(ValueError, "defer_build"):
+      with self.assertRaisesRegex(ValueError, "defer_build"):
         saver_module.Saver([v], defer_build=True)
 
   def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
@@ -639,7 +639,7 @@ class SaverTest(test.TestCase):
     with ops_lib.Graph().as_default(), session.Session() as sess:
       variables.VariableV1(1.0)
       saver = saver_module.Saver(defer_build=True)
-      with self.assertRaisesRegexp(RuntimeError, "build"):
+      with self.assertRaisesRegex(RuntimeError, "build"):
         saver.save(sess, save_path)
 
   def testDeferredBuild(self):
@@ -677,7 +677,7 @@ class SaverTest(test.TestCase):
     with session.Session("", graph=ops_lib.Graph()) as sess:
       var = variables.VariableV1([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
       save = saver_module.Saver()
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           errors_impl.InvalidArgumentError,
           "Assign requires shapes of both tensors to match."):
         save.restore(sess, save_path)
@@ -810,8 +810,8 @@ class SaverTest(test.TestCase):
         # Restore the saved value with different dtype
         # in the parameter nodes.
         save = saver_module.Saver({"v0": v0_wrong_dtype})
-        with self.assertRaisesRegexp(errors.InvalidArgumentError,
-                                     "original dtype"):
+        with self.assertRaisesRegex(errors.InvalidArgumentError,
+                                    "original dtype"):
           save.restore(sess, save_path)
 
   # Test restoring large tensors (triggers a thread pool)
@@ -2612,13 +2612,13 @@ class CheckpointReaderTest(test.TestCase):
       self.assertAllEqual(v0.eval(), v0_tensor)
       self.assertAllEqual(v1.eval(), v1_tensor)
       # Verifies get_tensor() fails for non-existent tensors.
-      with self.assertRaisesRegexp(errors.NotFoundError,
-                                   "v3 not found in checkpoint"):
+      with self.assertRaisesRegex(errors.NotFoundError,
+                                  "v3 not found in checkpoint"):
         reader.get_tensor("v3")
 
   def testNonexistentPath(self):
-    with self.assertRaisesRegexp(errors.NotFoundError,
-                                 "Unsuccessful TensorSliceReader"):
+    with self.assertRaisesRegex(errors.NotFoundError,
+                                "Unsuccessful TensorSliceReader"):
       py_checkpoint_reader.NewCheckpointReader("non-existent")
 
 
@@ -3097,8 +3097,8 @@ class TrackableCompatibilityTests(test.TestCase):
     with self.cached_session() as sess:
       self.evaluate(a.initializer)
       save_path = a_saver.save(sess=sess, save_path=checkpoint_prefix)
-      with self.assertRaisesRegexp(
-          errors.NotFoundError, "Key b not found in checkpoint"):
+      with self.assertRaisesRegex(errors.NotFoundError,
+                                  "Key b not found in checkpoint"):
         b_saver.restore(sess=sess, save_path=save_path)
 
       with self.assertRaises(errors.NotFoundError) as cs:
@@ -3125,7 +3125,7 @@ class TrackableCompatibilityTests(test.TestCase):
       a = variables.VariableV1([1.], name="a")
       a_saver = saver_module.Saver([a])
       with self.session(graph=g) as sess:
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             errors.InvalidArgumentError,
             "a mismatch between the current graph and the graph"):
           a_saver.restore(sess=sess, save_path=save_path)
diff --git a/tensorflow/python/training/server_lib_test.py b/tensorflow/python/training/server_lib_test.py
index ea9f70b8208..dc2adb7dee8 100644
--- a/tensorflow/python/training/server_lib_test.py
+++ b/tensorflow/python/training/server_lib_test.py
@@ -162,8 +162,7 @@ class GrpcServerTest(test.TestCase):
     sess.run(dequeue_t)
 
     def blocking_dequeue():
-      with self.assertRaisesRegexp(errors_impl.CancelledError,
-                                   "Session::Close"):
+      with self.assertRaisesRegex(errors_impl.CancelledError, "Session::Close"):
         sess.run(dequeue_t)
 
     blocking_thread = self.checkedThread(blocking_dequeue)
@@ -205,7 +204,7 @@ class GrpcServerTest(test.TestCase):
                      per_process_gpu_memory_fraction)
 
   def testInvalidHostname(self):
-    with self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "port"):
+    with self.assertRaisesRegex(errors_impl.InvalidArgumentError, "port"):
       _ = server_lib.Server(
           {
               "local": ["localhost"]
@@ -535,22 +534,15 @@ class ClusterSpecTest(test.TestCase):
     self.assertTrue(server_lib.ClusterSpec({"job": ["host:port"]}))
 
   def testEq(self):
-    self.assertEquals(server_lib.ClusterSpec({}), server_lib.ClusterSpec({}))
-    self.assertEquals(
-        server_lib.ClusterSpec({
-            "job": ["host:2222"]
-        }),
-        server_lib.ClusterSpec({
-            "job": ["host:2222"]
-        }),)
-    self.assertEquals(
-        server_lib.ClusterSpec({
-            "job": {
-                0: "host:2222"
-            }
-        }), server_lib.ClusterSpec({
-            "job": ["host:2222"]
-        }))
+    self.assertEqual(server_lib.ClusterSpec({}), server_lib.ClusterSpec({}))
+    self.assertEqual(
+        server_lib.ClusterSpec({"job": ["host:2222"]}),
+        server_lib.ClusterSpec({"job": ["host:2222"]}),
+    )
+    self.assertEqual(
+        server_lib.ClusterSpec({"job": {
+            0: "host:2222"
+        }}), server_lib.ClusterSpec({"job": ["host:2222"]}))
 
   def testNe(self):
     self.assertNotEquals(
diff --git a/tensorflow/python/training/session_manager_test.py b/tensorflow/python/training/session_manager_test.py
index 9d7381d08e0..df795ff5f7e 100644
--- a/tensorflow/python/training/session_manager_test.py
+++ b/tensorflow/python/training/session_manager_test.py
@@ -118,7 +118,7 @@ class SessionManagerTest(test.TestCase):
           ready_op=variables.report_uninitialized_variables())
       saver = saver_lib.Saver({"v": v})
       # This should fail as there's no checkpoint within 2 seconds.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError, "no init_op or init_fn or local_init_op was given"):
         sess = sm.prepare_session(
             "",
@@ -164,7 +164,7 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(v))
+      self.assertEqual(1, sess.run(v))
 
   @test_util.run_v1_only("b/120545219")
   def testRecoverSession(self):
@@ -185,7 +185,7 @@ class SessionManagerTest(test.TestCase):
           "", saver=saver, checkpoint_dir=checkpoint_dir)
       self.assertFalse(initialized)
       sess.run(v.initializer)
-      self.assertEquals(1, sess.run(v))
+      self.assertEqual(1, sess.run(v))
       saver.save(sess, os.path.join(checkpoint_dir,
                                     "recover_session_checkpoint"))
     self._test_recovered_variable(checkpoint_dir=checkpoint_dir)
@@ -214,7 +214,7 @@ class SessionManagerTest(test.TestCase):
   def testInitWithNoneLocalInitOpError(self):
     # Creating a SessionManager with a None local_init_op but
     # non-None ready_for_local_init_op raises ValueError
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "If you pass a ready_for_local_init_op "
         "you must also pass a local_init_op "):
       session_manager.SessionManager(
@@ -242,7 +242,7 @@ class SessionManagerTest(test.TestCase):
           "", saver=saver, checkpoint_dir=checkpoint_dir)
       self.assertFalse(initialized)
       sess.run(v.initializer)
-      self.assertEquals(1, sess.run(v))
+      self.assertEqual(1, sess.run(v))
       saver.save(sess, os.path.join(checkpoint_dir,
                                     "recover_session_checkpoint"))
     # Create a new Graph and SessionManager and recover.
@@ -273,8 +273,8 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(v))
-      self.assertEquals(1, sess.run(w))
+      self.assertEqual(1, sess.run(v))
+      self.assertEqual(1, sess.run(w))
 
   @test_util.run_v1_only("b/120545219")
   def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
@@ -301,7 +301,7 @@ class SessionManagerTest(test.TestCase):
           "", saver=saver, checkpoint_dir=checkpoint_dir)
       self.assertFalse(initialized)
       sess.run(v.initializer)
-      self.assertEquals(1, sess.run(v))
+      self.assertEqual(1, sess.run(v))
       saver.save(sess, os.path.join(checkpoint_dir,
                                     "recover_session_checkpoint"))
     # Create a new Graph and SessionManager and recover.
@@ -331,7 +331,7 @@ class SessionManagerTest(test.TestCase):
           False,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(v))
+      self.assertEqual(1, sess.run(v))
 
   @test_util.run_v1_only("b/120545219")
   def testRecoverSessionNoChkptStillRunsLocalInitOp(self):
@@ -360,7 +360,7 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(w))
+      self.assertEqual(1, sess.run(w))
 
   @test_util.run_v1_only("b/120545219")
   def testRecoverSessionFailsStillRunsLocalInitOp(self):
@@ -404,7 +404,7 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(w))
+      self.assertEqual(1, sess.run(w))
 
   @test_util.run_v1_only("b/120545219")
   def testWaitForSessionLocalInit(self):
@@ -436,8 +436,8 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(v))
-      self.assertEquals(1, sess.run(w))
+      self.assertEqual(1, sess.run(v))
+      self.assertEqual(1, sess.run(w))
 
   def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
     with ops.Graph().as_default() as graph:
@@ -472,8 +472,8 @@ class SessionManagerTest(test.TestCase):
           ready_op=variables.report_uninitialized_variables(),
           ready_for_local_init_op=None,
           local_init_op=w.initializer)
-    with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
-                                 "Session was not ready after waiting.*"):
+    with self.assertRaisesRegex(errors_impl.DeadlineExceededError,
+                                "Session was not ready after waiting.*"):
       sm.wait_for_session("", max_wait_secs=3)
 
   @test_util.run_v1_only("b/120545219")
@@ -512,9 +512,9 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(v))
-      self.assertEquals(1, sess.run(w))
-      self.assertEquals(3, sess.run(x))
+      self.assertEqual(1, sess.run(v))
+      self.assertEqual(1, sess.run(w))
+      self.assertEqual(3, sess.run(x))
 
   @test_util.run_v1_only("b/120545219")
   def testPrepareSessionWithPartialInitOp(self):
@@ -566,8 +566,8 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(w))
-      self.assertEquals(3, sess.run(x))
+      self.assertEqual(1, sess.run(w))
+      self.assertEqual(3, sess.run(x))
       self.assertEqual(
           False,
           variables.is_variable_initialized(
@@ -580,8 +580,8 @@ class SessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("x_res:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(w_res))
-      self.assertEquals(3, sess.run(x_res))
+      self.assertEqual(1, sess.run(w_res))
+      self.assertEqual(3, sess.run(x_res))
 
   @test_util.run_v1_only("b/120545219")
   def testPrepareSessionWithCyclicInitializer(self):
@@ -615,8 +615,8 @@ class SessionManagerTest(test.TestCase):
         self.assertEqual(False, variables.is_variable_initialized(w).eval())
       sm2 = session_manager.SessionManager(
           ready_op=variables.report_uninitialized_variables())
-      with self.assertRaisesRegexp(
-          RuntimeError, "Init operations did not make model ready.*"):
+      with self.assertRaisesRegex(RuntimeError,
+                                  "Init operations did not make model ready.*"):
         sm2.prepare_session("", init_op=v.initializer)
 
   def testPrepareSessionDidNotInitLocalVariableList(self):
@@ -632,8 +632,8 @@ class SessionManagerTest(test.TestCase):
         self.assertEqual(False, variables.is_variable_initialized(w).eval())
       sm2 = session_manager.SessionManager(
           ready_op=variables.report_uninitialized_variables())
-      with self.assertRaisesRegexp(RuntimeError,
-                                   "Init operations did not make model ready"):
+      with self.assertRaisesRegex(RuntimeError,
+                                  "Init operations did not make model ready"):
         sm2.prepare_session("", init_op=[v.initializer])
 
   def testPrepareSessionWithReadyNotReadyForLocal(self):
@@ -652,7 +652,7 @@ class SessionManagerTest(test.TestCase):
           ready_for_local_init_op=variables.report_uninitialized_variables(
               variables.global_variables()),
           local_init_op=w.initializer)
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError,
           "Init operations did not make model ready for local_init"):
         sm2.prepare_session("", init_op=None)
@@ -673,8 +673,8 @@ class SessionManagerTest(test.TestCase):
           ready_op=variables.report_uninitialized_variables(),
           ready_for_local_init_op=None,
           local_init_op=w.initializer)
-    with self.assertRaisesRegexp(RuntimeError,
-                                 "Init operations did not make model ready.*"):
+    with self.assertRaisesRegex(RuntimeError,
+                                "Init operations did not make model ready.*"):
       sm2.prepare_session("", init_op=None)
 
 
@@ -747,7 +747,7 @@ class ObsoleteSessionManagerTest(test.TestCase):
           ready_op=variables.assert_variables_initialized())
       saver = saver_lib.Saver({"v": v})
       # This should fail as there's no checkpoint within 2 seconds.
-      with self.assertRaisesRegexp(
+      with self.assertRaisesRegex(
           RuntimeError, "no init_op or init_fn or local_init_op was given"):
         sess = sm.prepare_session(
             "",
@@ -791,7 +791,7 @@ class ObsoleteSessionManagerTest(test.TestCase):
           "", saver=saver, checkpoint_dir=checkpoint_dir)
       self.assertFalse(initialized)
       sess.run(v.initializer)
-      self.assertEquals(1, sess.run(v))
+      self.assertEqual(1, sess.run(v))
       saver.save(sess, os.path.join(checkpoint_dir,
                                     "recover_session_checkpoint"))
     # Create a new Graph and SessionManager and recover.
@@ -809,7 +809,7 @@ class ObsoleteSessionManagerTest(test.TestCase):
           True,
           variables.is_variable_initialized(
               sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
-      self.assertEquals(1, sess.run(v))
+      self.assertEqual(1, sess.run(v))
 
   @test_util.run_v1_only("b/120545219")
   def testWaitForSessionReturnsNoneAfterTimeout(self):
diff --git a/tensorflow/python/training/supervisor_test.py b/tensorflow/python/training/supervisor_test.py
index 0529cff1697..adc8b69c66f 100644
--- a/tensorflow/python/training/supervisor_test.py
+++ b/tensorflow/python/training/supervisor_test.py
@@ -122,7 +122,7 @@ class SupervisorTest(test.TestCase):
       my_op = constant_op.constant(1.0)
       sv = supervisor.Supervisor(logdir=logdir)
       last_step = None
-      with self.assertRaisesRegexp(RuntimeError, "failing here"):
+      with self.assertRaisesRegex(RuntimeError, "failing here"):
         with sv.managed_session("") as sess:
           for step in xrange(10):
             last_step = step
@@ -308,7 +308,7 @@ class SupervisorTest(test.TestCase):
     logdir = self._test_dir("managed_main_error_two_queues")
     os.makedirs(logdir)
     data_path = self._csv_data(logdir)
-    with self.assertRaisesRegexp(RuntimeError, "fail at step 3"):
+    with self.assertRaisesRegex(RuntimeError, "fail at step 3"):
       with ops.Graph().as_default():
         # Create an input pipeline that reads the file 3 times.
         filename_queue = input_lib.string_input_producer(
@@ -418,7 +418,7 @@ class SupervisorTest(test.TestCase):
       summ = summary.merge_all()
       sv = supervisor.Supervisor(logdir="", summary_op=None)
       sess = sv.prepare_or_wait_for_session("")
-      with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
+      with self.assertRaisesRegex(RuntimeError, "requires a summary writer"):
         sv.summary_computed(sess, sess.run(summ))
 
   @test_util.run_v1_only("train.Supervisor is for v1 only")
@@ -435,7 +435,7 @@ class SupervisorTest(test.TestCase):
       # Check that a checkpoint is still generated.
       self._wait_for_glob(sv.save_path, 3.0)
       # Check that we cannot write a summary
-      with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
+      with self.assertRaisesRegex(RuntimeError, "requires a summary writer"):
         sv.summary_computed(sess, sess.run(summ))
 
   def testNoLogdirButExplicitSummaryWriter(self):
@@ -698,8 +698,7 @@ class SupervisorTest(test.TestCase):
       variables.VariableV1([4.0, 5.0, 6.0], name="w")
       # w will not be initialized.
       sv = supervisor.Supervisor(logdir=logdir, init_op=v.initializer)
-      with self.assertRaisesRegexp(RuntimeError,
-                                   "Variables not initialized: w"):
+      with self.assertRaisesRegex(RuntimeError, "Variables not initialized: w"):
         sv.prepare_or_wait_for_session(server.target)
 
   def testInitOpFailsForTransientVariable(self):
@@ -716,8 +715,7 @@ class SupervisorTest(test.TestCase):
           collections=[ops.GraphKeys.LOCAL_VARIABLES])
       # w will not be initialized.
       sv = supervisor.Supervisor(logdir=logdir, local_init_op=v.initializer)
-      with self.assertRaisesRegexp(RuntimeError,
-                                   "Variables not initialized: w"):
+      with self.assertRaisesRegex(RuntimeError, "Variables not initialized: w"):
         sv.prepare_or_wait_for_session(server.target)
 
   @test_util.run_v1_only("train.Supervisor is for v1 only")
@@ -725,7 +723,7 @@ class SupervisorTest(test.TestCase):
     logdir = self._test_dir("setup_fail")
     with ops.Graph().as_default():
       variables.VariableV1([1.0, 2.0, 3.0], name="v")
-      with self.assertRaisesRegexp(ValueError, "must have their device set"):
+      with self.assertRaisesRegex(ValueError, "must have their device set"):
         supervisor.Supervisor(logdir=logdir, is_chief=False)
     with ops.Graph().as_default(), ops.device("/job:ps"):
       variables.VariableV1([1.0, 2.0, 3.0], name="v")
diff --git a/tensorflow/python/training/sync_replicas_optimizer_test.py b/tensorflow/python/training/sync_replicas_optimizer_test.py
index 03c07173252..7ff31d61c9a 100644
--- a/tensorflow/python/training/sync_replicas_optimizer_test.py
+++ b/tensorflow/python/training/sync_replicas_optimizer_test.py
@@ -265,8 +265,7 @@ class SyncReplicasOptimizerHookTest(test.TestCase):
         replicas_to_aggregate=1,
         total_num_replicas=1)
     hook = opt.make_session_run_hook(True)
-    with self.assertRaisesRegexp(ValueError,
-                                 "apply_gradient should be called"):
+    with self.assertRaisesRegex(ValueError, "apply_gradient should be called"):
       hook.begin()
 
   @test_util.run_v1_only("b/120545219")
diff --git a/tensorflow/python/training/tracking/base_test.py b/tensorflow/python/training/tracking/base_test.py
index d76e20edf7e..feacd77417b 100644
--- a/tensorflow/python/training/tracking/base_test.py
+++ b/tensorflow/python/training/tracking/base_test.py
@@ -54,8 +54,8 @@ class InterfaceTests(test.TestCase):
           getter=variable_scope.get_variable)
       self.assertEqual([root, b], util.list_objects(root))
     with ops.Graph().as_default():
-      with self.assertRaisesRegexp(
-          ValueError, "already declared as a dependency"):
+      with self.assertRaisesRegex(ValueError,
+                                  "already declared as a dependency"):
         root._add_variable_with_custom_getter(
             name="v", shape=[], overwrite=False,
             getter=variable_scope.get_variable)
@@ -80,7 +80,7 @@ class InterfaceTests(test.TestCase):
     save_path = saved.save(os.path.join(self.get_temp_dir(), "ckpt"))
     restored = util.Checkpoint(obj=base.Trackable())
     status = restored.restore(save_path)
-    with self.assertRaisesRegexp(AssertionError, "foo_attr"):
+    with self.assertRaisesRegex(AssertionError, "foo_attr"):
       status.assert_consumed()
 
   def testBuggyGetConfig(self):
diff --git a/tensorflow/python/training/tracking/data_structures_test.py b/tensorflow/python/training/tracking/data_structures_test.py
index be795601678..90f8fbdef64 100644
--- a/tensorflow/python/training/tracking/data_structures_test.py
+++ b/tensorflow/python/training/tracking/data_structures_test.py
@@ -57,7 +57,7 @@ class ListTests(test.TestCase):
       data_structures.List([NotTrackable()])
 
   def testCallNotImplemented(self):
-    with self.assertRaisesRegexp(TypeError, "not callable"):
+    with self.assertRaisesRegex(TypeError, "not callable"):
       data_structures.List()(1.)
 
   def testNoPop(self):
@@ -123,7 +123,7 @@ class ListTests(test.TestCase):
 
   def testIMul_zero(self):
     l = data_structures.List([])
-    with self.assertRaisesRegexp(ValueError, "List only supports append"):
+    with self.assertRaisesRegex(ValueError, "List only supports append"):
       l *= 0
 
   def testIMul(self):
@@ -328,7 +328,7 @@ class ListWrapperTest(test.TestCase):
 
   def assertUnableToSave(self, l, msg):
     l._maybe_initialize_trackable()  # pylint: disable=protected-access
-    with self.assertRaisesRegexp(ValueError, msg):
+    with self.assertRaisesRegex(ValueError, msg):
       return l._checkpoint_dependencies  # pylint: disable=protected-access
 
 
@@ -368,7 +368,7 @@ class MappingTests(test.TestCase):
     self.assertEqual({}, a.d)
     self.assertFalse({} != a.d)  # pylint: disable=g-explicit-bool-comparison
     self.assertNotEqual({1: 2}, a.d)
-    with self.assertRaisesRegexp(TypeError, "unhashable"):
+    with self.assertRaisesRegex(TypeError, "unhashable"):
       set([a.d])
 
   def testListShallowCopy(self):
diff --git a/tensorflow/python/training/tracking/tracking_test.py b/tensorflow/python/training/tracking/tracking_test.py
index 4dff392cf9f..e2b01964bb3 100644
--- a/tensorflow/python/training/tracking/tracking_test.py
+++ b/tensorflow/python/training/tracking/tracking_test.py
@@ -59,7 +59,7 @@ class InterfaceTests(test.TestCase):
     root.leaf = tracking.AutoTrackable()
     root.leaf = root.leaf
     duplicate_name_dep = tracking.AutoTrackable()
-    with self.assertRaisesRegexp(ValueError, "already declared"):
+    with self.assertRaisesRegex(ValueError, "already declared"):
       root._track_trackable(duplicate_name_dep, name="leaf")
     # No error; we're overriding __setattr__, so we can't really stop people
     # from doing this while maintaining backward compatibility.
@@ -106,7 +106,7 @@ class InterfaceTests(test.TestCase):
     c = tracking.AutoTrackable()
     a.l.insert(0, c)
     checkpoint = util.Checkpoint(a=a)
-    with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
+    with self.assertRaisesRegex(ValueError, "A list element was replaced"):
       checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
 
   @test_util.run_in_graph_and_eager_modes
@@ -118,7 +118,7 @@ class InterfaceTests(test.TestCase):
     c = tracking.AutoTrackable()
     held_reference.append(c)
     checkpoint = util.Checkpoint(a=a)
-    with self.assertRaisesRegexp(ValueError, "The wrapped list was modified"):
+    with self.assertRaisesRegex(ValueError, "The wrapped list was modified"):
       checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
 
   @test_util.run_in_graph_and_eager_modes
@@ -154,7 +154,7 @@ class InterfaceTests(test.TestCase):
     checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
     # Dirtying the inner list means the root object is unsaveable.
     a.l[0][1] = 2
-    with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
+    with self.assertRaisesRegex(ValueError, "A list element was replaced"):
       checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
 
   @test_util.run_in_graph_and_eager_modes
diff --git a/tensorflow/python/training/tracking/util_test.py b/tensorflow/python/training/tracking/util_test.py
index 6c0b08426e7..4ef5f63380b 100644
--- a/tensorflow/python/training/tracking/util_test.py
+++ b/tensorflow/python/training/tracking/util_test.py
@@ -59,7 +59,7 @@ class InterfaceTests(test.TestCase):
   @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
   def testAddVariable(self):
     obj = NonLayerTrackable()
-    with self.assertRaisesRegexp(ValueError, "do not specify shape"):
+    with self.assertRaisesRegex(ValueError, "do not specify shape"):
       trackable_utils.add_variable(
           obj, name="shape_specified_twice", shape=[], initializer=1)
     constant_initializer = trackable_utils.add_variable(
@@ -83,7 +83,7 @@ class InterfaceTests(test.TestCase):
         name="duplicate", initial_value=1.)
     duplicate = trackable_utils.add_variable(
         obj, name="duplicate", shape=[])
-    with self.assertRaisesRegexp(ValueError, "'duplicate'.*already declared"):
+    with self.assertRaisesRegex(ValueError, "'duplicate'.*already declared"):
       trackable_utils.add_variable(obj, name="duplicate", shape=[])
 
     self.evaluate(trackable_utils.gather_initializers(obj))
@@ -365,9 +365,8 @@ class CheckpointingTests(parameterized.TestCase, test.TestCase):
       partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
                                                 v2=variables_lib.Variable(0.))
       status = partial_root.restore(save_path)
-      with self.assertRaisesRegexp(
-          AssertionError,
-          r"Unused attributes(.|\n)*\(root\).v1"):
+      with self.assertRaisesRegex(AssertionError,
+                                  r"Unused attributes(.|\n)*\(root\).v1"):
         status.assert_consumed()
 
   def testSilencePartialWarning(self):
diff --git a/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py b/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py
index a5af8e1f876..ca9f648c239 100644
--- a/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py
+++ b/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py
@@ -77,7 +77,7 @@ class CheckpointingTests(test.TestCase):
     self.assertEqual(12., self.evaluate(new_root.var))
     new_root.optimizer = adam.AdamOptimizer(0.1)
     slot_status.assert_existing_objects_matched()
-    with self.assertRaisesRegexp(AssertionError, "beta1_power"):
+    with self.assertRaisesRegex(AssertionError, "beta1_power"):
       slot_status.assert_consumed()
     self.assertEqual(12., self.evaluate(new_root.var))
     if context.executing_eagerly():
diff --git a/tensorflow/python/training/training_util_test.py b/tensorflow/python/training/training_util_test.py
index 3f9858a33ba..5049d6e00a0 100644
--- a/tensorflow/python/training/training_util_test.py
+++ b/tensorflow/python/training/training_util_test.py
@@ -43,10 +43,10 @@ class GlobalStepTest(test.TestCase):
           trainable=False,
           dtype=dtypes.float32,
           name=ops.GraphKeys.GLOBAL_STEP)
-      self.assertRaisesRegexp(TypeError, 'does not have integer type',
-                              training_util.get_global_step)
-    self.assertRaisesRegexp(TypeError, 'does not have integer type',
-                            training_util.get_global_step, g)
+      self.assertRaisesRegex(TypeError, 'does not have integer type',
+                             training_util.get_global_step)
+    self.assertRaisesRegex(TypeError, 'does not have integer type',
+                           training_util.get_global_step, g)
 
   def test_invalid_shape(self):
     with ops.Graph().as_default() as g:
@@ -56,20 +56,20 @@ class GlobalStepTest(test.TestCase):
           trainable=False,
           dtype=dtypes.int32,
           name=ops.GraphKeys.GLOBAL_STEP)
-      self.assertRaisesRegexp(TypeError, 'not scalar',
-                              training_util.get_global_step)
-    self.assertRaisesRegexp(TypeError, 'not scalar',
-                            training_util.get_global_step, g)
+      self.assertRaisesRegex(TypeError, 'not scalar',
+                             training_util.get_global_step)
+    self.assertRaisesRegex(TypeError, 'not scalar',
+                           training_util.get_global_step, g)
 
   def test_create_global_step(self):
     self.assertIsNone(training_util.get_global_step())
     with ops.Graph().as_default() as g:
       global_step = training_util.create_global_step()
       self._assert_global_step(global_step)
-      self.assertRaisesRegexp(ValueError, 'already exists',
-                              training_util.create_global_step)
-      self.assertRaisesRegexp(ValueError, 'already exists',
-                              training_util.create_global_step, g)
+      self.assertRaisesRegex(ValueError, 'already exists',
+                             training_util.create_global_step)
+      self.assertRaisesRegex(ValueError, 'already exists',
+                             training_util.create_global_step, g)
       self._assert_global_step(training_util.create_global_step(ops.Graph()))
 
   def test_get_global_step(self):
diff --git a/tensorflow/python/util/deprecation_test.py b/tensorflow/python/util/deprecation_test.py
index a6ca3c6fda8..20c0846cfb8 100644
--- a/tensorflow/python/util/deprecation_test.py
+++ b/tensorflow/python/util/deprecation_test.py
@@ -39,8 +39,7 @@ class DeprecatedAliasTest(test.TestCase):
     deprecated_func("FAKE ERROR!")
     self.assertEqual(1, mock_warning.call_count)
     # Make sure the error points to the right file.
-    self.assertRegexpMatches(mock_warning.call_args[0][1],
-                             r"deprecation_test\.py:")
+    self.assertRegex(mock_warning.call_args[0][1], r"deprecation_test\.py:")
     deprecated_func("ANOTHER FAKE ERROR!")
     self.assertEqual(1, mock_warning.call_count)
 
@@ -67,8 +66,7 @@ class DeprecatedAliasTest(test.TestCase):
     deprecated_cls("deprecated")
     self.assertEqual(1, mock_warning.call_count)
     # Make sure the error points to the right file.
-    self.assertRegexpMatches(mock_warning.call_args[0][1],
-                             r"deprecation_test\.py:")
+    self.assertRegex(mock_warning.call_args[0][1], r"deprecation_test\.py:")
     deprecated_cls("deprecated again")
     self.assertEqual(1, mock_warning.call_count)
 
@@ -123,14 +121,14 @@ class DeprecationTest(test.TestCase):
 
   def test_deprecated_illegal_args(self):
     instructions = "This is how you update..."
-    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
+    with self.assertRaisesRegex(ValueError, "YYYY-MM-DD"):
       deprecation.deprecated("", instructions)
-    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
+    with self.assertRaisesRegex(ValueError, "YYYY-MM-DD"):
       deprecation.deprecated("07-04-2016", instructions)
     date = "2016-07-04"
-    with self.assertRaisesRegexp(ValueError, "instructions"):
+    with self.assertRaisesRegex(ValueError, "instructions"):
       deprecation.deprecated(date, None)
-    with self.assertRaisesRegexp(ValueError, "instructions"):
+    with self.assertRaisesRegex(ValueError, "instructions"):
       deprecation.deprecated(date, "")
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -169,8 +167,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(
-        args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["in a future version", instructions]),
                         set(args[1:]))
 
@@ -212,7 +209,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -238,7 +235,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -264,7 +261,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -309,7 +306,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual(3, _Object()._fn(1, 2))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -339,7 +336,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual(3, _Object()._fn(1, 2))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -369,11 +366,11 @@ class DeprecationTest(test.TestCase):
     self.assertEqual(3, _Object()._fn(1, 2))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   def test_prop_wrong_order(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "make sure @property appears before @deprecated in your source code"):
       # pylint: disable=unused-variable
@@ -424,7 +421,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual("prop_with_doc", _Object()._prop)
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -455,7 +452,7 @@ class DeprecationTest(test.TestCase):
     self.assertEqual("prop_no_doc", _Object()._prop)
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
 
@@ -469,15 +466,15 @@ class DeprecatedArgsTest(test.TestCase):
   def test_deprecated_illegal_args(self):
     instructions = "This is how you update..."
     date = "2016-07-04"
-    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
+    with self.assertRaisesRegex(ValueError, "YYYY-MM-DD"):
       deprecation.deprecated_args("", instructions, "deprecated")
-    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
+    with self.assertRaisesRegex(ValueError, "YYYY-MM-DD"):
       deprecation.deprecated_args("07-04-2016", instructions, "deprecated")
-    with self.assertRaisesRegexp(ValueError, "instructions"):
+    with self.assertRaisesRegex(ValueError, "instructions"):
       deprecation.deprecated_args(date, None, "deprecated")
-    with self.assertRaisesRegexp(ValueError, "instructions"):
+    with self.assertRaisesRegex(ValueError, "instructions"):
       deprecation.deprecated_args(date, "", "deprecated")
-    with self.assertRaisesRegexp(ValueError, "argument"):
+    with self.assertRaisesRegex(ValueError, "argument"):
       deprecation.deprecated_args(date, instructions)
 
   def test_deprecated_missing_args(self):
@@ -488,7 +485,7 @@ class DeprecatedArgsTest(test.TestCase):
       return arg0 + arg1 if deprecated else arg1 + arg0
 
     # Assert calls without the deprecated argument log nothing.
-    with self.assertRaisesRegexp(ValueError, "not present.*\\['missing'\\]"):
+    with self.assertRaisesRegex(ValueError, "not present.*\\['missing'\\]"):
       deprecation.deprecated_args(date, instructions, "missing")(_fn)
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -536,7 +533,7 @@ class DeprecatedArgsTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, True))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -567,7 +564,7 @@ class DeprecatedArgsTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, True))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -598,7 +595,7 @@ class DeprecatedArgsTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, True))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -619,7 +616,7 @@ class DeprecatedArgsTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, True, False))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -640,7 +637,7 @@ class DeprecatedArgsTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, a=True, b=False))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -661,11 +658,11 @@ class DeprecatedArgsTest(test.TestCase):
     self.assertEqual(2, _fn(1, None, 2, d2=False))
     self.assertEqual(2, mock_warning.call_count)
     (args1, _) = mock_warning.call_args_list[0]
-    self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
+    self.assertRegex(args1[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions, "d1"]),
                         set(args1[1:]))
     (args2, _) = mock_warning.call_args_list[1]
-    self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
+    self.assertRegex(args2[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions, "d2"]),
                         set(args2[1:]))
 
@@ -688,11 +685,11 @@ class DeprecatedArgsTest(test.TestCase):
     self.assertEqual(2, _fn(1, False, 2, d2=False))
     self.assertEqual(2, mock_warning.call_count)
     (args1, _) = mock_warning.call_args_list[0]
-    self.assertRegexpMatches(args1[0], r"deprecated and will be removed")
+    self.assertRegex(args1[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions, "d1"]),
                         set(args1[1:]))
     (args2, _) = mock_warning.call_args_list[1]
-    self.assertRegexpMatches(args2[0], r"deprecated and will be removed")
+    self.assertRegex(args2[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions, "d2"]),
                         set(args2[1:]))
 
@@ -751,17 +748,17 @@ class DeprecatedArgValuesTest(test.TestCase):
 
   def test_deprecated_illegal_args(self):
     instructions = "This is how you update..."
-    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
+    with self.assertRaisesRegex(ValueError, "YYYY-MM-DD"):
       deprecation.deprecated_arg_values("", instructions, deprecated=True)
-    with self.assertRaisesRegexp(ValueError, "YYYY-MM-DD"):
+    with self.assertRaisesRegex(ValueError, "YYYY-MM-DD"):
       deprecation.deprecated_arg_values(
           "07-04-2016", instructions, deprecated=True)
     date = "2016-07-04"
-    with self.assertRaisesRegexp(ValueError, "instructions"):
+    with self.assertRaisesRegex(ValueError, "instructions"):
       deprecation.deprecated_arg_values(date, None, deprecated=True)
-    with self.assertRaisesRegexp(ValueError, "instructions"):
+    with self.assertRaisesRegex(ValueError, "instructions"):
       deprecation.deprecated_arg_values(date, "", deprecated=True)
-    with self.assertRaisesRegexp(ValueError, "argument"):
+    with self.assertRaisesRegex(ValueError, "argument"):
       deprecation.deprecated_arg_values(date, instructions)
 
   @test.mock.patch.object(logging, "warning", autospec=True)
   @test.mock.patch.object(logging, "warning", autospec=True)
@@ -810,7 +807,7 @@ class DeprecatedArgValuesTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, deprecated=True))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
     # Assert calling new fn with default deprecated value issues log warning.
@@ -846,7 +843,7 @@ class DeprecatedArgValuesTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, deprecated=True))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
     # Assert calling new fn with default deprecated value issues log warning.
@@ -882,7 +879,7 @@ class DeprecatedArgValuesTest(test.TestCase):
     self.assertEqual(3, _fn(1, 2, deprecated=True))
     self.assertEqual(1, mock_warning.call_count)
     (args, _) = mock_warning.call_args
-    self.assertRegexpMatches(args[0], r"deprecated and will be removed")
+    self.assertRegex(args[0], r"deprecated and will be removed")
     self._assert_subset(set(["after " + date, instructions]), set(args[1:]))
 
     # Assert calling new fn with default deprecated value issues log warning.
@@ -936,8 +933,8 @@ class DeprecationArgumentsTest(test.TestCase):
     self.assertEqual(
         deprecation.deprecated_argument_lookup("val_new", None, "val_old",
                                                good_value), good_value)
-    with self.assertRaisesRegexp(ValueError,
-                                 "Cannot specify both 'val_old' and 'val_new'"):
+    with self.assertRaisesRegex(ValueError,
+                                "Cannot specify both 'val_old' and 'val_new'"):
       self.assertEqual(
           deprecation.deprecated_argument_lookup("val_new", good_value,
                                                  "val_old", good_value),
diff --git a/tensorflow/python/util/dispatch_test.py b/tensorflow/python/util/dispatch_test.py
index bd35c391924..49026a754e4 100644
--- a/tensorflow/python/util/dispatch_test.py
+++ b/tensorflow/python/util/dispatch_test.py
@@ -141,8 +141,10 @@ class DispatchTest(test_util.TensorFlowTestCase):
     test_op._tf_dispatchers = original_handlers
 
   def testDispatchForTypes_SignatureMismatch(self):
-    with self.assertRaisesRegexp(AssertionError, "The decorated function's "
-                                 "signature must exactly match.*"):
+    with self.assertRaisesRegex(
+        AssertionError, "The decorated function's "
+        "signature must exactly match.*"):
+
       @dispatch.dispatch_for_types(test_op, CustomTensor)
       def override_for_test_op(a, b, c):  # pylint: disable=unused-variable
         return CustomTensor(test_op(a.tensor, b.tensor, c.tensor),
@@ -152,7 +154,8 @@ class DispatchTest(test_util.TensorFlowTestCase):
     def some_op(x, y):
       return x + y
 
-    with self.assertRaisesRegexp(AssertionError, "Dispatching not enabled for"):
+    with self.assertRaisesRegex(AssertionError, "Dispatching not enabled for"):
+
       @dispatch.dispatch_for_types(some_op, CustomTensor)
       def override_for_some_op(x, y):  # pylint: disable=unused-variable
         return x if x.score > 0 else y
@@ -167,9 +170,8 @@ class DispatchTest(test_util.TensorFlowTestCase):
     some_op(5)
 
     message = mock_warning.call_args[0][0] % mock_warning.call_args[0][1:]
-    self.assertRegexpMatches(
-        message,
-        r".*some_op \(from __main__\) is deprecated and will be "
+    self.assertRegex(
+        message, r".*some_op \(from __main__\) is deprecated and will be "
         "removed in a future version.*")
 
   def testGlobalDispatcher(self):
diff --git a/tensorflow/python/util/function_utils_test.py b/tensorflow/python/util/function_utils_test.py
index 8fc740492c6..546b23a7af1 100644
--- a/tensorflow/python/util/function_utils_test.py
+++ b/tensorflow/python/util/function_utils_test.py
@@ -234,8 +234,8 @@ class HasKwargsTest(test.TestCase):
     self.assertEqual(double_wrapped_fn(some_arg), some_arg)
 
   def test_raises_type_error(self):
-    with self.assertRaisesRegexp(
-        TypeError, 'fn should be a function-like object'):
+    with self.assertRaisesRegex(TypeError,
+                                'fn should be a function-like object'):
       function_utils.has_kwargs('not a function')
 
 
@@ -253,15 +253,14 @@ class GetFuncNameTest(test.TestCase):
 
   def testWithCallableClass(self):
     callable_instance = SillyCallableClass()
-    self.assertRegexpMatches(
+    self.assertRegex(
         function_utils.get_func_name(callable_instance),
         '<.*SillyCallableClass.*>')
 
   def testWithFunctoolsPartial(self):
     partial = functools.partial(silly_example_function)
-    self.assertRegexpMatches(
-        function_utils.get_func_name(partial),
-        '<.*functools.partial.*>')
+    self.assertRegex(
+        function_utils.get_func_name(partial), '<.*functools.partial.*>')
 
   def testWithLambda(self):
     anon_fn = lambda x: x
@@ -277,24 +276,24 @@ class GetFuncCodeTest(test.TestCase):
   def testWithSimpleFunction(self):
     code = function_utils.get_func_code(silly_example_function)
     self.assertIsNotNone(code)
-    self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
+    self.assertRegex(code.co_filename, 'function_utils_test.py')
 
   def testWithClassMethod(self):
     code = function_utils.get_func_code(self.testWithClassMethod)
     self.assertIsNotNone(code)
-    self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
+    self.assertRegex(code.co_filename, 'function_utils_test.py')
 
   def testWithCallableClass(self):
     callable_instance = SillyCallableClass()
     code = function_utils.get_func_code(callable_instance)
     self.assertIsNotNone(code)
-    self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
+    self.assertRegex(code.co_filename, 'function_utils_test.py')
 
   def testWithLambda(self):
     anon_fn = lambda x: x
     code = function_utils.get_func_code(anon_fn)
     self.assertIsNotNone(code)
-    self.assertRegexpMatches(code.co_filename, 'function_utils_test.py')
+    self.assertRegex(code.co_filename, 'function_utils_test.py')
 
   def testWithFunctoolsPartial(self):
     partial = functools.partial(silly_example_function)
diff --git a/tensorflow/python/util/keyword_args_test.py b/tensorflow/python/util/keyword_args_test.py
index 87c95bf3feb..637f5b72299 100644
--- a/tensorflow/python/util/keyword_args_test.py
+++ b/tensorflow/python/util/keyword_args_test.py
@@ -38,12 +38,12 @@ class KeywordArgsTest(test.TestCase):
     self.assertEqual(3, func_with_decorator(a=1, b=2))
 
     # Providing non-keyword args should fail.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Must use keyword args to call func_with_decorator."):
       self.assertEqual(3, func_with_decorator(1, 2))
 
     # Partially providing keyword args should fail.
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Must use keyword args to call func_with_decorator."):
       self.assertEqual(3, func_with_decorator(1, b=2))
 
diff --git a/tensorflow/python/util/nest_test.py b/tensorflow/python/util/nest_test.py
index eb5523d1a40..ca808ba9ff1 100644
--- a/tensorflow/python/util/nest_test.py
+++ b/tensorflow/python/util/nest_test.py
@@ -104,7 +104,7 @@ class NestTest(parameterized.TestCase, test.TestCase):
     self.assertEqual(restructured_from_flat, sample_attr)
 
     # Check that flatten fails if attributes are not iterable
-    with self.assertRaisesRegexp(TypeError, "object is not iterable"):
+    with self.assertRaisesRegex(TypeError, "object is not iterable"):
       flat = nest.flatten(NestTest.BadAttr())
 
   @parameterized.parameters(
@@ -148,11 +148,10 @@ class NestTest(parameterized.TestCase, test.TestCase):
     self.assertEqual(
         np.array([5]), nest.pack_sequence_as("scalar", [np.array([5])]))
 
-    with self.assertRaisesRegexp(
-        ValueError, self.unsafe_map_pattern):
+    with self.assertRaisesRegex(ValueError, self.unsafe_map_pattern):
       nest.pack_sequence_as("scalar", [4, 5])
 
-    with self.assertRaisesRegexp(TypeError, self.bad_pack_pattern):
+    with self.assertRaisesRegex(TypeError, self.bad_pack_pattern):
       nest.pack_sequence_as([4, 5], "bad_sequence")
 
     with self.assertRaises(ValueError):
@@ -272,12 +271,11 @@ class NestTest(parameterized.TestCase, test.TestCase):
     self.assertEqual(structure, unflattened)
 
   def testPackSequenceAs_notIterableError(self):
-    with self.assertRaisesRegexp(
-        TypeError, self.bad_pack_pattern):
+    with self.assertRaisesRegex(TypeError, self.bad_pack_pattern):
       nest.pack_sequence_as("hi", "bye")
 
   def testPackSequenceAs_wrongLengthsError(self):
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         "Structure had 2 elements, but flat_sequence had 3 elements."):
       nest.pack_sequence_as(["hello", "world"],
@@ -310,13 +308,13 @@ class NestTest(parameterized.TestCase, test.TestCase):
       nest.flatten_dict_items(4)
 
     bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
-    with self.assertRaisesRegexp(ValueError, "not unique"):
+    with self.assertRaisesRegex(ValueError, "not unique"):
       nest.flatten_dict_items(bad_dictionary)
 
     another_bad_dictionary = mapping_type({
         (4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
     })
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
       nest.flatten_dict_items(another_bad_dictionary)
 
@@ -347,7 +345,7 @@ class NestTest(parameterized.TestCase, test.TestCase):
     nest.assert_same_structure("abc", np.array([0, 1]))
     nest.assert_same_structure("abc", constant_op.constant([0, 1]))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         ("The two structures don't have the same nested structure\\.\n\n"
          "First structure:.*?\n\n"
@@ -361,7 +359,7 @@ class NestTest(parameterized.TestCase, test.TestCase):
          r"\(\., \.\)")):
       nest.assert_same_structure(structure1, structure_different_num_elements)
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         ("The two structures don't have the same nested structure\\.\n\n"
          "First structure:.*?\n\n"
@@ -371,7 +369,7 @@ class NestTest(parameterized.TestCase, test.TestCase):
          "is not")):
       nest.assert_same_structure([0, 1], np.array([0, 1]))
 
-    with self.assertRaisesRegexp(
+    with self.assertRaisesRegex(
         ValueError,
         ("The two structures don't have the same nested structure\\.\n\n"
          "First structure:.*?\n\n"
@@ -383,10 +381,9 @@ class NestTest(parameterized.TestCase, test.TestCase):
 
     self.assertRaises(TypeError, nest.assert_same_structure, (0, 1), [0, 1])
 
-    with self.assertRaisesRegexp(
-        ValueError,
-        ("don't have the same nested structure\\.\n\n"
-         "First structure: .*?\n\nSecond structure: ")):
+    with self.assertRaisesRegex(ValueError,
+                                ("don't have the same nested structure\\.\n\n"
+                                 "First structure: .*?\n\nSecond structure: ")):
       nest.assert_same_structure(structure1, structure_different_nesting)
 
     self.assertRaises(TypeError, nest.assert_same_structure, (0, 1),
@@ -398,28 +395,24 @@ class NestTest(parameterized.TestCase, test.TestCase):
     self.assertRaises(TypeError, nest.assert_same_structure,
                       NestTest.Named0ab(3, 4), NestTest.Named1ab(3, 4))
 
-    with self.assertRaisesRegexp(
-        ValueError,
-        ("don't have the same nested structure\\.\n\n"
-         "First structure: .*?\n\nSecond structure: ")):
+    with self.assertRaisesRegex(ValueError,
+                                ("don't have the same nested structure\\.\n\n"
+                                 "First structure: .*?\n\nSecond structure: ")):
       nest.assert_same_structure(NestTest.Named0ab(3, 4),
                                  NestTest.Named0ab([3], 4))
 
-    with self.assertRaisesRegexp(
-        ValueError,
-        ("don't have the same nested structure\\.\n\n"
-         "First structure: .*?\n\nSecond structure: ")):
+    with self.assertRaisesRegex(ValueError,
+                                ("don't have the same nested structure\\.\n\n"
+                                 "First structure: .*?\n\nSecond structure: ")):
       nest.assert_same_structure([[3], 4], [3, [4]])
 
     structure1_list = [[[1, 2], 3], 4, [5, 6]]
-    with self.assertRaisesRegexp(TypeError,
-                                 "don't have the same sequence type"):
+    with self.assertRaisesRegex(TypeError, "don't have the same sequence type"):
       nest.assert_same_structure(structure1, structure1_list)
     nest.assert_same_structure(structure1, structure2, check_types=False)
     nest.assert_same_structure(structure1, structure1_list, check_types=False)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "don't have the same set of keys"):
+    with self.assertRaisesRegex(ValueError, "don't have the same set of keys"):
       nest.assert_same_structure({"a": 1}, {"b": 1})
 
     nest.assert_same_structure(NestTest.SameNameab(0, 1),
@@ -432,7 +425,7 @@ class NestTest(parameterized.TestCase, test.TestCase):
         NestTest.SameNameab2(NestTest.SameName1xy2(2, 3), 4))
 
     expected_message = "The two structures don't have the same.*"
-    with self.assertRaisesRegexp(ValueError, expected_message):
+    with self.assertRaisesRegex(ValueError, expected_message):
       nest.assert_same_structure(
           NestTest.SameNameab(0, NestTest.SameNameab2(1, 2)),
           NestTest.SameNameab2(NestTest.SameNameab(0, 1), 2))
@@ -491,41 +484,39 @@ class NestTest(parameterized.TestCase, test.TestCase):
     # This is checking actual equality of types, empty list != empty tuple
     self.assertNotEqual((), nest.map_structure(lambda x: x + 1, []))
 
-    with self.assertRaisesRegexp(TypeError, "callable"):
+    with self.assertRaisesRegex(TypeError, "callable"):
       nest.map_structure("bad", structure1_plus1)
 
-    with self.assertRaisesRegexp(ValueError, "at least one structure"):
+    with self.assertRaisesRegex(ValueError, "at least one structure"):
       nest.map_structure(lambda x: x)
 
-    with self.assertRaisesRegexp(ValueError, "same number of elements"):
+    with self.assertRaisesRegex(ValueError, "same number of elements"):
       nest.map_structure(lambda x, y: None, (3, 4), (3, 4, 5))
 
-    with self.assertRaisesRegexp(ValueError, "same nested structure"):
+    with self.assertRaisesRegex(ValueError, "same nested structure"):
       nest.map_structure(lambda x, y: None, 3, (3,))
 
-    with self.assertRaisesRegexp(TypeError, "same sequence type"):
+    with self.assertRaisesRegex(TypeError, "same sequence type"):
       nest.map_structure(lambda x, y: None, ((3, 4), 5), [(3, 4), 5])
 
-    with self.assertRaisesRegexp(ValueError, "same nested structure"):
+    with self.assertRaisesRegex(ValueError, "same nested structure"):
       nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)))
 
     structure1_list = [[[1, 2], 3], 4, [5, 6]]
-    with self.assertRaisesRegexp(TypeError, "same sequence type"):
+    with self.assertRaisesRegex(TypeError, "same sequence type"):
       nest.map_structure(lambda x, y: None, structure1, structure1_list)
 
     nest.map_structure(lambda x, y: None, structure1, structure1_list,
                        check_types=False)
 
-    with self.assertRaisesRegexp(ValueError, "same nested structure"):
+    with self.assertRaisesRegex(ValueError, "same nested structure"):
       nest.map_structure(lambda x, y: None, ((3, 4), 5), (3, (4, 5)),
                          check_types=False)
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Only valid keyword argument.*foo"):
+    with self.assertRaisesRegex(ValueError, "Only valid keyword argument.*foo"):
       nest.map_structure(lambda x: None, structure1, foo="a")
 
-    with self.assertRaisesRegexp(ValueError,
-                                 "Only valid keyword argument.*foo"):
+    with self.assertRaisesRegex(ValueError, "Only valid keyword argument.*foo"):
       nest.map_structure(lambda x: None, structure1, check_types=False, foo="a")
 
   ABTuple = collections.namedtuple("ab_tuple", "a, b")  # pylint: disable=invalid-name
@@ -725,14 +716,14 @@ class NestTest(parameterized.TestCase, test.TestCase):
     shallow_tree = ["shallow_tree"]
     expected_message = ("If shallow structure is a sequence, input must also "
                         "be a sequence. Input has type: <(type|class) 'str'>.")
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, shallow_tree)
 
     input_tree = "input_tree"
     shallow_tree = ["shallow_tree_9", "shallow_tree_8"]
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, shallow_tree)
@@ -742,14 +733,14 @@ class NestTest(parameterized.TestCase, test.TestCase):
     shallow_tree = [9]
     expected_message = ("If shallow structure is a sequence, input must also "
                         "be a sequence. Input has type: <(type|class) 'int'>.")
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, shallow_tree)
 
     input_tree = 0
     shallow_tree = [9, 8]
-    with self.assertRaisesRegexp(TypeError, expected_message):
+    with self.assertRaisesRegex(TypeError, expected_message):
       flattened_input_tree = nest.flatten_up_to(shallow_tree, input_tree)
     flattened_shallow_tree = nest.flatten_up_to(shallow_tree, shallow_tree)
     self.assertEqual(flattened_shallow_tree, shallow_tree)
@@ -758,7 +749,7 @@ class NestTest(parameterized.TestCase, test.TestCase):
     shallow_tree = [(1,), (2,)]
     expected_message = nest._STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
         input_length=len(input_tree), shallow_length=len(shallow_tree))
-    with self.assertRaisesRegexp(ValueError, expected_message):  # pylint: disable=g-error-prone-assert-raises
+    with self.assertRaisesRegex(ValueError, expected_message):  # pylint: disable=g-error-prone-assert-raises
       nest.assert_shallow_structure(shallow_tree, input_tree)
 
   def testFlattenWithTuplePathsUpTo(self):
@@ -1074,14 +1065,14 @@ class NestTest(parameterized.TestCase, test.TestCase):
     nest.assert_shallow_structure(structure_traverse_r,
                                   structure_traverse_input)
 
-    with self.assertRaisesRegexp(TypeError, "returned structure"):
+    with self.assertRaisesRegex(TypeError, "returned structure"):
       nest.get_traverse_shallow_structure(lambda _: [True], 0)
 
-    with self.assertRaisesRegexp(TypeError, "returned a non-bool scalar"):
+    with self.assertRaisesRegex(TypeError, "returned a non-bool scalar"):
       nest.get_traverse_shallow_structure(lambda _: 1, [1])
 
-    with self.assertRaisesRegexp(
-        TypeError, "didn't return a depth=1 structure of bools"):
+    with self.assertRaisesRegex(TypeError,
+                                "didn't return a depth=1 structure of bools"):
       nest.get_traverse_shallow_structure(lambda _: [1], [1])
 
   def testYieldFlatStringPaths(self):
@@ -1214,7 +1205,7 @@ class NestTest(parameterized.TestCase, test.TestCase):
 
   def testFlattenCustomSequenceThatRaisesException(self):  # b/140746865
     seq = _CustomSequenceThatRaisesException()
-    with self.assertRaisesRegexp(ValueError, "Cannot get item"):
+    with self.assertRaisesRegex(ValueError, "Cannot get item"):
       nest.flatten(seq)
 
   def testListToTuple(self):
diff --git a/tensorflow/python/util/protobuf/compare_test.py b/tensorflow/python/util/protobuf/compare_test.py
index 2e5e83df990..229d5d78db6 100644
--- a/tensorflow/python/util/protobuf/compare_test.py
+++ b/tensorflow/python/util/protobuf/compare_test.py
@@ -47,53 +47,53 @@ class ProtoEqTest(googletest.TestCase):
   def assertNotEquals(self, a, b):
     """Asserts that ProtoEq says a != b."""
     a, b = LargePbs(a, b)
-    googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), False)
+    googletest.TestCase.assertEqual(self, compare.ProtoEq(a, b), False)
 
-  def assertEquals(self, a, b):
+  def assertEqual(self, a, b):
     """Asserts that ProtoEq says a == b."""
     a, b = LargePbs(a, b)
-    googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), True)
+    googletest.TestCase.assertEqual(self, compare.ProtoEq(a, b), True)
 
   def testPrimitives(self):
     googletest.TestCase.assertEqual(self, True, compare.ProtoEq('a', 'a'))
     googletest.TestCase.assertEqual(self, False, compare.ProtoEq('b', 'a'))
 
   def testEmpty(self):
-    self.assertEquals('', '')
+    self.assertEqual('', '')
 
   def testPrimitiveFields(self):
     self.assertNotEquals('string_: "a"', '')
-    self.assertEquals('string_: "a"', 'string_: "a"')
+    self.assertEqual('string_: "a"', 'string_: "a"')
     self.assertNotEquals('string_: "b"', 'string_: "a"')
     self.assertNotEquals('string_: "ab"', 'string_: "aa"')
 
     self.assertNotEquals('int64_: 0', '')
-    self.assertEquals('int64_: 0', 'int64_: 0')
+    self.assertEqual('int64_: 0', 'int64_: 0')
     self.assertNotEquals('int64_: -1', '')
     self.assertNotEquals('int64_: 1', 'int64_: 0')
     self.assertNotEquals('int64_: 0', 'int64_: -1')
 
     self.assertNotEquals('float_: 0.0', '')
-    self.assertEquals('float_: 0.0', 'float_: 0.0')
+    self.assertEqual('float_: 0.0', 'float_: 0.0')
     self.assertNotEquals('float_: -0.1', '')
     self.assertNotEquals('float_: 3.14', 'float_: 0')
     self.assertNotEquals('float_: 0', 'float_: -0.1')
-    self.assertEquals('float_: -0.1', 'float_: -0.1')
+    self.assertEqual('float_: -0.1', 'float_: -0.1')
 
     self.assertNotEquals('bool_: true', '')
     self.assertNotEquals('bool_: false', '')
     self.assertNotEquals('bool_: true', 'bool_: false')
-    self.assertEquals('bool_: false', 'bool_: false')
-    self.assertEquals('bool_: true', 'bool_: true')
+    self.assertEqual('bool_: false', 'bool_: false')
+    self.assertEqual('bool_: true', 'bool_: true')
 
     self.assertNotEquals('enum_: A', '')
     self.assertNotEquals('enum_: B', 'enum_: A')
     self.assertNotEquals('enum_: C', 'enum_: B')
-    self.assertEquals('enum_: C', 'enum_: C')
+    self.assertEqual('enum_: C', 'enum_: C')
 
   def testRepeatedPrimitives(self):
     self.assertNotEquals('int64s: 0', '')
-    self.assertEquals('int64s: 0', 'int64s: 0')
+    self.assertEqual('int64s: 0', 'int64s: 0')
     self.assertNotEquals('int64s: 1', 'int64s: 0')
     self.assertNotEquals('int64s: 0 int64s: 0', '')
     self.assertNotEquals('int64s: 0 int64s: 0', 'int64s: 0')
@@ -101,8 +101,8 @@ class ProtoEqTest(googletest.TestCase):
     self.assertNotEquals('int64s: 0 int64s: 1', 'int64s: 0')
     self.assertNotEquals('int64s: 1', 'int64s: 0 int64s: 2')
     self.assertNotEquals('int64s: 2 int64s: 0', 'int64s: 1')
-    self.assertEquals('int64s: 0 int64s: 0', 'int64s: 0 int64s: 0')
-    self.assertEquals('int64s: 0 int64s: 1', 'int64s: 0 int64s: 1')
+    self.assertEqual('int64s: 0 int64s: 0', 'int64s: 0 int64s: 0')
+    self.assertEqual('int64s: 0 int64s: 1', 'int64s: 0 int64s: 1')
     self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 0')
     self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 1')
     self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 2')
@@ -111,10 +111,10 @@ class ProtoEqTest(googletest.TestCase):
 
   def testMessage(self):
     self.assertNotEquals('small <>', '')
-    self.assertEquals('small <>', 'small <>')
+    self.assertEqual('small <>', 'small <>')
     self.assertNotEquals('small < strings: "a" >', '')
     self.assertNotEquals('small < strings: "a" >', 'small <>')
-    self.assertEquals('small < strings: "a" >', 'small < strings: "a" >')
+    self.assertEqual('small < strings: "a" >', 'small < strings: "a" >')
     self.assertNotEquals('small < strings: "b" >', 'small < strings: "a" >')
     self.assertNotEquals('small < strings: "a" strings: "b" >',
                          'small < strings: "a" >')
@@ -124,11 +124,11 @@ class ProtoEqTest(googletest.TestCase):
     self.assertNotEquals('string_: "a"', 'small < strings: "b" strings: "c" >')
     self.assertNotEquals('string_: "a" small <>', 'small <>')
     self.assertNotEquals('string_: "a" small <>', 'small < strings: "b" >')
-    self.assertEquals('string_: "a" small <>', 'string_: "a" small <>')
+    self.assertEqual('string_: "a" small <>', 'string_: "a" small <>')
     self.assertNotEquals('string_: "a" small < strings: "a" >',
                          'string_: "a" small <>')
-    self.assertEquals('string_: "a" small < strings: "a" >',
-                      'string_: "a" small < strings: "a" >')
+    self.assertEqual('string_: "a" small < strings: "a" >',
+                     'string_: "a" small < strings: "a" >')
     self.assertNotEquals('string_: "a" small < strings: "a" >',
                          'int64_: 1 small < strings: "a" >')
     self.assertNotEquals('string_: "a" small < strings: "a" >', 'int64_: 1')
@@ -137,18 +137,18 @@ class ProtoEqTest(googletest.TestCase):
                          'int64_: 1 small < strings: "a" >')
     self.assertNotEquals('string_: "a" int64_: 1 small < strings: "a" >',
                          'string_: "a" int64_: 0 small < strings: "a" >')
-    self.assertEquals('string_: "a" int64_: 0 small < strings: "a" >',
-                      'string_: "a" int64_: 0 small < strings: "a" >')
+    self.assertEqual('string_: "a" int64_: 0 small < strings: "a" >',
+                     'string_: "a" int64_: 0 small < strings: "a" >')
 
   def testNestedMessage(self):
     self.assertNotEquals('medium <>', '')
-    self.assertEquals('medium <>', 'medium <>')
+    self.assertEqual('medium <>', 'medium <>')
     self.assertNotEquals('medium < smalls <> >', 'medium <>')
-    self.assertEquals('medium < smalls <> >', 'medium < smalls <> >')
+    self.assertEqual('medium < smalls <> >', 'medium < smalls <> >')
     self.assertNotEquals('medium < smalls <> smalls <> >',
                          'medium < smalls <> >')
-    self.assertEquals('medium < smalls <> smalls <> >',
-                      'medium < smalls <> smalls <> >')
+    self.assertEqual('medium < smalls <> smalls <> >',
+                     'medium < smalls <> smalls <> >')
 
     self.assertNotEquals('medium < int32s: 0 >', 'medium < smalls <> >')
 
@@ -172,12 +172,12 @@ class ProtoEqTest(googletest.TestCase):
                          '             int64_: 1            ')
     self.assertNotEquals('string_: "b" int64_: 1            ',
                          'string_: "a" int64_: 2            ')
-    self.assertEquals('string_: "a" int64_: 1            ',
-                      'string_: "a" int64_: 1            ')
+    self.assertEqual('string_: "a" int64_: 1            ',
+                     'string_: "a" int64_: 1            ')
     self.assertNotEquals('string_: "a" int64_: 1 float_: 0.0',
                          'string_: "a" int64_: 1            ')
-    self.assertEquals('string_: "a" int64_: 1 float_: 0.0',
-                      'string_: "a" int64_: 1 float_: 0.0')
+    self.assertEqual('string_: "a" int64_: 1 float_: 0.0',
+                     'string_: "a" int64_: 1 float_: 0.0')
     self.assertNotEquals('string_: "a" int64_: 1 float_: 0.1',
                          'string_: "a" int64_: 1 float_: 0.0')
     self.assertNotEquals('string_: "a" int64_: 2 float_: 0.0',
@@ -194,8 +194,8 @@ class ProtoEqTest(googletest.TestCase):
                          'small < strings: "b" >')
     self.assertNotEquals('string_: "a" small < strings: "b" >',
                          'string_: "a" small < strings: "a" >')
-    self.assertEquals('string_: "a" small < strings: "a" >',
-                      'string_: "a" small < strings: "a" >')
+    self.assertEqual('string_: "a" small < strings: "a" >',
+                     'string_: "a" small < strings: "a" >')
 
     self.assertNotEquals('string_: "a" medium <>',
                          'string_: "a" small < strings: "a" >')
@@ -286,8 +286,8 @@ class AssertTest(googletest.TestCase):
   def assertNone(self, a, b, message, **kwargs):
     """Checks that all possible asserts fail with the given message."""
     message = re.escape(textwrap.dedent(message))
-    self.assertRaisesRegexp(AssertionError, message, self.assertProtoEqual, a,
-                            b, **kwargs)
+    self.assertRaisesRegex(AssertionError, message, self.assertProtoEqual, a, b,
+                           **kwargs)
 
   def testCheckInitialized(self):
     # neither is initialized
@@ -427,7 +427,7 @@ class AssertTest(googletest.TestCase):
                     """)
 
   def testMsgPassdown(self):
-    self.assertRaisesRegexp(
+    self.assertRaisesRegex(
         AssertionError,
         'test message passed down',
         self.assertProtoEqual,
diff --git a/tensorflow/python/util/tf_export_test.py b/tensorflow/python/util/tf_export_test.py
index 6716560b79b..2d434cc9e8f 100644
--- a/tensorflow/python/util/tf_export_test.py
+++ b/tensorflow/python/util/tf_export_test.py
@@ -76,12 +76,12 @@ class ValidateExportTest(test.TestCase):
   def testExportSingleFunction(self):
     export_decorator = tf_export.tf_export('nameA', 'nameB')
     decorated_function = export_decorator(_test_function)
-    self.assertEquals(decorated_function, _test_function)
-    self.assertEquals(('nameA', 'nameB'), decorated_function._tf_api_names)
-    self.assertEquals(['nameA', 'nameB'],
-                      tf_export.get_v1_names(decorated_function))
-    self.assertEquals(['nameA', 'nameB'],
-                      tf_export.get_v2_names(decorated_function))
+    self.assertEqual(decorated_function, _test_function)
+    self.assertEqual(('nameA', 'nameB'), decorated_function._tf_api_names)
+    self.assertEqual(['nameA', 'nameB'],
+                     tf_export.get_v1_names(decorated_function))
+    self.assertEqual(['nameA', 'nameB'],
+                     tf_export.get_v2_names(decorated_function))
     self.assertEqual(tf_export.get_symbol_from_name('nameA'),
                      decorated_function)
     self.assertEqual(tf_export.get_symbol_from_name('nameB'),
@@ -115,10 +115,10 @@ class ValidateExportTest(test.TestCase):
     export_decorator2 = tf_export.tf_export('nameC', 'nameD')
     decorated_function1 = export_decorator1(_test_function)
     decorated_function2 = export_decorator2(_test_function2)
-    self.assertEquals(decorated_function1, _test_function)
-    self.assertEquals(decorated_function2, _test_function2)
-    self.assertEquals(('nameA', 'nameB'), decorated_function1._tf_api_names)
-    self.assertEquals(('nameC', 'nameD'), decorated_function2._tf_api_names)
+    self.assertEqual(decorated_function1, _test_function)
+    self.assertEqual(decorated_function2, _test_function2)
+    self.assertEqual(('nameA', 'nameB'), decorated_function1._tf_api_names)
+    self.assertEqual(('nameC', 'nameD'), decorated_function2._tf_api_names)
     self.assertEqual(tf_export.get_symbol_from_name('nameB'),
                      decorated_function1)
     self.assertEqual(tf_export.get_symbol_from_name('nameD'),
@@ -137,41 +137,41 @@ class ValidateExportTest(test.TestCase):
   def testExportClasses(self):
     export_decorator_a = tf_export.tf_export('TestClassA1')
     export_decorator_a(TestClassA)
-    self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
+    self.assertEqual(('TestClassA1',), TestClassA._tf_api_names)
     self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
 
     export_decorator_b = tf_export.tf_export('TestClassB1')
     export_decorator_b(TestClassB)
-    self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
-    self.assertEquals(('TestClassB1',), TestClassB._tf_api_names)
-    self.assertEquals(['TestClassA1'], tf_export.get_v1_names(TestClassA))
-    self.assertEquals(['TestClassB1'], tf_export.get_v1_names(TestClassB))
+    self.assertEqual(('TestClassA1',), TestClassA._tf_api_names)
+    self.assertEqual(('TestClassB1',), TestClassB._tf_api_names)
+    self.assertEqual(['TestClassA1'], tf_export.get_v1_names(TestClassA))
+    self.assertEqual(['TestClassB1'], tf_export.get_v1_names(TestClassB))
 
   def testExportClassInEstimator(self):
     export_decorator_a = tf_export.tf_export('TestClassA1')
     export_decorator_a(TestClassA)
-    self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
+    self.assertEqual(('TestClassA1',), TestClassA._tf_api_names)
 
     export_decorator_b = tf_export.estimator_export(
         'estimator.TestClassB1')
     export_decorator_b(TestClassB)
     self.assertTrue('_tf_api_names' not in TestClassB.__dict__)
-    self.assertEquals(('TestClassA1',), TestClassA._tf_api_names)
-    self.assertEquals(['TestClassA1'], tf_export.get_v1_names(TestClassA))
-    self.assertEquals(['estimator.TestClassB1'],
-                      tf_export.get_v1_names(TestClassB))
+    self.assertEqual(('TestClassA1',), TestClassA._tf_api_names)
+    self.assertEqual(['TestClassA1'], tf_export.get_v1_names(TestClassA))
+    self.assertEqual(['estimator.TestClassB1'],
+                     tf_export.get_v1_names(TestClassB))
 
   def testExportSingleConstant(self):
     module1 = self._CreateMockModule('module1')
 
     export_decorator = tf_export.tf_export('NAME_A', 'NAME_B')
     export_decorator.export_constant('module1', 'test_constant')
-    self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
-                      module1._tf_api_constants)
-    self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
-                      tf_export.get_v1_constants(module1))
-    self.assertEquals([(('NAME_A', 'NAME_B'), 'test_constant')],
-                      tf_export.get_v2_constants(module1))
+    self.assertEqual([(('NAME_A', 'NAME_B'), 'test_constant')],
+                     module1._tf_api_constants)
+    self.assertEqual([(('NAME_A', 'NAME_B'), 'test_constant')],
+                     tf_export.get_v1_constants(module1))
+    self.assertEqual([(('NAME_A', 'NAME_B'), 'test_constant')],
+                     tf_export.get_v2_constants(module1))
 
   def testExportMultipleConstants(self):
     module1 = self._CreateMockModule('module1')
@@ -187,11 +187,9 @@ class ValidateExportTest(test.TestCase):
     export_decorator1.export_constant('module1', test_constant1)
     export_decorator2.export_constant('module2', test_constant2)
     export_decorator3.export_constant('module2', test_constant3)
-    self.assertEquals([(('NAME_A', 'NAME_B'), 123)],
-                      module1._tf_api_constants)
-    self.assertEquals([(('NAME_C', 'NAME_D'), 'abc'),
-                       (('NAME_E', 'NAME_F'), 0.5)],
-                      module2._tf_api_constants)
+    self.assertEqual([(('NAME_A', 'NAME_B'), 123)], module1._tf_api_constants)
+    self.assertEqual([(('NAME_C', 'NAME_D'), 'abc'),
+                      (('NAME_E', 'NAME_F'), 0.5)], module2._tf_api_constants)
 
   def testRaisesExceptionIfAlreadyHasAPINames(self):
     _test_function._tf_api_names = ['abc']
@@ -239,8 +237,8 @@ class ValidateExportTest(test.TestCase):
 
     export_decorator = tf_export.tf_export('nameA', 'nameB')
     exported_function = export_decorator(decorated_function)
-    self.assertEquals(decorated_function, exported_function)
-    self.assertEquals(('nameA', 'nameB'), _test_function._tf_api_names)
+    self.assertEqual(decorated_function, exported_function)
+    self.assertEqual(('nameA', 'nameB'), _test_function._tf_api_names)
 
 
 if __name__ == '__main__':
diff --git a/tensorflow/python/util/tf_inspect_test.py b/tensorflow/python/util/tf_inspect_test.py
index 44afdd262d2..9989fa164d9 100644
--- a/tensorflow/python/util/tf_inspect_test.py
+++ b/tensorflow/python/util/tf_inspect_test.py
@@ -133,7 +133,7 @@ class TfInspectTest(test.TestCase):
     exception_message = (r"Some arguments \['n'\] do not have default value, "
                          "but they are positioned after those with default "
                          "values. This can not be expressed with ArgSpec.")
-    with self.assertRaisesRegexp(ValueError, exception_message):
+    with self.assertRaisesRegex(ValueError, exception_message):
       tf_inspect.getargspec(partial_func)
 
   def testGetArgSpecOnPartialInvalidArgspec(self):
@@ -147,7 +147,7 @@ class TfInspectTest(test.TestCase):
     exception_message = (r"Some arguments \['l'\] do not have default value, "
                          "but they are positioned after those with default "
                          "values. This can not be expressed with ArgSpec.")
-    with self.assertRaisesRegexp(ValueError, exception_message):
+    with self.assertRaisesRegex(ValueError, exception_message):
       tf_inspect.getargspec(partial_func)
 
   def testGetArgSpecOnPartialValidArgspec(self):
diff --git a/tensorflow/python/util/tf_should_use_test.py b/tensorflow/python/util/tf_should_use_test.py
index bb50edfa857..7f6c86fa0a8 100644
--- a/tensorflow/python/util/tf_should_use_test.py
+++ b/tensorflow/python/util/tf_should_use_test.py
@@ -70,8 +70,8 @@ class TfShouldUseTest(test.TestCase):
       self.assertFalse(gc.garbage)
 
     tf_fn_in_this_function = def_function.function(in_this_function)
-    with self.assertRaisesRegexp(
-        RuntimeError, r'Object was never used.*blah0:0'):
+    with self.assertRaisesRegex(RuntimeError,
+                                r'Object was never used.*blah0:0'):
       tf_fn_in_this_function()
     self.assertFalse(gc.garbage)
 
diff --git a/tensorflow/tools/compatibility/ast_edits_test.py b/tensorflow/tools/compatibility/ast_edits_test.py
index 2e4f6a2dfb2..71a3f37c696 100644
--- a/tensorflow/tools/compatibility/ast_edits_test.py
+++ b/tensorflow/tools/compatibility/ast_edits_test.py
@@ -495,11 +495,10 @@ class TestAstEdits(test_util.TensorFlowTestCase):
 
   def testFullNameNode(self):
     t = ast_edits.full_name_node("a.b.c")
-    self.assertEquals(
+    self.assertEqual(
         ast.dump(t),
         "Attribute(value=Attribute(value=Name(id='a', ctx=Load()), attr='b', "
-        "ctx=Load()), attr='c', ctx=Load())"
-    )
+        "ctx=Load()), attr='c', ctx=Load())")
 
   def testImport(self):
     # foo should be renamed to bar.