diff --git a/tensorflow/core/grappler/optimizers/BUILD b/tensorflow/core/grappler/optimizers/BUILD
index da2d2acc878..27075ad0b97 100644
--- a/tensorflow/core/grappler/optimizers/BUILD
+++ b/tensorflow/core/grappler/optimizers/BUILD
@@ -1,8 +1,7 @@
 licenses(["notice"])  # Apache 2.0
 
-load("//tensorflow:tensorflow.bzl", "tf_cc_test")
+load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_kernel_library")
 load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_test")
-load("//tensorflow:tensorflow.bzl", "tf_kernel_library")
 
 # Platform specific build config
 load(
@@ -314,6 +313,7 @@ tf_cuda_cc_test(
         "//tensorflow/core/grappler:grappler_item",
         "//tensorflow/core/grappler:utils",
         "//tensorflow/core/grappler/inputs:trivial_test_graph_input_yielder",
+        "@com_google_absl//absl/strings",
     ],
 )
 
diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
index 6a9d7557620..94508c68368 100644
--- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
+++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc
@@ -14,6 +14,9 @@ limitations under the License.
 ==============================================================================*/
 
 #include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h"
+
+#include "absl/strings/str_cat.h"
+#include "tensorflow/cc/ops/array_ops.h"
 #include "tensorflow/cc/ops/math_ops.h"
 #include "tensorflow/cc/ops/standard_ops.h"
 #include "tensorflow/core/framework/node_def.pb.h"
@@ -110,25 +113,25 @@ TEST_F(ArithmeticOptimizerTest, OpDedupping) {
   item.fetch = {"div"};
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
-  EXPECT_EQ(2, output.node_size());
+  EXPECT_EQ(output.node_size(), 2);
   const NodeDef* new_c1 = node_map.GetNode("c1");
   ASSERT_NE(new_c1, nullptr);
 
   const NodeDef* new_div = node_map.GetNode("div");
   ASSERT_NE(new_div, nullptr);
-  EXPECT_EQ(2, new_div->input_size());
-  EXPECT_EQ("c1", new_div->input(0));
-  EXPECT_EQ("c1", new_div->input(1));
+  ASSERT_EQ(new_div->input_size(), 2);
+  EXPECT_EQ(new_div->input(0), "c1");
+  EXPECT_EQ(new_div->input(1), "c1");
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, OpDeduppingAssertAndCheckNumerics) {
@@ -149,7 +152,7 @@ TEST_F(ArithmeticOptimizerTest, OpDeduppingAssertAndCheckNumerics) {
   bool_t.scalar<bool>().setConstant(true);
   auto tensors_expected =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", bool_t}});
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
@@ -157,18 +160,18 @@ TEST_F(ArithmeticOptimizerTest, OpDeduppingAssertAndCheckNumerics) {
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
 
-  EXPECT_EQ(5, output.node_size());
+  EXPECT_EQ(output.node_size(), 5);
   const NodeDef* new_div = node_map.GetNode("div");
   ASSERT_NE(new_div, nullptr);
-  EXPECT_EQ(4, new_div->input_size());
-  EXPECT_EQ("check1", new_div->input(0));
-  EXPECT_EQ("check1", new_div->input(1));
-  EXPECT_EQ("^assert1", new_div->input(2));
-  EXPECT_EQ("^assert1", new_div->input(3));
+  ASSERT_EQ(new_div->input_size(), 4);
+  EXPECT_EQ(new_div->input(0), "check1");
+  EXPECT_EQ(new_div->input(1), "check1");
+  EXPECT_EQ(new_div->input(2), "^assert1");
+  EXPECT_EQ(new_div->input(3), "^assert1");
 
   auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", bool_t}});
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, OpDedupCommutative) {
@@ -182,32 +185,32 @@ TEST_F(ArithmeticOptimizerTest, OpDedupCommutative) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   item.fetch = {"div1"};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
 
-  EXPECT_EQ(4, output.node_size());
+  EXPECT_EQ(output.node_size(), 4);
   const NodeDef* new_c1 = node_map.GetNode("c1");
   ASSERT_NE(new_c1, nullptr);
   const NodeDef* new_c2 = node_map.GetNode("c2");
   ASSERT_NE(new_c2, nullptr);
   const NodeDef* new_mul1 = node_map.GetNode("mul1");
   ASSERT_NE(new_mul1, nullptr);
-  EXPECT_EQ(2, new_mul1->input_size());
-  EXPECT_EQ("c1", new_mul1->input(0));
-  EXPECT_EQ("c2", new_mul1->input(1));
+  ASSERT_EQ(new_mul1->input_size(), 2);
+  EXPECT_EQ(new_mul1->input(0), "c1");
+  EXPECT_EQ(new_mul1->input(1), "c2");
   const NodeDef* new_div1 = node_map.GetNode("div1");
   ASSERT_NE(new_div1, nullptr);
-  EXPECT_EQ(2, new_div1->input_size());
-  EXPECT_EQ("mul1", new_div1->input(0));
-  EXPECT_EQ("mul1", new_div1->input(1));
+  ASSERT_EQ(new_div1->input_size(), 2);
+  EXPECT_EQ(new_div1->input(0), "mul1");
+  EXPECT_EQ(new_div1->input(1), "mul1");
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, ReplaceMulWithSquare) {
@@ -223,36 +226,38 @@ TEST_F(ArithmeticOptimizerTest, ReplaceMulWithSquare) {
   item.fetch = {"id", "id2"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  ASSERT_EQ(2, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 2);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyReplaceMulWithSquare(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(6, output.node_size());
+  EXPECT_EQ(output.node_size(), 6);
 
   NodeMap node_map(&output);
   const string p = "ArithmeticOptimizer/ReplaceMulWithSquare";
-  const NodeDef* square_node = node_map.GetNode(strings::StrCat(p, "_", "mul"));
+  const NodeDef* square_node = node_map.GetNode(absl::StrCat(p, "_", "mul"));
 
   ASSERT_NE(square_node, nullptr);
-  EXPECT_EQ("Square", square_node->op());
-  EXPECT_EQ("c", square_node->input(0));
-  EXPECT_EQ("^d", square_node->input(1));
+  EXPECT_EQ(square_node->op(), "Square");
+  ASSERT_EQ(square_node->input_size(), 2);
+  EXPECT_EQ(square_node->input(0), "c");
+  EXPECT_EQ(square_node->input(1), "^d");
 
   const NodeDef* square_node2 =
-      node_map.GetNode(strings::StrCat(p, "_", "mul_no_nan"));
+      node_map.GetNode(absl::StrCat(p, "_", "mul_no_nan"));
   ASSERT_NE(square_node2, nullptr);
-  EXPECT_EQ("Square", square_node2->op());
-  EXPECT_EQ("d", square_node2->input(0));
+  EXPECT_EQ(square_node2->op(), "Square");
+  ASSERT_EQ(square_node2->input_size(), 1);
+  EXPECT_EQ(square_node2->input(0), "d");
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  ASSERT_EQ(2, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 2);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AdjacentNodes) {
+TEST_F(ArithmeticOptimizerTest, RemoveInvolutionAdjacentNodes) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
@@ -266,7 +271,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AdjacentNodes) {
   item.fetch = {"id"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -274,16 +279,17 @@ TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AdjacentNodes) {
   OptimizeAndPrune(&optimizer, &item, &output);
 
   // Negation and Reciprocal nodes cancelled each other.
-  EXPECT_EQ(2, output.node_size());
-  EXPECT_EQ("id", output.node(1).name());
-  EXPECT_EQ("c", output.node(1).input(0));
+  ASSERT_EQ(output.node_size(), 2);
+  EXPECT_EQ(output.node(1).name(), "id");
+  ASSERT_EQ(output.node(1).input_size(), 1);
+  EXPECT_EQ(output.node(1).input(0), "c");
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AroundValuePreservingChain) {
+TEST_F(ArithmeticOptimizerTest, RemoveInvolutionAroundValuePreservingChain) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
@@ -299,7 +305,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AroundValuePreservingChain) {
   item.fetch = fetch;
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -307,27 +313,29 @@ TEST_F(ArithmeticOptimizerTest, RemoveInvolution_AroundValuePreservingChain) {
   OptimizeTwiceAndPrune(&optimizer, &item, &output);
 
   // Check that Reciprocal nodes were removed from the graph.
-  EXPECT_EQ(3, output.node_size());
+  EXPECT_EQ(output.node_size(), 3);
 
   // And const directly flows into squeeze.
   int found = 0;
   for (const NodeDef& node : output.node()) {
     if (node.name() == "squeeze") {
-      EXPECT_EQ("c", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "c");
       found++;
     } else if (node.name() == "id2") {
-      EXPECT_EQ("squeeze", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "squeeze");
       found++;
     }
   }
-  EXPECT_EQ(2, found);
+  EXPECT_EQ(found, 2);
 
   auto tensors = EvaluateNodes(output, fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, RemoveInvolution_SkipControlDependencies) {
+TEST_F(ArithmeticOptimizerTest, RemoveInvolutionSkipControlDependencies) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   auto c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
@@ -345,7 +353,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveInvolution_SkipControlDependencies) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -356,8 +364,8 @@ TEST_F(ArithmeticOptimizerTest, RemoveInvolution_SkipControlDependencies) {
   VerifyGraphsMatch(item.graph, output, __LINE__);
 
   auto tensors = EvaluateNodes(output, fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) {
@@ -371,36 +379,39 @@ TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
 
-  EXPECT_EQ(5, output.node_size());
+  EXPECT_EQ(output.node_size(), 5);
 
   const string optimized_const_name = AggregationConstName("add");
   const string optimized_mul_name = AggregationMulName("add");
 
   const NodeDef* new_const = node_map.GetNode(optimized_const_name);
   ASSERT_NE(new_const, nullptr);
-  EXPECT_EQ("^x", new_const->input(0));
-  EXPECT_EQ(string("\0\0\0@", 4),
-            new_const->attr().at("value").tensor().tensor_content());
+  ASSERT_EQ(new_const->input_size(), 1);
+  EXPECT_EQ(new_const->input(0), "^x");
+  EXPECT_EQ(new_const->attr().at("value").tensor().tensor_content(),
+            string("\0\0\0@", 4));
 
   const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
   ASSERT_NE(new_mul, nullptr);
-  EXPECT_EQ(optimized_const_name, new_mul->input(0));
-  EXPECT_EQ("x", new_mul->input(1));
+  ASSERT_EQ(new_mul->input_size(), 2);
+  EXPECT_EQ(new_mul->input(0), optimized_const_name);
+  EXPECT_EQ(new_mul->input(1), "x");
 
   const NodeDef* new_id = node_map.GetNode("id");
   ASSERT_NE(new_id, nullptr);
-  EXPECT_EQ(optimized_mul_name, new_id->input(0));
+  ASSERT_EQ(new_id->input_size(), 1);
+  EXPECT_EQ(new_id->input(0), optimized_mul_name);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, TrivialSumsSimpleWithControlDep) {
@@ -415,37 +426,40 @@ TEST_F(ArithmeticOptimizerTest, TrivialSumsSimpleWithControlDep) {
 
   std::vector<string> fetch = {"id"};
   auto tensors_expected = EvaluateNodes(item.graph, fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
 
-  EXPECT_EQ(6, output.node_size());
+  EXPECT_EQ(output.node_size(), 6);
 
   const string optimized_const_name = AggregationConstName("add");
   const string optimized_mul_name = AggregationMulName("add");
 
   const NodeDef* new_const = node_map.GetNode(optimized_const_name);
   ASSERT_NE(new_const, nullptr);
-  EXPECT_EQ("^x", new_const->input(0));
-  EXPECT_EQ(string("\0\0\0@", 4),
-            new_const->attr().at("value").tensor().tensor_content());
+  ASSERT_EQ(new_const->input_size(), 1);
+  EXPECT_EQ(new_const->input(0), "^x");
+  EXPECT_EQ(new_const->attr().at("value").tensor().tensor_content(),
+            string("\0\0\0@", 4));
 
   const NodeDef* new_mul = node_map.GetNode(optimized_mul_name);
   ASSERT_NE(new_mul, nullptr);
-  EXPECT_EQ(optimized_const_name, new_mul->input(0));
-  EXPECT_EQ("x", new_mul->input(1));
-  EXPECT_EQ("^y", new_mul->input(2));
+  ASSERT_EQ(new_mul->input_size(), 3);
+  EXPECT_EQ(new_mul->input(0), optimized_const_name);
+  EXPECT_EQ(new_mul->input(1), "x");
+  EXPECT_EQ(new_mul->input(2), "^y");
 
   const NodeDef* new_id = node_map.GetNode("id");
   ASSERT_NE(new_id, nullptr);
-  EXPECT_EQ(optimized_mul_name, new_id->input(0));
+  ASSERT_EQ(new_id->input_size(), 1);
+  EXPECT_EQ(new_id->input(0), optimized_mul_name);
 
   auto tensors = EvaluateNodes(output, fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) {
@@ -483,51 +497,51 @@ TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) {
   //           Add_5(Const(2), Const(2))))
   NodeMap node_map(&output);
 
-  EXPECT_EQ(17, output.node_size());
+  EXPECT_EQ(output.node_size(), 17);
 
   const NodeDef* id_node = node_map.GetNode("id");
   ASSERT_NE(id_node, nullptr);
-  EXPECT_EQ(1, id_node->input_size());
-  EXPECT_EQ(HoistMulName("Add_6"), id_node->input(0));
+  ASSERT_EQ(id_node->input_size(), 1);
+  EXPECT_EQ(id_node->input(0), HoistMulName("Add_6"));
 
   const NodeDef* mul_node = node_map.GetNode(HoistMulName("Add_6"));
   ASSERT_NE(mul_node, nullptr);
-  EXPECT_EQ(2, mul_node->input_size());
-  EXPECT_EQ("Placeholder", mul_node->input(0));
-  EXPECT_EQ(HoistAddName("Add_6"), mul_node->input(1));
+  ASSERT_EQ(mul_node->input_size(), 2);
+  EXPECT_EQ(mul_node->input(0), "Placeholder");
+  EXPECT_EQ(mul_node->input(1), HoistAddName("Add_6"));
 
   const NodeDef* add_6_node = node_map.GetNode(HoistAddName("Add_6"));
   ASSERT_NE(add_6_node, nullptr);
-  EXPECT_EQ(2, add_6_node->input_size());
-  EXPECT_EQ(HoistAddName("Add_4"), add_6_node->input(0));
-  EXPECT_EQ(HoistAddName("Add_5"), add_6_node->input(1));
+  ASSERT_EQ(add_6_node->input_size(), 2);
+  EXPECT_EQ(add_6_node->input(0), HoistAddName("Add_4"));
+  EXPECT_EQ(add_6_node->input(1), HoistAddName("Add_5"));
 
   const NodeDef* add_4_node = node_map.GetNode(HoistAddName("Add_4"));
   ASSERT_NE(add_4_node, nullptr);
-  EXPECT_EQ("Add", add_4_node->op());
-  EXPECT_EQ(2, add_4_node->input_size());
-  EXPECT_EQ(AggregationConstName("Add"), add_4_node->input(0));
-  EXPECT_EQ(AggregationConstName("Add_1"), add_4_node->input(1));
+  EXPECT_EQ(add_4_node->op(), "Add");
+  ASSERT_EQ(add_4_node->input_size(), 2);
+  EXPECT_EQ(add_4_node->input(0), AggregationConstName("Add"));
+  EXPECT_EQ(add_4_node->input(1), AggregationConstName("Add_1"));
 
   const NodeDef* add_5_node = node_map.GetNode(HoistAddName("Add_5"));
   ASSERT_NE(add_5_node, nullptr);
-  EXPECT_EQ("Add", add_5_node->op());
-  EXPECT_EQ(2, add_5_node->input_size());
-  EXPECT_EQ(AggregationConstName("Add"), add_5_node->input(0));
-  EXPECT_EQ(AggregationConstName("Add_1"), add_5_node->input(1));
+  EXPECT_EQ(add_5_node->op(), "Add");
+  ASSERT_EQ(add_5_node->input_size(), 2);
+  EXPECT_EQ(add_5_node->input(0), AggregationConstName("Add"));
+  EXPECT_EQ(add_5_node->input(1), AggregationConstName("Add_1"));
 
   const NodeDef* add_const_node = node_map.GetNode(AggregationConstName("Add"));
   ASSERT_NE(add_const_node, nullptr);
-  EXPECT_EQ("Const", add_const_node->op());
-  EXPECT_EQ(1, add_const_node->input_size());
-  EXPECT_EQ("^Placeholder", add_const_node->input(0));
+  EXPECT_EQ(add_const_node->op(), "Const");
+  ASSERT_EQ(add_const_node->input_size(), 1);
+  EXPECT_EQ(add_const_node->input(0), "^Placeholder");
 
   const NodeDef* add_1_const_node =
       node_map.GetNode(AggregationConstName("Add_1"));
   ASSERT_NE(add_1_const_node, nullptr);
-  EXPECT_EQ("Const", add_1_const_node->op());
-  EXPECT_EQ(1, add_1_const_node->input_size());
-  EXPECT_EQ("^Placeholder", add_1_const_node->input(0));
+  EXPECT_EQ(add_1_const_node->op(), "Const");
+  ASSERT_EQ(add_1_const_node->input_size(), 1);
+  EXPECT_EQ(add_1_const_node->input(0), "^Placeholder");
 }
 
 TEST_F(ArithmeticOptimizerTest, HoistFactorMul) {
@@ -551,7 +565,7 @@ TEST_F(ArithmeticOptimizerTest, HoistFactorMul) {
       item.fetch = {"id"};
       TF_CHECK_OK(s.ToGraphDef(&item.graph));
       auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-      EXPECT_EQ(1, tensors_expected.size());
+      ASSERT_EQ(tensors_expected.size(), 1);
       ArithmeticOptimizer optimizer;
       EnableOnlyHoistCommonFactor(&optimizer);
 
@@ -573,26 +587,29 @@ TEST_F(ArithmeticOptimizerTest, HoistFactorMul) {
       if (use_addn && !matching_shapes) {
         VerifyGraphsMatch(item.graph, output, __LINE__);
       } else {
-        EXPECT_EQ(9, output.node_size());
+        EXPECT_EQ(output.node_size(), 9);
 
         const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add"));
         ASSERT_NE(new_add_node, nullptr) << "Hoisted Add node not found";
-        EXPECT_EQ("y1", new_add_node->input(0));
-        EXPECT_EQ("y2", new_add_node->input(1));
+        ASSERT_EQ(new_add_node->input_size(), 2);
+        EXPECT_EQ(new_add_node->input(0), "y1");
+        EXPECT_EQ(new_add_node->input(1), "y2");
 
         const NodeDef* new_mul_node = node_map.GetNode(HoistMulName("add"));
         ASSERT_NE(new_mul_node, nullptr) << "Hoisted Mul node not found";
-        EXPECT_EQ("x", new_mul_node->input(0));
-        EXPECT_EQ(new_add_node->name(), new_mul_node->input(1));
+        ASSERT_EQ(new_mul_node->input_size(), 2);
+        EXPECT_EQ(new_mul_node->input(0), "x");
+        EXPECT_EQ(new_mul_node->input(1), new_add_node->name());
 
         const NodeDef* id_node = node_map.GetNode("id");
         ASSERT_NE(id_node, nullptr) << "Id node not found";
-        EXPECT_EQ("id", id_node->name());
-        EXPECT_EQ(HoistMulName("add"), id_node->input(0));
+        EXPECT_EQ(id_node->name(), "id");
+        ASSERT_EQ(id_node->input_size(), 1);
+        EXPECT_EQ(id_node->input(0), HoistMulName("add"));
       }
       auto tensors = EvaluateNodes(output, item.fetch);
-      EXPECT_EQ(1, tensors.size());
-      test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+      ASSERT_EQ(tensors.size(), 1);
+      test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
     }
   }
 }
@@ -630,7 +647,7 @@ TEST_F(ArithmeticOptimizerTest, HoistFactorDiv) {
         TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
         auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-        EXPECT_EQ(1, tensors_expected.size());
+        ASSERT_EQ(tensors_expected.size(), 1);
 
         ArithmeticOptimizer optimizer;
         EnableOnlyHoistCommonFactor(&optimizer);
@@ -653,29 +670,32 @@ TEST_F(ArithmeticOptimizerTest, HoistFactorDiv) {
         if ((use_addn && !matching_shapes) || use_ints) {
           VerifyGraphsMatch(item.graph, output, __LINE__);
         } else {
-          EXPECT_EQ(9, output.node_size());
+          EXPECT_EQ(output.node_size(), 9);
 
           const NodeDef* new_add_node = node_map.GetNode(HoistAddName("add"));
           ASSERT_TRUE(new_add_node != nullptr) << "Hoisted Add node not found";
-          EXPECT_EQ("y1", new_add_node->input(0));
-          EXPECT_EQ("y2", new_add_node->input(1));
+          ASSERT_EQ(new_add_node->input_size(), 2);
+          EXPECT_EQ(new_add_node->input(0), "y1");
+          EXPECT_EQ(new_add_node->input(1), "y2");
 
           const NodeDef* new_div_node = node_map.GetNode(HoistDivName("add"));
           ASSERT_TRUE(new_div_node != nullptr) << "Hoisted Div node not found";
-          EXPECT_EQ(new_add_node->name(), new_div_node->input(0));
-          EXPECT_EQ("x", new_div_node->input(1));
+          ASSERT_EQ(new_div_node->input_size(), 2);
+          EXPECT_EQ(new_div_node->input(0), new_add_node->name());
+          EXPECT_EQ(new_div_node->input(1), "x");
 
           const NodeDef* id_node = node_map.GetNode("id");
           ASSERT_TRUE(id_node != nullptr) << "Id node not found";
           EXPECT_EQ("id", id_node->name());
-          EXPECT_EQ(HoistDivName("add"), id_node->input(0));
+          ASSERT_EQ(id_node->input_size(), 1);
+          EXPECT_EQ(id_node->input(0), HoistDivName("add"));
         }
         auto tensors = EvaluateNodes(output, item.fetch);
-        EXPECT_EQ(1, tensors.size());
+        ASSERT_EQ(tensors.size(), 1);
         if (use_ints) {
-          test::ExpectTensorEqual<int32>(tensors_expected[0], tensors[0]);
+          test::ExpectTensorEqual<int32>(tensors[0], tensors_expected[0]);
         } else {
-          test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+          test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
         }
       }
     }
@@ -696,27 +716,28 @@ TEST_F(ArithmeticOptimizerTest, FuseConjAndTranspose) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
 
-  EXPECT_EQ(7, output.node_size());
+  EXPECT_EQ(output.node_size(), 7);
 
   const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
-  const string optimized_name = strings::StrCat(p, "_", "trans");
+  const string optimized_name = absl::StrCat(p, "_", "trans");
 
   const NodeDef* trans_fused_node = node_map.GetNode(optimized_name);
   ASSERT_NE(trans_fused_node, nullptr);
-  EXPECT_EQ("ConjugateTranspose", trans_fused_node->op());
-  EXPECT_EQ("z", trans_fused_node->input(0));
-  EXPECT_EQ("perm", trans_fused_node->input(1));
+  EXPECT_EQ(trans_fused_node->op(), "ConjugateTranspose");
+  ASSERT_EQ(trans_fused_node->input_size(), 2);
+  EXPECT_EQ(trans_fused_node->input(0), "z");
+  EXPECT_EQ(trans_fused_node->input(1), "perm");
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<complex64>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<complex64>(tensors[0], tensors_expected[0]);
 }
 
 TEST_F(ArithmeticOptimizerTest, FuseConjAndConjugateTranspose) {
@@ -735,27 +756,28 @@ TEST_F(ArithmeticOptimizerTest, FuseConjAndConjugateTranspose) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
 
-  EXPECT_EQ(7, output.node_size());
+  EXPECT_EQ(output.node_size(), 7);
 
   const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
-  const string optimized_name = strings::StrCat(p, "_", "conjugate_trans");
+  const string optimized_name = absl::StrCat(p, "_", "conjugate_trans");
 
   const NodeDef* conjugate_trans_fused_node = node_map.GetNode(optimized_name);
   ASSERT_NE(conjugate_trans_fused_node, nullptr);
-  EXPECT_EQ("Transpose", conjugate_trans_fused_node->op());
-  EXPECT_EQ("z", conjugate_trans_fused_node->input(0));
-  EXPECT_EQ("perm", conjugate_trans_fused_node->input(1));
+  EXPECT_EQ(conjugate_trans_fused_node->op(), "Transpose");
+  ASSERT_EQ(conjugate_trans_fused_node->input_size(), 2);
+  EXPECT_EQ(conjugate_trans_fused_node->input(0), "z");
+  EXPECT_EQ(conjugate_trans_fused_node->input(1), "perm");
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<complex64>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<complex64>(tensors[0], tensors_expected[0]);
 }
 
 TEST_F(ArithmeticOptimizerTest, FuseTransposeAndConj) {
@@ -772,27 +794,28 @@ TEST_F(ArithmeticOptimizerTest, FuseTransposeAndConj) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
   NodeMap node_map(&output);
 
-  EXPECT_EQ(7, output.node_size());
+  EXPECT_EQ(output.node_size(), 7);
 
   const string p = "ArithmeticOptimizer/FoldConjugateIntoTranspose";
-  const string optimized_name = strings::StrCat(p, "_", "conj");
+  const string optimized_name = absl::StrCat(p, "_", "conj");
 
   const NodeDef* conj_fused_node = node_map.GetNode(optimized_name);
   ASSERT_NE(conj_fused_node, nullptr);
-  EXPECT_EQ("ConjugateTranspose", conj_fused_node->op());
-  EXPECT_EQ("z", conj_fused_node->input(0));
-  EXPECT_EQ("perm", conj_fused_node->input(1));
+  EXPECT_EQ(conj_fused_node->op(), "ConjugateTranspose");
+  ASSERT_EQ(conj_fused_node->input_size(), 2);
+  EXPECT_EQ(conj_fused_node->input(0), "z");
+  EXPECT_EQ(conj_fused_node->input(1), "perm");
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<complex64>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<complex64>(tensors[0], tensors_expected[0]);
 }
 
 TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) {
@@ -822,7 +845,7 @@ TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) {
     TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
     auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-    EXPECT_EQ(1, tensors_expected.size());
+    ASSERT_EQ(tensors_expected.size(), 1);
 
     ArithmeticOptimizer optimizer;
     EnableOnlyFoldTransposeIntoMatMul(&optimizer);
@@ -830,15 +853,16 @@ TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) {
     OptimizeTwice(&optimizer, &item, &output);
     NodeMap node_map(&output);
 
-    EXPECT_EQ(8, output.node_size());
+    EXPECT_EQ(output.node_size(), 8);
 
     const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul";
-    const string optimized_name = strings::StrCat(p, "_", "matmul");
+    const string optimized_name = absl::StrCat(p, "_", "matmul");
 
     const NodeDef* matmul_fused_node = node_map.GetNode(optimized_name);
     ASSERT_NE(matmul_fused_node, nullptr);
-    EXPECT_EQ("a", matmul_fused_node->input(0));
-    EXPECT_EQ("b", matmul_fused_node->input(1));
+    ASSERT_EQ(matmul_fused_node->input_size(), 2);
+    EXPECT_EQ(matmul_fused_node->input(0), "a");
+    EXPECT_EQ(matmul_fused_node->input(1), "b");
 
     if (matmul_type == "BatchMatMul") {
       EXPECT_TRUE(matmul_fused_node->attr().at("adj_x").b());
@@ -854,8 +878,8 @@ TEST_F(ArithmeticOptimizerTest, FoldTransposeIntoMatMul) {
     EXPECT_EQ(identity_node->input(0), optimized_name);
 
     auto tensors = EvaluateNodes(output, item.fetch);
-    EXPECT_EQ(1, tensors.size());
-    test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+    ASSERT_EQ(tensors.size(), 1);
+    test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
   }
 }
 
@@ -882,31 +906,32 @@ TEST_F(ArithmeticOptimizerTest, FoldConjugateTransposeIntoBatchMatMul) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   ArithmeticOptimizer optimizer;
   GraphDef output;
   OptimizeTwice(&optimizer, &item, &output);
 
   NodeMap node_map(&output);
-  ASSERT_EQ(11, output.node_size());
+  EXPECT_EQ(output.node_size(), 11);
 
   const string p = "ArithmeticOptimizer/FoldTransposeIntoMatMul";
-  const string optimized_name = strings::StrCat(p, "_", "matmul");
+  const string optimized_name = absl::StrCat(p, "_", "matmul");
 
   const NodeDef* optimized_matmul = node_map.GetNode(optimized_name);
   ASSERT_NE(optimized_matmul, nullptr);
-  EXPECT_EQ("a", optimized_matmul->input(0));
-  EXPECT_EQ("b", optimized_matmul->input(1));
+  ASSERT_EQ(optimized_matmul->input_size(), 2);
+  EXPECT_EQ(optimized_matmul->input(0), "a");
+  EXPECT_EQ(optimized_matmul->input(1), "b");
   EXPECT_TRUE(optimized_matmul->attr().at("adj_x").b());
   EXPECT_TRUE(optimized_matmul->attr().at("adj_y").b());
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<complex64>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<complex64>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_IdentityReshape) {
+TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeIdentityReshape) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
   Output inputs =
       ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, 28, 28}));
@@ -927,21 +952,21 @@ TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_IdentityReshape) {
   auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({3, 3, 28, 28}));
   auto tensors_expected =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", x_t}});
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyRemoveRedundantReshape(&optimizer);
   OptimizeTwiceAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
+  EXPECT_EQ(CountOpNodes(output, "Reshape"), 0);
   auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", x_t}});
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest,
-       RemoveRedundantReshape_IdentityReshapeBetweenSymbolicShapes) {
+       RemoveRedundantReshapeIdentityReshapeBetweenSymbolicShapes) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
   Output inputs =
       ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({-1, 3, -1, -1}));
@@ -968,7 +993,7 @@ TEST_F(ArithmeticOptimizerTest,
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   // Assume valid feed shape in aggressive mode.
@@ -976,13 +1001,13 @@ TEST_F(ArithmeticOptimizerTest,
   EnableOnlyRemoveRedundantReshape(&optimizer);
   OptimizeTwiceAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
+  EXPECT_EQ(CountOpNodes(output, "Reshape"), 0);
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotAssumeValidFeeds) {
+TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeNotAssumeValidFeeds) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
   Output inputs =
       ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3, 28, 28}));
@@ -997,7 +1022,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotAssumeValidFeeds) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1006,15 +1031,15 @@ TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotAssumeValidFeeds) {
 
   // The reshape is preserved because the shape of the placeholder can be
   // different from the shape of the actual feed.
-  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));
+  EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
 
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest,
-       RemoveRedundantReshape_AssumeValidFeedsInAggressiveMode) {
+       RemoveRedundantReshapeAssumeValidFeedsInAggressiveMode) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
   Output inputs =
       ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3, 28, 28}));
@@ -1029,20 +1054,20 @@ TEST_F(ArithmeticOptimizerTest,
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer(RewriterConfig::AGGRESSIVE);
   EnableOnlyRemoveRedundantReshape(&optimizer);
   OptimizeTwiceAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(0, CountOpNodes(output, "Reshape"));
+  EXPECT_EQ(CountOpNodes(output, "Reshape"), 0);
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotIdentityReshape) {
+TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeNotIdentityReshape) {
   // Reshape from [-1,3,28,28] to [8,-1,28,28] is not identity, because it can
   // be from [4,3,28,28] to [8,6,28,28].
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
@@ -1057,21 +1082,21 @@ TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_NotIdentityReshape) {
   auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 3, 28, 28}));
   item.feed = {{"Placeholder", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyRemoveRedundantReshape(&optimizer);
   OptimizeTwiceAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));
+  EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest,
-       RemoveRedundantReshape_NotIdentityReshapeTooManyUnknownDimSizes) {
+       RemoveRedundantReshapeNotIdentityReshapeTooManyUnknownDimSizes) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
   Output inputs =
       ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({4, 3}));
@@ -1087,10 +1112,10 @@ TEST_F(ArithmeticOptimizerTest,
   EnableOnlyRemoveRedundantReshape(&optimizer);
   OptimizeTwiceAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));
+  EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
 }
 
-TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_CombineReshapes) {
+TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshapeCombineReshapes) {
   // Converts an NCHW_VECT_C tensor to NHWC and then flattens it to 2D. The two
   // reshapes should be combined.
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
@@ -1114,20 +1139,20 @@ TEST_F(ArithmeticOptimizerTest, RemoveRedundantReshape_CombineReshapes) {
   auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({8, 3, 28, 28, 4}));
   item.feed = {{"nchw_vect_c", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyRemoveRedundantReshape(&optimizer);
   OptimizeTwiceAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(1, CountOpNodes(output, "Reshape"));
+  EXPECT_EQ(CountOpNodes(output, "Reshape"), 1);
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<int8>(tensors[0], tensors_expected[0]);
 }
 
-TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_ProducerIsCast) {
+TEST_F(ArithmeticOptimizerTest, ReorderTransposeCastProducerIsCast) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
   Output nhwc_uint8 =
       ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
@@ -1143,7 +1168,7 @@ TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_ProducerIsCast) {
   auto input_t = GenerateRandomTensor<DT_UINT8>(TensorShape({8, 28, 28, 3}));
   auto tensors_expected =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1153,25 +1178,26 @@ TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_ProducerIsCast) {
   for (const NodeDef& node : output.node()) {
     if (node.op() == "Transpose") {
       EXPECT_EQ(transpose_node, nullptr);
-      EXPECT_EQ(DT_UINT8, node.attr().at("T").type());
+      EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
       transpose_node = &node;
     }
   }
-  EXPECT_NE(transpose_node, nullptr);
+  ASSERT_NE(transpose_node, nullptr);
 
   for (const NodeDef& node : output.node()) {
     if (node.op() == "Cast") {
-      EXPECT_EQ(NodeName(node.input(0)), transpose_node->name());
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(transpose_node->name(), NodeName(node.input(0)));
     }
   }
 
   auto tensors =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
 }
 
-TEST_F(ArithmeticOptimizerTest, ReorderS2DCast_ProducerIsCast) {
+TEST_F(ArithmeticOptimizerTest, ReorderS2DCastProducerIsCast) {
   // TODO(jingyue): Evaluate S2D+Cast on GPU as well. We can't simply put nodes
   // under a /GPU:0 scope, because this test would fail if the testing machine
   // doesn't have a GPU. Maybe EvaluateNodes should allow soft placement?
@@ -1189,7 +1215,7 @@ TEST_F(ArithmeticOptimizerTest, ReorderS2DCast_ProducerIsCast) {
   auto input_t = GenerateRandomTensor<DT_UINT8>(TensorShape({8, 28, 28, 3}));
   auto tensors_expected =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1199,25 +1225,26 @@ TEST_F(ArithmeticOptimizerTest, ReorderS2DCast_ProducerIsCast) {
   for (const NodeDef& node : output.node()) {
     if (node.op() == "SpaceToDepth") {
       EXPECT_EQ(s2d_node, nullptr);
-      EXPECT_EQ(DT_UINT8, node.attr().at("T").type());
+      EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
       s2d_node = &node;
     }
   }
-  EXPECT_NE(s2d_node, nullptr);
+  ASSERT_NE(s2d_node, nullptr);
 
   for (const NodeDef& node : output.node()) {
     if (node.op() == "Cast") {
-      EXPECT_EQ(NodeName(node.input(0)), s2d_node->name());
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(s2d_node->name(), NodeName(node.input(0)));
     }
   }
 
   auto tensors =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
 }
 
-TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_ProducerIsTranspose) {
+TEST_F(ArithmeticOptimizerTest, ReorderTransposeCastProducerIsTranspose) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
   Output nhwc_fp32 =
       ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 28, 28, 3}));
@@ -1234,7 +1261,7 @@ TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_ProducerIsTranspose) {
       GenerateConstantTensor<DT_FLOAT>(TensorShape({8, 28, 28, 3}), 42.0f);
   auto tensors_expected =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1245,22 +1272,24 @@ TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_ProducerIsTranspose) {
     if (node.op() == "Cast") {
       EXPECT_EQ(cast_node, nullptr);
       cast_node = &node;
+      ASSERT_EQ(node.input_size(), 1);
       EXPECT_EQ(NodeName(node.input(0)), "Placeholder");
     }
   }
-  EXPECT_NE(cast_node, nullptr);
+  ASSERT_NE(cast_node, nullptr);
 
   for (const NodeDef& node : output.node()) {
     if (node.op() == "Transpose") {
-      EXPECT_EQ(DT_UINT8, node.attr().at("T").type());
-      EXPECT_EQ(NodeName(node.input(0)), cast_node->name());
+      EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(cast_node->name(), NodeName(node.input(0)));
     }
   }
 
   auto tensors =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<uint8>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<uint8>(tensors[0], tensors_expected[0]);
 }
 
 TEST_F(ArithmeticOptimizerTest, ReorderTransposeReverseCast) {
@@ -1282,7 +1311,7 @@ TEST_F(ArithmeticOptimizerTest, ReorderTransposeReverseCast) {
   auto input_t = GenerateRandomTensor<DT_UINT8>(TensorShape({8, 28, 28, 3}));
   auto tensors_expected =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1294,30 +1323,33 @@ TEST_F(ArithmeticOptimizerTest, ReorderTransposeReverseCast) {
   for (const NodeDef& node : output.node()) {
     if (node.op() == "Transpose") {
       EXPECT_EQ(transpose_node, nullptr);
-      EXPECT_EQ(DT_UINT8, node.attr().at("T").type());
+      EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
       transpose_node = &node;
     } else if (node.op() == "ReverseV2") {
       EXPECT_EQ(reverse_node, nullptr);
-      EXPECT_EQ(DT_UINT8, node.attr().at("T").type());
+      EXPECT_EQ(node.attr().at("T").type(), DT_UINT8);
       reverse_node = &node;
     } else if (node.op() == "Cast") {
       cast_node = &node;
     }
   }
-  EXPECT_NE(cast_node, nullptr);
-  EXPECT_NE(reverse_node, nullptr);
-  EXPECT_NE(transpose_node, nullptr);
+  ASSERT_NE(cast_node, nullptr);
+  ASSERT_NE(reverse_node, nullptr);
+  ASSERT_NE(transpose_node, nullptr);
+  ASSERT_EQ(reverse_node->input_size(), 2);
   EXPECT_EQ(NodeName(reverse_node->input(0)), "Placeholder");
+  ASSERT_EQ(transpose_node->input_size(), 2);
   EXPECT_EQ(NodeName(transpose_node->input(0)), reverse_node->name());
+  ASSERT_EQ(cast_node->input_size(), 1);
   EXPECT_EQ(NodeName(cast_node->input(0)), transpose_node->name());
 
   auto tensors =
       EvaluateNodes(item.graph, item.fetch, {{"Placeholder", input_t}});
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
 }
 
-TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_CheckNumericsToIdentity) {
+TEST_F(ArithmeticOptimizerTest, ReorderTransposeCastCheckNumericsToIdentity) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
   Output nhwc_uint8 =
       ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
@@ -1334,7 +1366,7 @@ TEST_F(ArithmeticOptimizerTest, ReorderTransposeCast_CheckNumericsToIdentity) {
   CompareGraphs(item.graph, output);
 }
 
-TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCast_ProducerIsCast) {
+TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCastProducerIsCast) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
   Output nhwc_fp32 =
       ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({8, 28, 28, 3}));
@@ -1352,7 +1384,7 @@ TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCast_ProducerIsCast) {
   CompareGraphs(item.graph, output);
 }
 
-TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCast_ProducerIsTranspose) {
+TEST_F(ArithmeticOptimizerTest, NoReorderTransposeCastProducerIsTranspose) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/CPU:0");
   Output nhwc_uint8 =
       ops::Placeholder(s, DT_UINT8, ops::Placeholder::Shape({8, 28, 28, 3}));
@@ -1425,7 +1457,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesMultipleOutputs) {
   auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({8, 12, 28, 28}));
   item.feed = {{"inputs", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1434,6 +1466,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesMultipleOutputs) {
 
   for (const NodeDef& node : output.node()) {
     if (node.op() == "Concat") {
+      ASSERT_EQ(node.input_size(), 3);
       EXPECT_EQ(node.input(0), "Split");
       EXPECT_EQ(node.input(1), "Split:1");
       EXPECT_EQ(node.input(2), "Split:2");
@@ -1441,8 +1474,8 @@ TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesMultipleOutputs) {
   }
 
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, RemoveTransposesWithControlDependency) {
@@ -1462,7 +1495,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveTransposesWithControlDependency) {
   auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 3}));
   item.feed = {{"Placeholder", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1471,13 +1504,13 @@ TEST_F(ArithmeticOptimizerTest, RemoveTransposesWithControlDependency) {
 
   NodeMap node_map(&output);
   const NodeDef* outputs_node = node_map.GetNode("outputs");
-  EXPECT_EQ(2, outputs_node->input_size());
+  ASSERT_EQ(outputs_node->input_size(), 2);
   EXPECT_EQ(outputs_node->input(0), "outputs_const");
   EXPECT_EQ(outputs_node->input(1), "^Placeholder");
 
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, NotRemoveTransposes) {
@@ -1501,7 +1534,7 @@ TEST_F(ArithmeticOptimizerTest, NotRemoveTransposes) {
   EnableOnlyRemoveIdentityTranspose(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(6, output.node_size());
+  EXPECT_EQ(output.node_size(), 6);
 }
 
 TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesThroughChain) {
@@ -1532,12 +1565,12 @@ TEST_F(ArithmeticOptimizerTest, RemoveIdentityTransposesThroughChain) {
   for (const NodeDef& node : output.node()) {
     nodes_after_optimization.insert(node.name());
     if (node.name() == "id") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("inputs", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "inputs");
     }
     if (node.name() == "id1") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("id", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "id");
     }
   }
   EXPECT_EQ(nodes_after_optimization,
@@ -1579,12 +1612,13 @@ TEST_F(ArithmeticOptimizerTest, FoldMulToTransposeConv) {
 
   const NodeDef* folded_conv_weights = node_map.GetNode(folded_conv->input(1));
   ASSERT_NE(folded_conv_weights, nullptr);
-  EXPECT_EQ("Mul", folded_conv_weights->op());
+  EXPECT_EQ(folded_conv_weights->op(), "Mul");
 
   // Its input should be a transpose of `inputs`.
   const NodeDef* transpose = node_map.GetNode(NodeName(folded_conv->input(0)));
   ASSERT_NE(transpose, nullptr);
-  EXPECT_EQ("inputs", transpose->input(0));
+  ASSERT_EQ(transpose->input_size(), 2);
+  EXPECT_EQ(transpose->input(0), "inputs");
 }
 
 TEST_F(ArithmeticOptimizerTest, NotFoldMulAcrossPreservedTranspose) {
@@ -1623,6 +1657,8 @@ TEST_F(ArithmeticOptimizerTest, NotFoldMulAcrossPreservedTranspose) {
   NodeMap node_map(&output);
   const NodeDef* inputs_nchw_node_def =
       node_map.GetNode(inputs_nchw.node()->name());
+  ASSERT_NE(inputs_nchw_node_def, nullptr);
+  ASSERT_EQ(inputs_nchw_node_def->input_size(), 2);
   EXPECT_EQ(NodeName(inputs_nchw_node_def->input(0)),
             scaled_inputs.node()->name());
 }
@@ -1653,8 +1689,13 @@ TEST_F(ArithmeticOptimizerTest, FoldMulToConv) {
   NodeMap node_map(&output);
   // `conv` is now a folded convolution on `inputs` and scaled weights.
   const NodeDef* folded_conv = node_map.GetNode(conv.node()->name());
-  CHECK_EQ(inputs.node()->name(), NodeName(folded_conv->input(0)));
-  CHECK_EQ(node_map.GetNode(NodeName(folded_conv->input(1)))->op(), "Mul");
+  ASSERT_NE(folded_conv, nullptr);
+  ASSERT_EQ(folded_conv->input_size(), 2);
+  CHECK_EQ(NodeName(folded_conv->input(0)), inputs.node()->name());
+  const NodeDef* folded_conv_input_1 =
+      node_map.GetNode(NodeName(folded_conv->input(1)));
+  ASSERT_NE(folded_conv_input_1, nullptr);
+  CHECK_EQ(folded_conv_input_1->op(), "Mul");
 }
 
 TEST_F(ArithmeticOptimizerTest, OptimizeCastMulTransposeConv) {
@@ -1691,8 +1732,8 @@ TEST_F(ArithmeticOptimizerTest, OptimizeCastMulTransposeConv) {
 
   // Expected names for reordered cast and transpose.
   const string p = "ArithmeticOptimizer/ReorderCastLikeAndValuePreserving_";
-  const string optimized_cast_name = strings::StrCat(p, "float_Cast");
-  const string optimized_transpose_name = strings::StrCat(p, "uint8_Transpose");
+  const string optimized_cast_name = absl::StrCat(p, "float_Cast");
+  const string optimized_transpose_name = absl::StrCat(p, "uint8_Transpose");
 
   // Expected names for folded multiply and conv.
   const string optimized_weights =
@@ -1712,8 +1753,11 @@ TEST_F(ArithmeticOptimizerTest, OptimizeCastMulTransposeConv) {
   ASSERT_NE(conv_node, nullptr);
 
   EXPECT_EQ(output.node_size(), 7);
+  ASSERT_EQ(transpose_node->input_size(), 2);
   EXPECT_EQ(transpose_node->input(0), inputs_node->name());
+  ASSERT_EQ(cast_node->input_size(), 1);
   EXPECT_EQ(cast_node->input(0), transpose_node->name());
+  ASSERT_EQ(conv_node->input_size(), 2);
   EXPECT_EQ(conv_node->input(0), cast_node->name());
   EXPECT_EQ(conv_node->input(1), weights_node->name());
 }
@@ -1747,7 +1791,7 @@ TEST_F(ArithmeticOptimizerTest, OptimizeMultipleMulTransposeConv) {
 
   NodeMap node_map(&output);
 
-  using strings::StrCat;
+  using absl::StrCat;
   const string p = "ArithmeticOptimizer/FoldMultiplyIntoConv_";
   const string optimized_weights = StrCat(p, "scaled_Conv2D_weights");
   const string optimized_weights_1 = StrCat(p, "scaled_Conv2D_1_weights_1");
@@ -1762,6 +1806,8 @@ TEST_F(ArithmeticOptimizerTest, OptimizeMultipleMulTransposeConv) {
   ASSERT_NE(conv_node, nullptr);
   ASSERT_NE(conv_node_1, nullptr);
 
+  ASSERT_EQ(conv_node->input_size(), 2);
+  ASSERT_EQ(conv_node_1->input_size(), 2);
   EXPECT_EQ(conv_node->input(1), weights_node->name());
   EXPECT_EQ(conv_node_1->input(1), weights_node_1->name());
 }
@@ -1781,7 +1827,7 @@ TEST_F(ArithmeticOptimizerTest, CombineBitcasts) {
   auto x_t = GenerateRandomTensor<DT_UINT8>(TensorShape({2, 3}));
   item.feed = {{"inputs", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1791,13 +1837,13 @@ TEST_F(ArithmeticOptimizerTest, CombineBitcasts) {
   NodeMap node_map(&output);
 
   // Bitcasts combined into a single op and inputs redirected to updated Bitcast
-  EXPECT_EQ(3, output.node_size());
-  EXPECT_EQ(1, CountOpNodes(output, "Bitcast"));
+  EXPECT_EQ(output.node_size(), 3);
+  EXPECT_EQ(CountOpNodes(output, "Bitcast"), 1);
   EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "bc2"));
 
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<int8>(tensors[0], tensors_expected[0]);
 }
 
 TEST_F(ArithmeticOptimizerTest, CombineAndRemoveBitcasts) {
@@ -1815,7 +1861,7 @@ TEST_F(ArithmeticOptimizerTest, CombineAndRemoveBitcasts) {
   auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
   item.feed = {{"inputs", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1825,13 +1871,13 @@ TEST_F(ArithmeticOptimizerTest, CombineAndRemoveBitcasts) {
   NodeMap node_map(&output);
 
   // Bitcasts removed and inputs redirected to outputs
-  EXPECT_EQ(2, output.node_size());
-  EXPECT_EQ(0, CountOpNodes(output, "Bitcast"));
+  EXPECT_EQ(output.node_size(), 2);
+  EXPECT_EQ(CountOpNodes(output, "Bitcast"), 0);
   EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));
 
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<int8>(tensors[0], tensors_expected[0]);
 }
 
 TEST_F(ArithmeticOptimizerTest, RemoveRedundantCast) {
@@ -1848,7 +1894,7 @@ TEST_F(ArithmeticOptimizerTest, RemoveRedundantCast) {
   auto x_t = GenerateRandomTensor<DT_INT8>(TensorShape({2, 3}));
   item.feed = {{"inputs", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1858,16 +1904,16 @@ TEST_F(ArithmeticOptimizerTest, RemoveRedundantCast) {
   NodeMap node_map(&output);
 
   // Cast removed and inputs redirected to outputs
-  EXPECT_EQ(2, output.node_size());
-  EXPECT_EQ(0, CountOpNodes(output, "Cast"));
+  EXPECT_EQ(output.node_size(), 2);
+  EXPECT_EQ(CountOpNodes(output, "Cast"), 0);
   EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "inputs", "outputs"));
 
   auto tensors = EvaluateNodes(output, item.fetch, item.feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<int8>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<int8>(tensors[0], tensors_expected[0]);
 }
 
-TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfIdenticalShape) {
+TEST_F(ArithmeticOptimizerTest, AddOpsRewriteAddOpsOfIdenticalShape) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
   tensorflow::Scope sx = s.NewSubScope("x");
   tensorflow::Scope sy = s.NewSubScope("y");
@@ -1890,7 +1936,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfIdenticalShape) {
   std::vector<std::pair<string, Tensor>> feed = {
       {"a", a_t}, {"b", b_t}, {"c", c_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1905,7 +1951,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfIdenticalShape) {
   //   +   c      -->    AddN(a, b, c)
   //  / \
   // a   b
-  EXPECT_EQ(5, output.node_size());
+  EXPECT_EQ(output.node_size(), 5);
 
   NodeMap node_map(&output);
 
@@ -1914,24 +1960,24 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfIdenticalShape) {
       node_map.GetNode("y/ArithmeticOptimizer/AddOpsRewrite_Add_abc");
   ASSERT_NE(collapsed_add, nullptr);
 
-  EXPECT_EQ("AddN", collapsed_add->op());
-  EXPECT_EQ(3, collapsed_add->input_size());
-  EXPECT_EQ("a", collapsed_add->input(0));
-  EXPECT_EQ("b", collapsed_add->input(1));
-  EXPECT_EQ("c", collapsed_add->input(2));
+  EXPECT_EQ(collapsed_add->op(), "AddN");
+  ASSERT_EQ(collapsed_add->input_size(), 3);
+  EXPECT_EQ(collapsed_add->input(0), "a");
+  EXPECT_EQ(collapsed_add->input(1), "b");
+  EXPECT_EQ(collapsed_add->input(2), "c");
 
   // check output was re-wired to new node
   const NodeDef* updated_outputs = node_map.GetNode("outputs");
   ASSERT_NE(updated_outputs, nullptr);
-
-  EXPECT_EQ(collapsed_add->name(), updated_outputs->input(0));
+  ASSERT_EQ(updated_outputs->input_size(), 1);
+  EXPECT_EQ(updated_outputs->input(0), collapsed_add->name());
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MultiplePasses) {
+TEST_F(ArithmeticOptimizerTest, AddOpsRewriteMultiplePasses) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
@@ -1962,7 +2008,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MultiplePasses) {
   std::vector<std::pair<string, Tensor>> feed = {
       {"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -1979,7 +2025,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MultiplePasses) {
   //   +   c   x   + -->    AddN(a, b, c)  AddN(x, y, z))
   //  / \         / \
   // a   b       y   z
-  EXPECT_EQ(10, output.node_size());
+  EXPECT_EQ(output.node_size(), 10);
 
   NodeMap node_map(&output);
 
@@ -1988,38 +2034,38 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MultiplePasses) {
       node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
   ASSERT_NE(collapsed_left, nullptr);
 
-  EXPECT_EQ("AddN", collapsed_left->op());
-  EXPECT_EQ(3, collapsed_left->input_size());
-  EXPECT_EQ("a", collapsed_left->input(0));
-  EXPECT_EQ("b", collapsed_left->input(1));
-  EXPECT_EQ("c", collapsed_left->input(2));
+  EXPECT_EQ(collapsed_left->op(), "AddN");
+  ASSERT_EQ(collapsed_left->input_size(), 3);
+  EXPECT_EQ(collapsed_left->input(0), "a");
+  EXPECT_EQ(collapsed_left->input(1), "b");
+  EXPECT_EQ(collapsed_left->input(2), "c");
 
   // check right Add subtree replaced with AddN
   const NodeDef* collapsed_right =
       node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_xyz");
   ASSERT_NE(collapsed_right, nullptr);
 
-  EXPECT_EQ("AddN", collapsed_right->op());
-  EXPECT_EQ(3, collapsed_right->input_size());
-  EXPECT_EQ("x", collapsed_right->input(0));
-  EXPECT_EQ("y", collapsed_right->input(1));
-  EXPECT_EQ("z", collapsed_right->input(2));
+  EXPECT_EQ(collapsed_right->op(), "AddN");
+  ASSERT_EQ(collapsed_right->input_size(), 3);
+  EXPECT_EQ(collapsed_right->input(0), "x");
+  EXPECT_EQ(collapsed_right->input(1), "y");
+  EXPECT_EQ(collapsed_right->input(2), "z");
 
   // check that Mul inputs re-wired to new Nodes
   const NodeDef* updated_mul = node_map.GetNode("Mul");
   ASSERT_NE(updated_mul, nullptr);
 
-  EXPECT_EQ("Mul", updated_mul->op());
-  EXPECT_EQ(2, updated_mul->input_size());
-  EXPECT_EQ(collapsed_left->name(), updated_mul->input(0));
-  EXPECT_EQ(collapsed_right->name(), updated_mul->input(1));
+  EXPECT_EQ(updated_mul->op(), "Mul");
+  ASSERT_EQ(updated_mul->input_size(), 2);
+  EXPECT_EQ(updated_mul->input(0), collapsed_left->name());
+  EXPECT_EQ(updated_mul->input(1), collapsed_right->name());
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddInputMultipleTimes) {
+TEST_F(ArithmeticOptimizerTest, AddOpsRewriteAddInputMultipleTimes) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
@@ -2040,7 +2086,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddInputMultipleTimes) {
   std::vector<std::pair<string, Tensor>> feed = {
       {"a", a_t}, {"b", b_t}, {"c", c_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -2055,7 +2101,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddInputMultipleTimes) {
   //   +   +     -->    AddN(a, b, b, c)
   //  / \ / \                   ^
   // a   b   c                  b added twice!
-  EXPECT_EQ(5, output.node_size());
+  EXPECT_EQ(output.node_size(), 5);
 
   NodeMap node_map(&output);
 
@@ -2064,19 +2110,19 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddInputMultipleTimes) {
       node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_all");
   ASSERT_NE(collapsed_add, nullptr);
 
-  EXPECT_EQ("AddN", collapsed_add->op());
-  EXPECT_EQ(4, collapsed_add->input_size());
-  EXPECT_EQ("a", collapsed_add->input(0));
-  EXPECT_EQ("b", collapsed_add->input(1));
-  EXPECT_EQ("b", collapsed_add->input(2));
-  EXPECT_EQ("c", collapsed_add->input(3));
+  EXPECT_EQ(collapsed_add->op(), "AddN");
+  ASSERT_EQ(collapsed_add->input_size(), 4);
+  EXPECT_EQ(collapsed_add->input(0), "a");
+  EXPECT_EQ(collapsed_add->input(1), "b");
+  EXPECT_EQ(collapsed_add->input(2), "b");
+  EXPECT_EQ(collapsed_add->input(3), "c");
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfSymbolicallyEqualShape) {
+TEST_F(ArithmeticOptimizerTest, AddOpsRewriteAddOpsOfSymbolicallyEqualShape) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   // unknown input shape propagated symbolically through the graph
@@ -2100,7 +2146,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfSymbolicallyEqualShape) {
   auto x_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
   std::vector<std::pair<string, Tensor>> feed = {{"input", x_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -2115,7 +2161,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfSymbolicallyEqualShape) {
   //   +   c      -->    AddN(a, b, c)
   //  / \
   // a   b
-  EXPECT_EQ(6, output.node_size());
+  EXPECT_EQ(output.node_size(), 6);
 
   NodeMap node_map(&output);
 
@@ -2123,23 +2169,24 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_AddOpsOfSymbolicallyEqualShape) {
   const NodeDef* collapsed_add =
       node_map.GetNode("ArithmeticOptimizer/AddOpsRewrite_Add_abc");
   ASSERT_NE(collapsed_add, nullptr);
-  EXPECT_EQ("AddN", collapsed_add->op());
-  EXPECT_EQ(3, collapsed_add->input_size());
-  EXPECT_EQ("a", collapsed_add->input(0));
-  EXPECT_EQ("b", collapsed_add->input(1));
-  EXPECT_EQ("c", collapsed_add->input(2));
+  EXPECT_EQ(collapsed_add->op(), "AddN");
+  ASSERT_EQ(collapsed_add->input_size(), 3);
+  EXPECT_EQ(collapsed_add->input(0), "a");
+  EXPECT_EQ(collapsed_add->input(1), "b");
+  EXPECT_EQ(collapsed_add->input(2), "c");
 
   // check output was re-wired to new node
   const NodeDef* updated_outputs = node_map.GetNode("outputs");
   ASSERT_NE(updated_outputs, nullptr);
-  EXPECT_EQ(collapsed_add->name(), updated_outputs->input(0));
+  ASSERT_EQ(updated_outputs->input_size(), 1);
+  EXPECT_EQ(updated_outputs->input(0), collapsed_add->name());
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCast) {
+TEST_F(ArithmeticOptimizerTest, AddOpsRewriteMinimizeBCast) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   auto a = ops::Variable(s.WithOpName("a"), {32}, DT_FLOAT);
@@ -2170,7 +2217,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCast) {
   std::vector<std::pair<string, Tensor>> feed = {
       {"a", a_t}, {"b", b_t}, {"c", c_t}, {"x", x_t}, {"y", y_t}, {"z", z_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -2189,7 +2236,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCast) {
   //   +   c   x   + -->    AddN(a, x)  AddN(b, y)
   //  / \         / \
   // a   b       y   z
-  EXPECT_EQ(12, output.node_size());
+  EXPECT_EQ(output.node_size(), 12);
   NodeMap node_map(&output);
 
   // expected names of outer and inner nodes
@@ -2203,54 +2250,55 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCast) {
   // Add [a, x] first
   const NodeDef* add_ax_node = node_map.GetNode(inner_0_add_name);
   ASSERT_NE(add_ax_node, nullptr);
-  EXPECT_EQ("AddN", add_ax_node->op());
-  EXPECT_EQ(2, add_ax_node->input_size());
-  EXPECT_EQ("a", add_ax_node->input(0));
-  EXPECT_EQ("x", add_ax_node->input(1));
+  EXPECT_EQ(add_ax_node->op(), "AddN");
+  ASSERT_EQ(add_ax_node->input_size(), 2);
+  EXPECT_EQ(add_ax_node->input(0), "a");
+  EXPECT_EQ(add_ax_node->input(1), "x");
 
   // Then add [b, y]
   const NodeDef* add_by_node = node_map.GetNode(inner_1_add_name);
   ASSERT_NE(add_by_node, nullptr);
-  EXPECT_EQ("AddN", add_by_node->op());
-  EXPECT_EQ(2, add_by_node->input_size());
-  EXPECT_EQ("b", add_by_node->input(0));
-  EXPECT_EQ("y", add_by_node->input(1));
+  EXPECT_EQ(add_by_node->op(), "AddN");
+  ASSERT_EQ(add_by_node->input_size(), 2);
+  EXPECT_EQ(add_by_node->input(0), "b");
+  EXPECT_EQ(add_by_node->input(1), "y");
 
   // Then add [c, z]
   const NodeDef* add_cz_node = node_map.GetNode(inner_2_add_name);
   ASSERT_NE(add_cz_node, nullptr);
-  EXPECT_EQ("AddN", add_cz_node->op());
-  EXPECT_EQ(2, add_cz_node->input_size());
-  EXPECT_EQ("c", add_cz_node->input(0));
-  EXPECT_EQ("z", add_cz_node->input(1));
+  EXPECT_EQ(add_cz_node->op(), "AddN");
+  ASSERT_EQ(add_cz_node->input_size(), 2);
+  EXPECT_EQ(add_cz_node->input(0), "c");
+  EXPECT_EQ(add_cz_node->input(1), "z");
 
   // Then add results together starting from smaller shapes [a, x] + [b, y]
   const NodeDef* outer_0_node = node_map.GetNode(outer_0_add_name);
   ASSERT_NE(outer_0_node, nullptr);
-  EXPECT_EQ("Add", outer_0_node->op());
-  EXPECT_EQ(2, outer_0_node->input_size());
-  EXPECT_EQ(inner_0_add_name, outer_0_node->input(0));
-  EXPECT_EQ(inner_1_add_name, outer_0_node->input(1));
+  EXPECT_EQ(outer_0_node->op(), "Add");
+  ASSERT_EQ(outer_0_node->input_size(), 2);
+  EXPECT_EQ(outer_0_node->input(0), inner_0_add_name);
+  EXPECT_EQ(outer_0_node->input(1), inner_1_add_name);
 
   // And finally top level Add node
   const NodeDef* outer_node = node_map.GetNode(outer_add_name);
   ASSERT_NE(outer_node, nullptr);
-  EXPECT_EQ("Add", outer_node->op());
-  EXPECT_EQ(2, outer_node->input_size());
-  EXPECT_EQ(outer_0_add_name, outer_node->input(0));
-  EXPECT_EQ(inner_2_add_name, outer_node->input(1));
+  EXPECT_EQ(outer_node->op(), "Add");
+  ASSERT_EQ(outer_node->input_size(), 2);
+  EXPECT_EQ(outer_node->input(0), outer_0_add_name);
+  EXPECT_EQ(outer_node->input(1), inner_2_add_name);
 
   // And outputs reading new top level Add node
   const NodeDef* updated_outputs = node_map.GetNode("outputs");
   ASSERT_NE(updated_outputs, nullptr);
-  EXPECT_EQ(outer_add_name, updated_outputs->input(0));
+  ASSERT_EQ(updated_outputs->input_size(), 1);
+  EXPECT_EQ(updated_outputs->input(0), outer_add_name);
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
-TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCastWithSymbolicShapes) {
+TEST_F(ArithmeticOptimizerTest, AddOpsRewriteMinimizeBCastWithSymbolicShapes) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
 
   // We have a small input with one unknown dimension
@@ -2281,7 +2329,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCastWithSymbolicShapes) {
   auto v_t = GenerateRandomTensor<DT_DOUBLE>(TensorShape({1, 32, 32}));
   std::vector<std::pair<string, Tensor>> feed = {{"small", s_t}, {"v", v_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -2296,7 +2344,7 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCastWithSymbolicShapes) {
   //   +   c      -->     +   b
   //  / \                / \
   // a   b              a   c
-  EXPECT_EQ(9, output.node_size());
+  EXPECT_EQ(output.node_size(), 9);
   NodeMap node_map(&output);
 
   // expected names of outer and inner nodes
@@ -2306,25 +2354,27 @@ TEST_F(ArithmeticOptimizerTest, AddOpsRewrite_MinimizeBCastWithSymbolicShapes) {
   // outer Add node
   const NodeDef* outer_add = node_map.GetNode(outer_add_name);
   ASSERT_NE(outer_add, nullptr);
-  EXPECT_EQ("Add", outer_add->op());
-  EXPECT_EQ(inner_add_name, outer_add->input(0));
-  EXPECT_EQ("b", outer_add->input(1));
+  EXPECT_EQ(outer_add->op(), "Add");
+  ASSERT_EQ(outer_add->input_size(), 2);
+  EXPECT_EQ(outer_add->input(0), inner_add_name);
+  EXPECT_EQ(outer_add->input(1), "b");
 
   // inner AddN node
   const NodeDef* inner_add = node_map.GetNode(inner_add_name);
   ASSERT_NE(inner_add, nullptr);
-  EXPECT_EQ(2, inner_add->input_size());
-  EXPECT_EQ("a", inner_add->input(0));
-  EXPECT_EQ("c", inner_add->input(1));
+  ASSERT_EQ(inner_add->input_size(), 2);
+  EXPECT_EQ(inner_add->input(0), "a");
+  EXPECT_EQ(inner_add->input(1), "c");
 
   // check output was re-wired to new node
   const NodeDef* updated_outputs = node_map.GetNode("outputs");
   ASSERT_NE(updated_outputs, nullptr);
-  EXPECT_EQ(outer_add_name, updated_outputs->input(0));
+  ASSERT_EQ(updated_outputs->input_size(), 1);
+  EXPECT_EQ(updated_outputs->input(0), outer_add_name);
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, RemoveNegation) {
@@ -2358,61 +2408,61 @@ TEST_F(ArithmeticOptimizerTest, RemoveNegation) {
   auto y_t = GenerateRandomTensor<DT_FLOAT>(TensorShape({2, 2}));
   std::vector<std::pair<string, Tensor>> feed = {{"x", x_t}, {"y", y_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyRemoveNegation(&optimizer);
   OptimizeTwice(&optimizer, &item, &output);
 
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   int found = 0;
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "Add_negx_y") {
       ++found;
-      EXPECT_EQ("Sub", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("y", node.input(0));
-      EXPECT_EQ("x", node.input(1));
+      EXPECT_EQ(node.op(), "Sub");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "y");
+      EXPECT_EQ(node.input(1), "x");
     } else if (node.name() == "Add_x_negy") {
       ++found;
-      EXPECT_EQ("Sub", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
-      EXPECT_EQ("y", node.input(1));
+      EXPECT_EQ(node.op(), "Sub");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
+      EXPECT_EQ(node.input(1), "y");
     } else if (node.name() == "Add_negx_negy") {
       ++found;
-      EXPECT_EQ("Sub", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("Neg_x", node.input(0));
-      EXPECT_EQ("y", node.input(1));
+      EXPECT_EQ(node.op(), "Sub");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "Neg_x");
+      EXPECT_EQ(node.input(1), "y");
     } else if (node.name() == "Sub_x_negy") {
       ++found;
-      EXPECT_EQ("Add", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
-      EXPECT_EQ("y", node.input(1));
+      EXPECT_EQ(node.op(), "Add");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
+      EXPECT_EQ(node.input(1), "y");
     } else if (node.name() == "Sub_negx_negy") {
       ++found;
-      EXPECT_EQ("Sub", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("y", node.input(0));
-      EXPECT_EQ("x", node.input(1));
+      EXPECT_EQ(node.op(), "Sub");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "y");
+      EXPECT_EQ(node.input(1), "x");
     } else if (node.name() == "Add_negx_with_dep_y") {
       ++found;
-      EXPECT_EQ("Sub", node.op());
-      EXPECT_EQ(3, node.input_size());
-      EXPECT_EQ("y", node.input(0));
-      EXPECT_EQ("x", node.input(1));
-      EXPECT_EQ("^Add_x_y", node.input(2));
+      EXPECT_EQ(node.op(), "Sub");
+      ASSERT_EQ(node.input_size(), 3);
+      EXPECT_EQ(node.input(0), "y");
+      EXPECT_EQ(node.input(1), "x");
+      EXPECT_EQ(node.input(2), "^Add_x_y");
     }
   }
-  EXPECT_EQ(6, found);
+  EXPECT_EQ(found, 6);
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, ConvertSqrtDivToRsqrtMul) {
@@ -2426,28 +2476,28 @@ TEST_F(ArithmeticOptimizerTest, ConvertSqrtDivToRsqrtMul) {
   item.fetch = {"output"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlySqrtDivToRsqrtMul(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
 
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "output") {
-      EXPECT_EQ("Mul", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
-      EXPECT_EQ("sqrt_y", node.input(1));
+      EXPECT_EQ(node.op(), "Mul");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
+      EXPECT_EQ(node.input(1), "sqrt_y");
     } else if (node.name() == "sqrt_y") {
-      EXPECT_EQ("Rsqrt", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("y", node.input(0));
+      EXPECT_EQ(node.op(), "Rsqrt");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "y");
     }
   }
 }
@@ -2465,31 +2515,31 @@ TEST_F(ArithmeticOptimizerTest, DoNotConvertSqrtDivToRsqrtMulDivisorFetchNode) {
   item.fetch = {"grad", "output0"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  ASSERT_EQ(2, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 2);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlySqrtDivToRsqrtMul(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   auto tensors = EvaluateNodes(output, item.fetch);
-  ASSERT_EQ(2, tensors.size());
+  ASSERT_EQ(tensors.size(), 2);
 
   for (int i = 0; i < tensors.size(); i++) {
     EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
-    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
+    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
   }
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "grad") {
-      EXPECT_EQ("Div", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("mul1", node.input(0));
-      EXPECT_EQ("output0", node.input(1));
+      EXPECT_EQ(node.op(), "Div");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "mul1");
+      EXPECT_EQ(node.input(1), "output0");
     } else if (node.name() == "output0") {
-      EXPECT_EQ("Sqrt", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("floats", node.input(0));
+      EXPECT_EQ(node.op(), "Sqrt");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "floats");
     }
   }
 }
@@ -2505,28 +2555,28 @@ TEST_F(ArithmeticOptimizerTest, FuseSquaredDiff) {
   item.fetch = {"output"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyFuseSquaredDiff(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   const auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
 
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "output") {
-      EXPECT_EQ("Identity", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("sub_x_y", node.input(0));
+      EXPECT_EQ(node.op(), "Identity");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "sub_x_y");
     } else if (node.name() == "sub_x_y") {
-      EXPECT_EQ("SquaredDifference", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
-      EXPECT_EQ("y", node.input(1));
+      EXPECT_EQ(node.op(), "SquaredDifference");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
+      EXPECT_EQ(node.input(1), "y");
     }
   }
 }
@@ -2542,31 +2592,31 @@ TEST_F(ArithmeticOptimizerTest, DoNotFuseSquaredDiffFetchNode) {
   item.fetch = {"output", "sub_x_y"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  ASSERT_EQ(2, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 2);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyFuseSquaredDiff(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   const auto tensors = EvaluateNodes(output, item.fetch);
-  ASSERT_EQ(2, tensors.size());
+  ASSERT_EQ(tensors.size(), 2);
 
   for (int i = 0; i < tensors.size(); i++) {
     EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
-    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
+    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
   }
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "output") {
-      EXPECT_EQ("Square", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("sub_x_y", node.input(0));
+      EXPECT_EQ(node.op(), "Square");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "sub_x_y");
     } else if (node.name() == "sub_x_y") {
-      EXPECT_EQ("Sub", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
-      EXPECT_EQ("y", node.input(1));
+      EXPECT_EQ(node.op(), "Sub");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
+      EXPECT_EQ(node.input(1), "y");
     }
   }
 }
@@ -2581,23 +2631,23 @@ TEST_F(ArithmeticOptimizerTest, ConvertLogSoftmax) {
   item.fetch = {"output"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyLogSoftmax(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   const auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
 
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
-  EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
+  EXPECT_EQ(output.node_size(), item.graph.node_size() - 1);
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "output") {
-      EXPECT_EQ("LogSoftmax", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "LogSoftmax");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "x");
     }
   }
 }
@@ -2613,14 +2663,14 @@ TEST_F(ArithmeticOptimizerTest, DoNotConvertLogSoftmaxArgFetchNode) {
   item.fetch = {"softmax", "final_output"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  ASSERT_EQ(2, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 2);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyLogSoftmax(&optimizer);
   OptimizeTwice(&optimizer, &item, &output);
   const auto tensors = EvaluateNodes(output, item.fetch);
-  ASSERT_EQ(2, tensors.size());
+  ASSERT_EQ(tensors.size(), 2);
 
   // Should be a NoOp since we are not allowed to change the output of fetch
   // nodes.
@@ -2628,7 +2678,7 @@ TEST_F(ArithmeticOptimizerTest, DoNotConvertLogSoftmaxArgFetchNode) {
 
   for (int i = 0; i < tensors.size(); i++) {
     EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
-    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
+    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
   }
 }
 
@@ -2660,14 +2710,14 @@ TEST_F(ArithmeticOptimizerTest, ConvertPow) {
                 "out_1", "out",  "out_bcast1", "out_bcast2"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(9, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 9);
 
   GraphDef got;
   ArithmeticOptimizer optimizer;
   EnableOnlyConvertPow(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &got);
   auto tensors = EvaluateNodes(got, item.fetch);
-  EXPECT_EQ(9, tensors.size());
+  ASSERT_EQ(tensors.size(), 9);
 
   for (int i = 0; i < tensors.size(); ++i) {
     EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
@@ -2708,14 +2758,14 @@ TEST_F(ArithmeticOptimizerTest, Log1p) {
   item.fetch = {"out1", "out2"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(2, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 2);
 
   GraphDef got;
   ArithmeticOptimizer optimizer;
   EnableOnlyLog1p(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &got);
   auto tensors = EvaluateNodes(got, item.fetch);
-  EXPECT_EQ(2, tensors.size());
+  ASSERT_EQ(tensors.size(), 2);
 
   for (int i = 0; i < 2; ++i) {
     EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
@@ -2746,14 +2796,14 @@ TEST_F(ArithmeticOptimizerTest, Expm1) {
   item.fetch = {"out1", "out2"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(2, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 2);
 
   GraphDef got;
   ArithmeticOptimizer optimizer;
   EnableOnlyExpm1(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &got);
   auto tensors = EvaluateNodes(got, item.fetch);
-  EXPECT_EQ(2, tensors.size());
+  ASSERT_EQ(tensors.size(), 2);
 
   for (int i = 0; i < 2; ++i) {
     EXPECT_EQ(tensors[i].NumElements(), tensors_expected[i].NumElements());
@@ -2792,7 +2842,7 @@ TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_SimpleSwap) {
   std::vector<std::pair<string, Tensor>> feed = {
       {"a", a_t}, {"b", b_t}, {"c", c_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -2811,17 +2861,19 @@ TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_SimpleSwap) {
 
   const NodeDef* mul1_node = node_map.GetNode("mul1");
   ASSERT_NE(mul1_node, nullptr);
-  EXPECT_EQ("a", mul1_node->input(0));
-  EXPECT_EQ("c", mul1_node->input(1));
+  ASSERT_EQ(mul1_node->input_size(), 2);
+  EXPECT_EQ(mul1_node->input(0), "a");
+  EXPECT_EQ(mul1_node->input(1), "c");
 
   const NodeDef* mul2_node = node_map.GetNode("mul2");
   ASSERT_NE(mul2_node, nullptr);
-  EXPECT_EQ("mul1", mul2_node->input(0));
-  EXPECT_EQ("b", mul2_node->input(1));
+  ASSERT_EQ(mul2_node->input_size(), 2);
+  EXPECT_EQ(mul2_node->input(0), "mul1");
+  EXPECT_EQ(mul2_node->input(1), "b");
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_FlattenTallGraph) {
@@ -2852,7 +2904,7 @@ TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_FlattenTallGraph) {
   std::vector<std::pair<string, Tensor>> feed = {
       {"a", a_t}, {"b", b_t}, {"c", c_t}, {"d", d_t}, {"e", e_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -2876,27 +2928,31 @@ TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_FlattenTallGraph) {
 
   const NodeDef* mul1_node = node_map.GetNode("mul1");
   ASSERT_NE(mul1_node, nullptr);
-  EXPECT_EQ("a", mul1_node->input(0));
-  EXPECT_EQ("c", mul1_node->input(1));
+  ASSERT_EQ(mul1_node->input_size(), 2);
+  EXPECT_EQ(mul1_node->input(0), "a");
+  EXPECT_EQ(mul1_node->input(1), "c");
 
   const NodeDef* mul2_node = node_map.GetNode("mul2");
   ASSERT_NE(mul2_node, nullptr);
-  EXPECT_EQ("d", mul2_node->input(0));
-  EXPECT_EQ("e", mul2_node->input(1));
+  ASSERT_EQ(mul2_node->input_size(), 2);
+  EXPECT_EQ(mul2_node->input(0), "d");
+  EXPECT_EQ(mul2_node->input(1), "e");
 
   const NodeDef* mul3_node = node_map.GetNode("mul3");
   ASSERT_NE(mul3_node, nullptr);
-  EXPECT_EQ("mul1", mul3_node->input(0));
-  EXPECT_EQ("mul2", mul3_node->input(1));
+  ASSERT_EQ(mul3_node->input_size(), 2);
+  EXPECT_EQ(mul3_node->input(0), "mul1");
+  EXPECT_EQ(mul3_node->input(1), "mul2");
 
   const NodeDef* mul4_node = node_map.GetNode("mul4");
   ASSERT_NE(mul4_node, nullptr);
-  EXPECT_EQ("mul3", mul4_node->input(0));
-  EXPECT_EQ("b", mul4_node->input(1));
+  ASSERT_EQ(mul4_node->input_size(), 2);
+  EXPECT_EQ(mul4_node->input(0), "mul3");
+  EXPECT_EQ(mul4_node->input(1), "b");
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<double>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_BuildTreeUp) {
@@ -2925,7 +2981,7 @@ TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_BuildTreeUp) {
   std::vector<std::pair<string, Tensor>> feed = {
       {"a", a_t}, {"b", b_t}, {"c", c_t}, {"D", d_t}};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -2946,22 +3002,25 @@ TEST_F(ArithmeticOptimizerTest, MinimizeBroadcasts_BuildTreeUp) {
 
   const NodeDef* mul1_node = node_map.GetNode("mul2");
   ASSERT_NE(mul1_node, nullptr);
-  EXPECT_EQ("a", mul1_node->input(0));
-  EXPECT_EQ("b", mul1_node->input(1));
+  ASSERT_EQ(mul1_node->input_size(), 2);
+  EXPECT_EQ(mul1_node->input(0), "a");
+  EXPECT_EQ(mul1_node->input(1), "b");
 
   const NodeDef* mul2_node = node_map.GetNode("mul1");
   ASSERT_NE(mul2_node, nullptr);
-  EXPECT_EQ("mul2", mul2_node->input(0));
-  EXPECT_EQ("c", mul2_node->input(1));
+  ASSERT_EQ(mul2_node->input_size(), 2);
+  EXPECT_EQ(mul2_node->input(0), "mul2");
+  EXPECT_EQ(mul2_node->input(1), "c");
 
   const NodeDef* mul3_node = node_map.GetNode("mul3");
   ASSERT_NE(mul3_node, nullptr);
-  EXPECT_EQ("D", mul3_node->input(0));
-  EXPECT_EQ("mul1", mul3_node->input(1));
+  ASSERT_EQ(mul3_node->input_size(), 2);
+  EXPECT_EQ(mul3_node->input(0), "D");
+  EXPECT_EQ(mul3_node->input(1), "mul1");
 
   auto tensors = EvaluateNodes(output, item.fetch, feed);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryFromConcat) {
@@ -3021,55 +3080,55 @@ TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryFromConcat) {
   int found = 0;
   for (const NodeDef& node : output.node()) {
     if (node.name() == "concat") {
-      ASSERT_EQ(4, node.input_size());
-      EXPECT_EQ("sin_a", node.input(0));
-      EXPECT_EQ("b", node.input(1));
-      EXPECT_EQ("c", node.input(2));
-      EXPECT_EQ("axis", node.input(3));
+      ASSERT_EQ(node.input_size(), 4);
+      EXPECT_EQ(node.input(0), "sin_a");
+      EXPECT_EQ(node.input(1), "b");
+      EXPECT_EQ(node.input(2), "c");
+      EXPECT_EQ(node.input(3), "axis");
       found++;
     }
     if (node.name() == "exp_a") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("concat", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "concat");
       found++;
     }
     if (node.name() == "id") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("exp_a", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "exp_a");
       found++;
     }
 
     if (node.name() == "concat2") {
-      ASSERT_EQ(4, node.input_size());
-      EXPECT_EQ("sin_a", node.input(0));
-      EXPECT_EQ("b", node.input(1));
-      EXPECT_EQ("c", node.input(2));
-      EXPECT_EQ("axis", node.input(3));
+      ASSERT_EQ(node.input_size(), 4);
+      EXPECT_EQ(node.input(0), "sin_a");
+      EXPECT_EQ(node.input(1), "b");
+      EXPECT_EQ(node.input(2), "c");
+      EXPECT_EQ(node.input(3), "axis");
       found++;
     }
     if (node.name() == "exp_a2") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("concat2", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "concat2");
       found++;
     }
     if (node.name() == "cos_exp_a2") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("exp_a2", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "exp_a2");
       found++;
     }
     if (node.name() == "id2") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("cos_exp_a2", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "cos_exp_a2");
       found++;
     }
   }
-  EXPECT_EQ(7, found);
+  EXPECT_EQ(found, 7);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(tensors.size(), tensors_expected.size());
+  ASSERT_EQ(tensors.size(), tensors_expected.size());
   EXPECT_EQ(tensors.size(), item.fetch.size());
   for (int i = 0; i < item.fetch.size(); ++i) {
-    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
+    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
   }
 }
 
@@ -3132,69 +3191,69 @@ TEST_F(ArithmeticOptimizerTest, HoistCWiseUnaryIntoSplit) {
     EXPECT_NE(node.name(), "cos_exp_b2");
 
     if (node.name() == "split1") {
-      ASSERT_EQ(2, node.input_size());
-      EXPECT_EQ("axis", node.input(0));
-      EXPECT_EQ("ArithmeticOptimizer/_sin_a_split1", node.input(1));
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "axis");
+      EXPECT_EQ(node.input(1), "ArithmeticOptimizer/_sin_a_split1");
       found++;
     }
     if (node.name() == "ArithmeticOptimizer/_sin_a_split1") {
-      EXPECT_EQ("Sin", node.op());
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "Sin");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "x");
       found++;
     }
     if (node.name() == "id_a") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("split1", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "split1");
       found++;
     }
     if (node.name() == "exp_b") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("split1:1", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "split1:1");
       found++;
     }
     if (node.name() == "id_b") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("exp_b", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "exp_b");
       found++;
     }
     if (node.name() == "ArithmeticOptimizer/_exp_a2_split2") {
-      EXPECT_EQ("Exp", node.op());
-      ASSERT_EQ(1, node.input_size());
-      ASSERT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "Exp");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "x");
       found++;
     }
     if (node.name() == "ArithmeticOptimizer/_cos_exp_a2_split2") {
-      EXPECT_EQ("Cos", node.op());
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("ArithmeticOptimizer/_exp_a2_split2", node.input(0));
+      EXPECT_EQ(node.op(), "Cos");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "ArithmeticOptimizer/_exp_a2_split2");
       found++;
     }
     if (node.name() == "split2") {
-      ASSERT_EQ(3, node.input_size());
-      EXPECT_EQ("ArithmeticOptimizer/_cos_exp_a2_split2", node.input(0));
-      EXPECT_EQ("size_splits2", node.input(1));
-      EXPECT_EQ("axis", node.input(2));
+      ASSERT_EQ(node.input_size(), 3);
+      EXPECT_EQ(node.input(0), "ArithmeticOptimizer/_cos_exp_a2_split2");
+      EXPECT_EQ(node.input(1), "size_splits2");
+      EXPECT_EQ(node.input(2), "axis");
       found++;
     }
     if (node.name() == "id_a2") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("split2", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "split2");
       found++;
     }
     if (node.name() == "id_b2") {
-      ASSERT_EQ(1, node.input_size());
-      EXPECT_EQ("split2:1", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "split2:1");
       found++;
     }
   }
-  EXPECT_EQ(10, found);
+  EXPECT_EQ(found, 10);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(tensors.size(), tensors_expected.size());
+  ASSERT_EQ(tensors.size(), tensors_expected.size());
   EXPECT_EQ(tensors.size(), item.fetch.size());
   for (int i = 0; i < item.fetch.size(); ++i) {
-    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
+    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
   }
 }
 
@@ -3222,26 +3281,26 @@ TEST_F(ArithmeticOptimizerTest, RemoveIdempotent) {
   int found = 0;
   for (const NodeDef& node : output.node()) {
     if (node.name() == "out1") {
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("sn1", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "sn1");
       found++;
     } else if (node.name() == "out2") {
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("id1", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "id1");
       found++;
     } else if (node.name() == "sn1") {
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("a", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "a");
       found++;
     }
   }
-  EXPECT_EQ(3, found);
+  EXPECT_EQ(found, 3);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(tensors.size(), tensors_expected.size());
+  ASSERT_EQ(tensors.size(), tensors_expected.size());
   EXPECT_EQ(tensors.size(), item.fetch.size());
   for (int i = 0; i < item.fetch.size(); ++i) {
-    test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
+    test::ExpectTensorNear<float>(tensors[i], tensors_expected[i], 1e-6);
   }
 }
 
@@ -3284,62 +3343,68 @@ TEST_F(ArithmeticOptimizerTest, RemoveLogicalNot) {
   int found = 0;
   for (const NodeDef& node : output.node()) {
     if (node.name() == "id_not_eq") {
-      EXPECT_EQ("eq", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "eq");
       ++found;
     }
     if (node.name() == "id_not_neq") {
-      EXPECT_EQ("neq", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "neq");
       ++found;
     }
     if (node.name() == "id_not_lt") {
-      EXPECT_EQ("lt", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "lt");
       ++found;
     }
     if (node.name() == "id_not_le") {
-      EXPECT_EQ("le", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "le");
       ++found;
     }
     if (node.name() == "id_not_gt") {
-      EXPECT_EQ("gt", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "gt");
       ++found;
     }
     if (node.name() == "id_not_ge") {
-      EXPECT_EQ("ge", node.input(0));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "ge");
       ++found;
     }
 
     if (node.name() == "eq") {
-      EXPECT_EQ("NotEqual", node.op());
+      EXPECT_EQ(node.op(), "NotEqual");
       ++found;
     }
     if (node.name() == "neq") {
-      EXPECT_EQ("Equal", node.op());
+      EXPECT_EQ(node.op(), "Equal");
       ++found;
     }
     if (node.name() == "lt") {
-      EXPECT_EQ("GreaterEqual", node.op());
+      EXPECT_EQ(node.op(), "GreaterEqual");
       ++found;
     }
     if (node.name() == "le") {
-      EXPECT_EQ("Greater", node.op());
+      EXPECT_EQ(node.op(), "Greater");
       ++found;
     }
     if (node.name() == "gt") {
-      EXPECT_EQ("LessEqual", node.op());
+      EXPECT_EQ(node.op(), "LessEqual");
       ++found;
     }
     if (node.name() == "ge") {
-      EXPECT_EQ("Less", node.op());
+      EXPECT_EQ(node.op(), "Less");
       ++found;
     }
   }
-  EXPECT_EQ(12, found);
+  EXPECT_EQ(found, 12);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(tensors.size(), tensors_expected.size());
+  ASSERT_EQ(tensors.size(), tensors_expected.size());
   EXPECT_EQ(tensors.size(), item.fetch.size());
   for (int i = 0; i < item.fetch.size(); ++i) {
-    test::ExpectTensorEqual<bool>(tensors_expected[i], tensors[i]);
+    test::ExpectTensorEqual<bool>(tensors[i], tensors_expected[i]);
   }
 }
 
@@ -3354,34 +3419,34 @@ TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWise) {
   item.fetch = {"final_out"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
 
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   // Check if the inputs are switched
   int required_node_count = 0;
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "sqrt") {
-      EXPECT_EQ("Sqrt", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("reduce_max", node.input(0));
+      EXPECT_EQ(node.op(), "Sqrt");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "reduce_max");
       ++required_node_count;
     } else if (node.name() == "reduce_max") {
-      EXPECT_EQ("Max", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "Max");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
       ++required_node_count;
     }
   }
-  EXPECT_EQ(2, required_node_count);
+  EXPECT_EQ(required_node_count, 2);
 }
 
 TEST_F(ArithmeticOptimizerTest, OptimizeArgMaxOrArgMinOfMonotonicElementWise) {
@@ -3395,38 +3460,38 @@ TEST_F(ArithmeticOptimizerTest, OptimizeArgMaxOrArgMinOfMonotonicElementWise) {
   item.fetch = {"final_out"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   const auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   const auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
 
-  test::ExpectTensorEqual<int64>(tensors_expected[0], tensors[0]);
-  EXPECT_EQ(item.graph.node_size() - 1, output.node_size());
+  test::ExpectTensorEqual<int64>(tensors[0], tensors_expected[0]);
+  EXPECT_EQ(output.node_size(), item.graph.node_size() - 1);
   // Check if the inputs are switched
   int required_node_count = 0;
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "final_out") {
-      EXPECT_EQ("Identity", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("arg_max", node.input(0));
+      EXPECT_EQ(node.op(), "Identity");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "arg_max");
       ++required_node_count;
     } else if (node.name() == "arg_max") {
-      EXPECT_EQ("ArgMax", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "ArgMax");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
       ++required_node_count;
     }
   }
-  EXPECT_EQ(2, required_node_count);
+  EXPECT_EQ(required_node_count, 2);
 }
 
 TEST_F(ArithmeticOptimizerTest,
-       OptimizeMaxOrMinOfMonotonicElementWise_DoNotChangeFetchNode) {
+       OptimizeMaxOrMinOfMonotonicElementWiseDoNotChangeFetchNode) {
   tensorflow::Scope s = tensorflow::Scope::NewRootScope();
   auto x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2});
   Output sqrt = ops::Sqrt(s.WithOpName("sqrt"), x);
@@ -3437,7 +3502,7 @@ TEST_F(ArithmeticOptimizerTest,
   item.fetch = {"sqrt", "final_out"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(2, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 2);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -3461,7 +3526,7 @@ TEST_F(ArithmeticOptimizerTest,
   item.fetch = {"z"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  ASSERT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -3473,7 +3538,7 @@ TEST_F(ArithmeticOptimizerTest,
   VerifyGraphsMatch(item.graph, output, __LINE__);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  ASSERT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
   test::ExpectTensorEqual<int>(tensors[0], tensors_expected[0]);
   test::ExpectTensorEqual<int>(tensors[0], Tensor(-2));
 }
@@ -3490,30 +3555,30 @@ TEST_F(ArithmeticOptimizerTest,
   item.fetch = {"final_out"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
 
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   // Check if the inputs are switched
   int required_node_count = 0;
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "neg") {
-      EXPECT_EQ("Neg", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("reduce_max", node.input(0));
+      EXPECT_EQ(node.op(), "Neg");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "reduce_max");
       ++required_node_count;
     } else if (node.name() == "reduce_max") {
-      EXPECT_EQ("Min", node.op());
-      EXPECT_EQ(2, node.input_size());
-      EXPECT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "Min");
+      ASSERT_EQ(node.input_size(), 2);
+      EXPECT_EQ(node.input(0), "x");
       ++required_node_count;
     }
   }
@@ -3532,7 +3597,7 @@ TEST_F(ArithmeticOptimizerTest,
   item.fetch = {"max_pool"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  ASSERT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -3543,8 +3608,8 @@ TEST_F(ArithmeticOptimizerTest,
   VerifyGraphsMatch(item.graph, output, __LINE__);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  ASSERT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWiseMaxPool) {
@@ -3559,34 +3624,34 @@ TEST_F(ArithmeticOptimizerTest, OptimizeMaxOrMinOfMonotonicElementWiseMaxPool) {
   item.fetch = {"final_out"};
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyOptimizeMaxOrMinOfMonotonic(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
+  ASSERT_EQ(tensors.size(), 1);
 
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
-  EXPECT_EQ(item.graph.node_size(), output.node_size());
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
+  EXPECT_EQ(output.node_size(), item.graph.node_size());
   // Check if the inputs are switched
   int required_node_count = 0;
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "sqrt") {
-      EXPECT_EQ("Sqrt", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("max_pool", node.input(0));
+      EXPECT_EQ(node.op(), "Sqrt");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "max_pool");
       ++required_node_count;
     } else if (node.name() == "max_pool") {
-      EXPECT_EQ("MaxPool", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "MaxPool");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "x");
       ++required_node_count;
     }
   }
-  EXPECT_EQ(2, required_node_count);
+  EXPECT_EQ(required_node_count, 2);
 }
 
 TEST_F(ArithmeticOptimizerTest, UnaryOpsComposition) {
@@ -3608,42 +3673,42 @@ TEST_F(ArithmeticOptimizerTest, UnaryOpsComposition) {
   }
 
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
   EnableOnlyUnaryOpsComposition(&optimizer);
   OptimizeAndPrune(&optimizer, &item, &output);
 
-  EXPECT_EQ(3, output.node_size());
+  EXPECT_EQ(output.node_size(), 3);
 
   // Check that Sqrt/Log/Relu were replaced with a single op.
   int required_node_count = 0;
   for (int i = 0; i < output.node_size(); ++i) {
     const NodeDef& node = output.node(i);
     if (node.name() == "final_out") {
-      EXPECT_EQ("Identity", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("relu/unary_ops_composition", node.input(0));
+      EXPECT_EQ(node.op(), "Identity");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "relu/unary_ops_composition");
       ++required_node_count;
     } else if (node.name() == "relu/unary_ops_composition") {
-      EXPECT_EQ("_UnaryOpsComposition", node.op());
-      EXPECT_EQ(1, node.input_size());
-      EXPECT_EQ("x", node.input(0));
+      EXPECT_EQ(node.op(), "_UnaryOpsComposition");
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(node.input(0), "x");
 
       auto op_names = node.attr().at("op_names").list().s();
-      EXPECT_EQ(3, op_names.size());
-      EXPECT_EQ("Sqrt", op_names[0]);
-      EXPECT_EQ("Log", op_names[1]);
-      EXPECT_EQ("Relu", op_names[2]);
+      ASSERT_EQ(op_names.size(), 3);
+      EXPECT_EQ(op_names[0], "Sqrt");
+      EXPECT_EQ(op_names[1], "Log");
+      EXPECT_EQ(op_names[2], "Relu");
       ++required_node_count;
     }
   }
-  EXPECT_EQ(2, required_node_count);
+  EXPECT_EQ(required_node_count, 2);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorNear<float>(tensors_expected[0], tensors[0], 1e-6);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
 }
 
 TEST_F(ArithmeticOptimizerTest, RemoveStackStridedSliceSameAxis) {
@@ -3781,29 +3846,29 @@ TEST_F(ArithmeticOptimizerTest, RemoveStackStridedSliceSameAxis) {
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
 
   // stacked[:, 0, :] == a.
-  test::ExpectTensorEqual<float>(tensors_expected[fA],
-                                 tensors_expected[fASliceOut]);
+  test::ExpectTensorEqual<float>(tensors_expected[fASliceOut],
+                                 tensors_expected[fA]);
   // stacked[:, 1, :] == b.
-  test::ExpectTensorEqual<float>(tensors_expected[fB],
-                                 tensors_expected[fBSliceOut]);
+  test::ExpectTensorEqual<float>(tensors_expected[fBSliceOut],
+                                 tensors_expected[fB]);
   // stacked[:, 2, :] == c.
-  test::ExpectTensorEqual<float>(tensors_expected[fC],
-                                 tensors_expected[fCSliceOut]);
+  test::ExpectTensorEqual<float>(tensors_expected[fCSliceOut],
+                                 tensors_expected[fC]);
 
   // stacked[:, 0:1, :] == expand_dims(a, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedA],
-                                 tensors_expected[fASlice01Out]);
+  test::ExpectTensorEqual<float>(tensors_expected[fASlice01Out],
+                                 tensors_expected[fExpandedA]);
 
   // stacked[:, :1, :] == expand_dims(a, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedA],
-                                 tensors_expected[fASliceTo1Out]);
+  test::ExpectTensorEqual<float>(tensors_expected[fASliceTo1Out],
+                                 tensors_expected[fExpandedA]);
 
   // stacked[:, 1:2, :] == expand_dims(b, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedB],
-                                 tensors_expected[fBSlice12Out]);
+  test::ExpectTensorEqual<float>(tensors_expected[fBSlice12Out],
+                                 tensors_expected[fExpandedB]);
   // stacked[:, 2:, :] == expand_dims(c, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedC],
-                                 tensors_expected[fCSlice2ToOut]);
+  test::ExpectTensorEqual<float>(tensors_expected[fCSlice2ToOut],
+                                 tensors_expected[fExpandedC]);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -3812,43 +3877,47 @@ TEST_F(ArithmeticOptimizerTest, RemoveStackStridedSliceSameAxis) {
 
   for (const auto& node : output.node()) {
     if (node.name() == "pa_slice_out") {
+      ASSERT_EQ(node.input_size(), 1);
       EXPECT_EQ(node.input(0), "a");
     } else if (node.name() == "pb_slice_out") {
+      ASSERT_EQ(node.input_size(), 1);
       EXPECT_EQ(node.input(0), "b");
     } else if (node.name() == "pc_slice_out") {
+      ASSERT_EQ(node.input_size(), 1);
       EXPECT_EQ(node.input(0), "c");
     } else if (str_util::EndsWith(node.name(), "_out")) {
-      EXPECT_EQ(strings::StrCat(node.input(0), "_out"),
-                strings::StrCat(
-                    "ArithmeticOptimizer/RemoveStackStridedSliceSameAxis_",
-                    node.name()));
+      ASSERT_EQ(node.input_size(), 1);
+      EXPECT_EQ(
+          absl::StrCat(node.input(0), "_out"),
+          absl::StrCat("ArithmeticOptimizer/RemoveStackStridedSliceSameAxis_",
+                       node.name()));
     }
   }
 
   auto tensors = EvaluateNodes(output, item.fetch);
 
   // stacked[:, 0, :] == a.
-  test::ExpectTensorEqual<float>(tensors_expected[fA], tensors[fASliceOut]);
+  test::ExpectTensorEqual<float>(tensors[fASliceOut], tensors_expected[fA]);
 
   // stacked[:, 1, :] == b.
-  test::ExpectTensorEqual<float>(tensors_expected[fB], tensors[fBSliceOut]);
+  test::ExpectTensorEqual<float>(tensors[fBSliceOut], tensors_expected[fB]);
   // stacked[:, 2, :] == c.
-  test::ExpectTensorEqual<float>(tensors_expected[fC], tensors[fCSliceOut]);
+  test::ExpectTensorEqual<float>(tensors[fCSliceOut], tensors_expected[fC]);
 
   // stacked[:, 0:1, :] == expand_dims(a, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedA],
-                                 tensors[fASlice01Out]);
+  test::ExpectTensorEqual<float>(tensors[fASlice01Out],
+                                 tensors_expected[fExpandedA]);
 
   // stacked[:, :1, :] == expand_dims(a, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedA],
-                                 tensors[fASliceTo1Out]);
+  test::ExpectTensorEqual<float>(tensors[fASliceTo1Out],
+                                 tensors_expected[fExpandedA]);
 
   // stacked[:, 1:2, :] == expand_dims(b, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedB],
-                                 tensors[fBSlice12Out]);
+  test::ExpectTensorEqual<float>(tensors[fBSlice12Out],
+                                 tensors_expected[fExpandedB]);
   // stacked[:, 2:, :] == expand_dims(c, 1).
-  test::ExpectTensorEqual<float>(tensors_expected[fExpandedC],
-                                 tensors[fCSlice2ToOut]);
+  test::ExpectTensorEqual<float>(tensors[fCSlice2ToOut],
+                                 tensors_expected[fExpandedC]);
 }
 
 TEST_F(ArithmeticOptimizerTest, SimplifyAggregationBFloat16) {
@@ -3862,7 +3931,7 @@ TEST_F(ArithmeticOptimizerTest, SimplifyAggregationBFloat16) {
   TF_CHECK_OK(s.ToGraphDef(&item.graph));
   item.fetch = {"id"};
   auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
-  EXPECT_EQ(1, tensors_expected.size());
+  ASSERT_EQ(tensors_expected.size(), 1);
 
   GraphDef output;
   ArithmeticOptimizer optimizer;
@@ -3870,11 +3939,11 @@ TEST_F(ArithmeticOptimizerTest, SimplifyAggregationBFloat16) {
   OptimizeAndPrune(&optimizer, &item, &output);
 
   // Extra node created for multiplier.
-  EXPECT_EQ(5, output.node_size());
+  EXPECT_EQ(output.node_size(), 5);
 
   auto tensors = EvaluateNodes(output, item.fetch);
-  EXPECT_EQ(1, tensors.size());
-  test::ExpectTensorEqual<bfloat16>(tensors_expected[0], tensors[0]);
+  ASSERT_EQ(tensors.size(), 1);
+  test::ExpectTensorEqual<bfloat16>(tensors[0], tensors_expected[0]);
 }
 
 }  // namespace grappler