diff --git a/tensorflow/cc/framework/scope.cc b/tensorflow/cc/framework/scope.cc
index e93ca8633e6..459149b47d1 100644
--- a/tensorflow/cc/framework/scope.cc
+++ b/tensorflow/cc/framework/scope.cc
@@ -318,7 +318,7 @@ Status Scope::ToGraph(Graph* g, GraphConstructorOptions opts) const {
   if (ok()) {
     GraphDef graph_def;
     graph()->ToGraphDef(&graph_def);
-    UpdateStatus(ConvertGraphDefToGraph(opts, graph_def, g));
+    UpdateStatus(ConvertGraphDefToGraph(opts, std::move(graph_def), g));
   }
   return *impl()->status_;
 }
diff --git a/tensorflow/compiler/mlir/tensorflow/translate/import_graphdef.cc b/tensorflow/compiler/mlir/tensorflow/translate/import_graphdef.cc
index 0b9012d9df0..e334da1df36 100644
--- a/tensorflow/compiler/mlir/tensorflow/translate/import_graphdef.cc
+++ b/tensorflow/compiler/mlir/tensorflow/translate/import_graphdef.cc
@@ -300,8 +300,8 @@ Status Importer::RemoveBackedges(const Graph& graph) {
   graph_ = absl::make_unique<Graph>(graph.flib_def());
   GraphConstructorOptions opts;
   opts.allow_internal_ops = true;
-  TF_RETURN_IF_ERROR(
-      ::tensorflow::ConvertGraphDefToGraph(opts, graph_def, graph_.get()));
+  TF_RETURN_IF_ERROR(::tensorflow::ConvertGraphDefToGraph(
+      opts, std::move(graph_def), graph_.get()));

   // Remove all the backedges. So the nodes can be added to the shape refiner.
   TF_RETURN_IF_ERROR(back_edge_helper_.Remove(graph_.get()));
@@ -1394,8 +1394,8 @@ StatusOr<mlir::OwningModuleRef> ConvertGraphdefToMlir(
   if (add_default_attributes) {
     TF_RETURN_IF_ERROR(AddDefaultsToNodeDef(&preprocessed_graphdef));
   }
-  TF_RETURN_IF_ERROR(
-      ConvertGraphDefToGraph(options, preprocessed_graphdef, &graph));
+  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
+      options, std::move(preprocessed_graphdef), &graph));

   return ConvertGraphToMlir(graph, debug_info, graph.flib_def(), specs,
                             context);
diff --git a/tensorflow/compiler/tf2xla/tf2xla.cc b/tensorflow/compiler/tf2xla/tf2xla.cc
index 3e4188f3c6d..3c2b256800c 100644
--- a/tensorflow/compiler/tf2xla/tf2xla.cc
+++ b/tensorflow/compiler/tf2xla/tf2xla.cc
@@ -384,8 +384,8 @@ Status InitGraph(const GraphDef& graph_def, const tf2xla::Config& config,
   TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(
       &second_copy_def, *g->op_registry(), /*node_offset=*/0));

-  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(GraphConstructorOptions(),
-                                            second_copy_def, g.get()));
+  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
+      GraphConstructorOptions(), std::move(second_copy_def), g.get()));
   TF_RETURN_IF_ERROR(RewriteAndPruneGraph(g.get(), config, feed_remapping));

   // Functionalize control flow.
diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc
index 3661367c708..c764a587757 100644
--- a/tensorflow/core/common_runtime/direct_session.cc
+++ b/tensorflow/core/common_runtime/direct_session.cc
@@ -1614,15 +1614,15 @@ Status DirectSession::CreateGraphs(
     }
   }

-  for (const auto& partition : partitions) {
+  for (auto& partition : partitions) {
     std::unique_ptr<Graph> device_graph(
         new Graph(client_graph->flib_def.get()));
     GraphConstructorOptions device_opts;
     // There are internal operations (e.g., send/recv) that we now allow.
     device_opts.allow_internal_ops = true;
     device_opts.expect_device_spec = true;
-    TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(device_opts, partition.second,
-                                              device_graph.get()));
+    TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
+        device_opts, std::move(partition.second), device_graph.get()));
     outputs->emplace(partition.first, std::move(device_graph));
   }
diff --git a/tensorflow/core/common_runtime/graph_execution_state.cc b/tensorflow/core/common_runtime/graph_execution_state.cc
index 49071833f24..7468d6bc72a 100644
--- a/tensorflow/core/common_runtime/graph_execution_state.cc
+++ b/tensorflow/core/common_runtime/graph_execution_state.cc
@@ -757,8 +757,8 @@ Status GraphExecutionState::OptimizeGraph(
     GraphConstructorOptions opts;
     opts.allow_internal_ops = true;
-    TF_RETURN_IF_ERROR(
-        ConvertGraphDefToGraph(opts, new_graph, optimized_graph->get()));
+    TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, std::move(new_graph),
+                                              optimized_graph->get()));
     // The graph conversion sets the requested device names but not the
     // assigned device names. However, since at this point the graph is placed
     // TF expects an assigned device name for every node. Therefore we copy
diff --git a/tensorflow/core/distributed_runtime/graph_mgr.cc b/tensorflow/core/distributed_runtime/graph_mgr.cc
index 81d6412e1bf..5d06bf9a75b 100644
--- a/tensorflow/core/distributed_runtime/graph_mgr.cc
+++ b/tensorflow/core/distributed_runtime/graph_mgr.cc
@@ -179,14 +179,14 @@ Status GraphMgr::InitItem(const string& handle, const GraphDef& gdef,
   }

   std::unordered_map<string, std::unique_ptr<Graph>> partition_graphs;
-  for (const auto& partition : partitions) {
+  for (auto& partition : partitions) {
     std::unique_ptr<Graph> device_graph(new Graph(OpRegistry::Global()));
     GraphConstructorOptions device_opts;
     // There are internal operations (e.g., send/recv) that we now allow.
     device_opts.allow_internal_ops = true;
     device_opts.expect_device_spec = true;
-    TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(device_opts, partition.second,
-                                              device_graph.get()));
+    TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
+        device_opts, std::move(partition.second), device_graph.get()));
     partition_graphs.emplace(partition.first, std::move(device_graph));
   }
diff --git a/tensorflow/core/graph/graph_def_builder_util.cc b/tensorflow/core/graph/graph_def_builder_util.cc
index 102c72185f7..3ca9f8a21ff 100644
--- a/tensorflow/core/graph/graph_def_builder_util.cc
+++ b/tensorflow/core/graph/graph_def_builder_util.cc
@@ -22,7 +22,7 @@ Status GraphDefBuilderToGraph(const GraphDefBuilder& builder, Graph* graph) {
   GraphDef graph_def;
   TF_RETURN_IF_ERROR(builder.ToGraphDef(&graph_def));
   GraphConstructorOptions opts;
-  return ConvertGraphDefToGraph(opts, graph_def, graph);
+  return ConvertGraphDefToGraph(opts, std::move(graph_def), graph);
 }

 }  // namespace tensorflow
diff --git a/tensorflow/core/grappler/grappler_item_builder.cc b/tensorflow/core/grappler/grappler_item_builder.cc
index 9790915eb96..6d49b2f29d0 100644
--- a/tensorflow/core/grappler/grappler_item_builder.cc
+++ b/tensorflow/core/grappler/grappler_item_builder.cc
@@ -267,8 +267,8 @@ Status RuntimeGraphOptimizer(const GraphDef& graph_def_arg,
   graph_ctor_opts.expect_device_spec = false;
   std::unique_ptr<Graph> graphptr(new Graph(function_library));

-  TF_RETURN_IF_ERROR(
-      ConvertGraphDefToGraph(graph_ctor_opts, graph_def, graphptr.get()));
+  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
+      graph_ctor_opts, std::move(graph_def), graphptr.get()));

   // Optimize the graph.
   ::tensorflow::GraphOptimizer optimizer(*optimizer_opts);
diff --git a/tensorflow/core/grappler/optimizers/function_optimizer.cc b/tensorflow/core/grappler/optimizers/function_optimizer.cc
index b4f5c36bb9c..ca8f7a2e05f 100644
--- a/tensorflow/core/grappler/optimizers/function_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/function_optimizer.cc
@@ -784,7 +784,7 @@ constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
 using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
 using OutputControlSource = InlineFunctionBodyOptions::OutputControlSource;

-// Checks if boolean attribute is defined and it's value is 'true'.
+// Checks if boolean attribute is defined and its value is 'true'.
 bool CheckBoolAttr(const Node* n, absl::string_view attr_name) {
   bool match;
   Status s = GetNodeAttr(n->attrs(), attr_name, &match);
diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.cc b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
index 7f1302d6b09..00164c52bd8 100644
--- a/tensorflow/core/grappler/optimizers/meta_optimizer.cc
+++ b/tensorflow/core/grappler/optimizers/meta_optimizer.cc
@@ -802,8 +802,6 @@ Status OptimizeGraph(
   std::unique_ptr<tensorflow::Graph> optimized_graph(
       new tensorflow::Graph(OpRegistry::Global()));
-  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(GraphConstructorOptions(),
-                                            out_graph, optimized_graph.get()));

   // Copy optimized functions back to the overlay lib.
   if (flib) {
@@ -817,25 +815,28 @@ Status OptimizeGraph(
     }
   }

-  *g = std::move(optimized_graph);
+  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
+      GraphConstructorOptions(), std::move(out_graph), optimized_graph.get()));

   // The graph conversion sets the requested device names but not the
   // assigned device names. However, since at this point the graph is
   // placed TF expects an assigned device name for every node. Therefore
   // we copy the requested device into the assigned device field.
-  for (Node* node : (*g)->nodes()) {
+  for (Node* node : optimized_graph->nodes()) {
     if (node->IsOp() && node->assigned_device_name().empty()) {
       if (node->requested_device().empty()) {
         return errors::Internal(
             "Either placer did not place the node or Grappler did not "
             "copy the assigned device. Contact Grappler team since latter "
             "is more likely. Node=",
-            node->name(), " Graph: ", (*g)->ToGraphDefDebug().DebugString());
+            node->name(),
+            " Graph: ", optimized_graph->ToGraphDefDebug().DebugString());
       }
       node->set_assigned_device_name(node->requested_device());
     }
   }

+  *g = std::move(optimized_graph);
   return Status::OK();
 }
diff --git a/tensorflow/tools/optimization/optimization_pass_runner.cc b/tensorflow/tools/optimization/optimization_pass_runner.cc
index 162d39d7aee..8cd9e32ba6f 100644
--- a/tensorflow/tools/optimization/optimization_pass_runner.cc
+++ b/tensorflow/tools/optimization/optimization_pass_runner.cc
@@ -111,8 +111,8 @@ Status OptimizationPassRunner::Run(absl::string_view pass_to_run,
   GraphConstructorOptions graph_opts;
   graph_opts.expect_device_spec = true;
   graph_opts.allow_internal_ops = true;
-  TF_RETURN_IF_ERROR(
-      ConvertGraphDefToGraph(graph_opts, input, options.graph->get()));
+  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(graph_opts, std::move(input),
+                                            options.graph->get()));

   // Add all devices that were previously configured with AddDevice.
   DeviceSet device_set;
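
Aside, for context (not part of the patch itself): every hunk above switches a ConvertGraphDefToGraph call site to move its GraphDef into the call instead of passing it by const reference, which lets the graph constructor take ownership of the proto's NodeDefs rather than copying them. That is also why the two partition loops change from "const auto&" to "auto&": partition.second must be non-const to be moved from. Below is a minimal sketch of the resulting calling convention, assuming a GraphDef&& overload of ConvertGraphDefToGraph is declared in tensorflow/core/graph/graph_constructor.h; BuildGraphFromDef is a hypothetical helper name used only for illustration.

#include <utility>

#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_constructor.h"

// Hypothetical helper, not from the patch. Taking the GraphDef parameter by
// value lets the caller choose: pass an lvalue to copy, or std::move to avoid
// the copy entirely.
tensorflow::Status BuildGraphFromDef(tensorflow::GraphDef graph_def,
                                     tensorflow::Graph* graph) {
  tensorflow::GraphConstructorOptions opts;
  opts.allow_internal_ops = true;
  // graph_def is left in a valid but unspecified (moved-from) state after
  // this call; it must not be read again.
  return tensorflow::ConvertGraphDefToGraph(opts, std::move(graph_def), graph);
}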