From 2bbab6a5000a212b64e3a66c887b33b25ece0245 Mon Sep 17 00:00:00 2001
From: Jacques Pienaar <jpienaar@google.com>
Date: Thu, 23 Apr 2020 12:46:11 -0700
Subject: [PATCH] Switch from unordered_map to absl::flat_hash_map

Showed a sizeable improvement for one large shape inference case.

PiperOrigin-RevId: 308109572
Change-Id: Ia926bd597246e399b38102a7bb09a4861b1cc7f0
---
 tensorflow/core/common_runtime/shape_refiner.h | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/tensorflow/core/common_runtime/shape_refiner.h b/tensorflow/core/common_runtime/shape_refiner.h
index 33ebba07b9f..c83bd81705b 100644
--- a/tensorflow/core/common_runtime/shape_refiner.h
+++ b/tensorflow/core/common_runtime/shape_refiner.h
@@ -17,6 +17,7 @@ limitations under the License.
 
 #include <vector>
 
+#include "absl/container/flat_hash_map.h"
 #include "tensorflow/core/common_runtime/graph_runner.h"
 #include "tensorflow/core/framework/function.pb.h"
 #include "tensorflow/core/framework/shape_inference.h"
@@ -236,7 +237,8 @@ class ShapeRefiner {
   GraphRunner graph_runner_;
 
   // Stores a map from a node to its ExtendedInferenceContext.
-  std::unordered_map<const Node*, std::unique_ptr<ExtendedInferenceContext>>
+  absl::flat_hash_map<const Node*, std::unique_ptr<ExtendedInferenceContext>,
+                      hash<const Node*>>
       node_to_context_;
 
   // Holds a cache from 'tensor name' to the tensor that is
@@ -257,9 +259,10 @@ class ShapeRefiner {
   // shape inference.
   const tensorflow::FunctionLibraryDefinition* function_library_ = nullptr;
 
-  // Cache the graph corresponding to each functin definition for which shapes
+  // Cache the graph corresponding to each function definition for which shapes
   // are refined.
-  std::unordered_map<const FunctionDef*, std::unique_ptr<const Graph>>
+  absl::flat_hash_map<const FunctionDef*, std::unique_ptr<const Graph>,
+                      hash<const FunctionDef*>>
       functions_;
 
   TF_DISALLOW_COPY_AND_ASSIGN(ShapeRefiner);
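
Note (not part of the patch): below is a minimal standalone C++ sketch of the container pattern the diff adopts, an absl::flat_hash_map keyed by raw pointers that owns its values through std::unique_ptr. The types Node and InferenceState here are hypothetical stand-ins for the real ShapeRefiner members; only the container usage mirrors the patch. flat_hash_map stores its slots in a single contiguous array rather than heap-allocated nodes, which is the usual reason it outperforms std::unordered_map on lookup-heavy workloads like the shape inference case mentioned above.

// Minimal sketch, not TensorFlow code. Node and InferenceState are
// hypothetical stand-ins for the real ShapeRefiner types.
#include <iostream>
#include <memory>

#include "absl/container/flat_hash_map.h"

struct Node { int id; };
struct InferenceState { int num_outputs = 0; };

int main() {
  // absl::Hash handles pointer keys by default; the patch instead passes an
  // explicit hasher (hash<const Node*>) available in the TensorFlow namespace.
  absl::flat_hash_map<const Node*, std::unique_ptr<InferenceState>> contexts;

  Node n{42};
  auto state = std::make_unique<InferenceState>();
  state->num_outputs = 3;
  contexts.emplace(&n, std::move(state));

  // Insertion, lookup, and iteration have the same interface as
  // std::unordered_map, which is why the swap is a drop-in change
  // for ShapeRefiner's members.
  auto it = contexts.find(&n);
  if (it != contexts.end()) {
    std::cout << "node " << it->first->id << " -> "
              << it->second->num_outputs << " outputs\n";
  }
  return 0;
}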