diff --git a/tensorflow/lite/arena_planner.cc b/tensorflow/lite/arena_planner.cc index e695c43f13a..3258f612c18 100644 --- a/tensorflow/lite/arena_planner.cc +++ b/tensorflow/lite/arena_planner.cc @@ -153,7 +153,7 @@ TfLiteStatus ArenaPlanner::PlanAllocations() { } } // Go through the graph in execution order. - for (int i = 0; i < graph_info_->num_nodes(); ++i) { + for (size_t i = 0; i < graph_info_->num_nodes(); ++i) { const TfLiteNode& node = graph_info_->node(i); // First queue output tensors for allocation. @@ -193,7 +193,7 @@ TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) { TF_LITE_ENSURE_STATUS(CalculateAllocations(first_node, last_node)); TF_LITE_ENSURE_STATUS(Commit()); - for (int i = 0; i < graph_info_->num_tensors(); ++i) { + for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) { // TODO(ahentz): we could do this only for the tensors that were modified // in CalculateAllocations(), instead of redoing it for tensors that // already had proper pointers. However we must be very careful, because @@ -237,9 +237,14 @@ TfLiteStatus ArenaPlanner::CalculateAllocations(int first_node, int last_node) { } } - // Don't forget to deallocate temporaries of last node. - TF_LITE_ENSURE_STATUS( - CalculateDeallocationOfInternalTensors(active_node - 1)); + // If the graph is empty, the node index can be negative since we + // subtract from the active node, so the node_index can be zero in those + // cases. + if (active_node > 0) { + // Don't forget to deallocate temporaries of last node. 
+ TF_LITE_ENSURE_STATUS( + CalculateDeallocationOfInternalTensors(active_node - 1)); + } return kTfLiteOk; } @@ -284,8 +289,8 @@ TfLiteStatus ArenaPlanner::CalculateTensorDeallocation(int tensor_index) { TfLiteStatus ArenaPlanner::CalculateAllocationOfInternalTensors( int node_index) { - if (node_index < graph_info_->num_nodes()) { - const TfLiteNode& node = graph_info_->node(node_index); + if (node_index < static_cast<int>(graph_info_->num_nodes())) { + const TfLiteNode& node = graph_info_->node(static_cast<size_t>(node_index)); TfLiteIntArray* node_temporaries = node.temporaries; for (int i = 0; i < node_temporaries->size; ++i) { int tensor_index = node_temporaries->data[i]; @@ -297,8 +302,8 @@ TfLiteStatus ArenaPlanner::CalculateAllocationOfInternalTensors( TfLiteStatus ArenaPlanner::CalculateDeallocationOfInternalTensors( int node_index) { - if (node_index < graph_info_->num_nodes()) { - const TfLiteNode& node = graph_info_->node(node_index); + if (node_index < static_cast<int>(graph_info_->num_nodes())) { + const TfLiteNode& node = graph_info_->node(static_cast<size_t>(node_index)); TfLiteIntArray* node_temporaries = node.temporaries; for (int i = 0; i < node_temporaries->size; ++i) { int tensor_index = node_temporaries->data[i]; diff --git a/tensorflow/lite/arena_planner_test.cc b/tensorflow/lite/arena_planner_test.cc index 3b6c9d5f54d..0e80d429c0d 100644 --- a/tensorflow/lite/arena_planner_test.cc +++ b/tensorflow/lite/arena_planner_test.cc @@ -211,6 +211,18 @@ TEST_F(ArenaPlannerTest, EmptyGraph) { Execute(0, 10); } +TEST_F(ArenaPlannerTest, DeallocationOfInputTensor) { + // This is a negative TC, which will try to make sure that no allocation for + // input tensors is done when making a call with a negative node_index, since + // the previous check compared node_index, which was int, against an + // unsigned int; implicit conversion was passing this case, as the negative + // number was converted to unsigned int, making it invalid. The new check + // takes care of this problem and 
removes the warning as well. + TestGraph graph({-1}, {}, {1}); + SetGraph(&graph); + Execute(0, 10); +} + TEST_F(ArenaPlannerTest, GraphWithNoOps) { TestGraph graph({0, 10}, {}, {5, 11}); SetGraph(&graph);