Adds a delegate util to create temporary tensors with the same shape but a different type. Also removes an old function that is redundant after the partition-preview API.

PiperOrigin-RevId: 300614528
Change-Id: I8a866aec877ebe61e02568a52bbae272709d8e2d
Authored by Sachin Joglekar on 2020-03-12 13:36:46 -07:00, committed by TensorFlower Gardener
parent 24e4a95157
commit a5f38a866e
3 changed files with 67 additions and 122 deletions
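
For context on why the pruning helper became redundant: once a delegate can preview the runtime's partitioning, it can keep the nodes of the largest contiguous partitions directly instead of pruning a flat index list itself. The sketch below is illustrative only; it assumes the partition-preview API referred to in the commit message is the PreviewDelegatePartitioning hook on TfLiteContext (tensorflow/lite/c/common.h), and the helper name GetNodesOfLargestPartitions is made up for this example.

// Sketch only: keeps nodes from the largest delegate partitions via the
// partition-preview API. GetNodesOfLargestPartitions is a hypothetical helper.
#include <algorithm>
#include <vector>

#include "tensorflow/lite/c/common.h"

std::vector<int> GetNodesOfLargestPartitions(TfLiteContext* context,
                                             TfLiteIntArray* nodes_to_replace,
                                             int max_partitions) {
  std::vector<int> result;
  TfLiteDelegateParams* partition_params = nullptr;
  int num_partitions = 0;
  if (context->PreviewDelegatePartitioning(context, nodes_to_replace,
                                           &partition_params,
                                           &num_partitions) != kTfLiteOk) {
    return result;
  }
  // Order partitions by node count, descending.
  std::vector<int> order(num_partitions);
  for (int i = 0; i < num_partitions; ++i) order[i] = i;
  std::sort(order.begin(), order.end(), [&](int a, int b) {
    return partition_params[a].nodes_to_replace->size >
           partition_params[b].nodes_to_replace->size;
  });
  // Collect the node indices of the top partitions, then sort them.
  const int limit = std::min(max_partitions, num_partitions);
  for (int i = 0; i < limit; ++i) {
    const TfLiteIntArray* nodes = partition_params[order[i]].nodes_to_replace;
    result.insert(result.end(), nodes->data, nodes->data + nodes->size);
  }
  std::sort(result.begin(), result.end());
  return result;
}

The removed PruneContinuousSubsets approximated the same selection by grouping a flat list of node indices into contiguous runs and keeping the longest ones, as its tests below show.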
tensorflow/lite/delegates/utils.cc

@@ -15,54 +15,30 @@ limitations under the License.
#include "tensorflow/lite/delegates/utils.h"
#include <algorithm>
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace delegates {
TfLiteStatus PruneContinuousSubsets(TfLiteContext* context,
const int max_subsets,
std::vector<int>* indices) {
if (!indices) {
context->ReportError(context, "indices cannot be nullptr");
TfLiteStatus CreateNewTensorWithDifferentType(TfLiteContext* context,
const int original_tensor_index,
TfLiteType new_type,
TfLiteTensor** new_tensor,
int* new_tensor_index) {
const TfLiteTensor& original_tensor = context->tensors[original_tensor_index];
TF_LITE_ENSURE_STATUS(context->AddTensors(context, 1, new_tensor_index));
*new_tensor = &context->tensors[*new_tensor_index];
(*new_tensor)->type = new_type;
(*new_tensor)->allocation_type = kTfLiteArenaRw;
const auto* original_dims = original_tensor.dims;
TfLiteIntArray* dims = TfLiteIntArrayCreate(original_dims->size);
for (int i = 0; i < original_dims->size; ++i) {
dims->data[i] = original_dims->data[i];
}
if (context->ResizeTensor(context, *new_tensor, dims) != kTfLiteOk) {
TF_LITE_KERNEL_LOG(context, "Could not resize new delegate tensor");
return kTfLiteError;
}
if (indices->empty() || indices->size() < max_subsets) return kTfLiteOk;
// Sort indices just in case.
std::sort(indices->begin(), indices->end());
// Build a vector of subsets.
std::vector<std::vector<int>> continuous_subsets;
int last_index = indices->at(0) - 2;
for (const auto idx : *indices) {
if (idx > last_index + 1) {
continuous_subsets.emplace_back();
}
continuous_subsets.back().push_back(idx);
last_index = idx;
}
// Nothing to be done if number of subsets is already less than max_subsets.
if (continuous_subsets.size() <= max_subsets) return kTfLiteOk;
// Sort the vector of subsets in descending order of length.
std::sort(continuous_subsets.begin(), continuous_subsets.end(),
[](const std::vector<int>& a, const std::vector<int>& b) {
return a.size() > b.size();
});
// Re-build indices vector from top subsets.
indices->clear();
for (int i = 0; i < max_subsets; ++i) {
indices->reserve(indices->size() + continuous_subsets[i].size());
indices->insert(indices->end(), continuous_subsets[i].begin(),
continuous_subsets[i].end());
}
std::sort(indices->begin(), indices->end());
return kTfLiteOk;
}

tensorflow/lite/delegates/utils.h

@@ -23,15 +23,13 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
-// Given a list(vector<int>) of indices, modifies it in-place to contain
-// max_subsets number of continuous subsets. Subsets are selected in descending
-// order of their length.
-// Resulting vector contains sorted list of pruned indices.
-//
-// This util can be used by delegates to avoid accepting too many node-subsets.
-TfLiteStatus PruneContinuousSubsets(TfLiteContext* context,
-                                    const int max_subsets,
-                                    std::vector<int>* indices);
+// Creates a new Read/Write tensor having the same shape as the original, but
+// with a different type.
+TfLiteStatus CreateNewTensorWithDifferentType(TfLiteContext* context,
+                                              const int original_tensor_index,
+                                              TfLiteType new_type,
+                                              TfLiteTensor** new_tensor,
+                                              int* new_tensor_index);
 }  // namespace delegates
 }  // namespace tflite
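
Since the header above only documents the signature, here is a minimal usage sketch of the new util from a delegate kernel's Prepare step. The surrounding kernel (PrepareWithFloatScratch and its temporaries handling) is illustrative and not part of this change; only CreateNewTensorWithDifferentType and the standard TfLiteContext/TfLiteNode fields are taken from the diff.

// Illustrative only: a delegate kernel that allocates a float32 scratch tensor
// shaped like its first input, using the util added in this change.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/utils.h"

namespace {

TfLiteStatus PrepareWithFloatScratch(TfLiteContext* context, TfLiteNode* node) {
  // Index of the original tensor we want to mirror with a different type.
  const int input_index = node->inputs->data[0];

  TfLiteTensor* scratch = nullptr;
  int scratch_index = -1;
  // Same shape as the input, but kTfLiteFloat32 instead of its original type.
  TF_LITE_ENSURE_STATUS(tflite::delegates::CreateNewTensorWithDifferentType(
      context, /*original_tensor_index=*/input_index,
      /*new_type=*/kTfLiteFloat32, &scratch, &scratch_index));

  // Register the new tensor as a temporary so the arena manages its lifetime.
  TfLiteIntArrayFree(node->temporaries);
  node->temporaries = TfLiteIntArrayCreate(1);
  node->temporaries->data[0] = scratch_index;
  return kTfLiteOk;
}

}  // namespace

Listing the returned index in node->temporaries is what lets the kTfLiteArenaRw allocation type set by the util take effect: the arena then owns and sizes the buffer during the planning phase.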

tensorflow/lite/delegates/utils_test.cc

@@ -24,81 +24,52 @@ namespace tflite {
 namespace delegates {
 namespace {
-using ::testing::ElementsAreArray;
-void ReportError(TfLiteContext* context, const char* format, ...) {}
-TEST(UtilsTest, PruneContinuousSubsets_NoSubsets) {
+TEST(UtilsTest, CreateNewTensorWithDifferentTypeTest) {
+  std::vector<TfLiteTensor> tensors(2);
+  // Data about original tensor.
+  // The same shape should be reflected in tensors[1] later.
+  tensors[0].dims = TfLiteIntArrayCreate(2);
+  tensors[0].dims->data[0] = 2;
+  tensors[0].dims->data[1] = 3;
+  tensors[0].type = kTfLiteFloat32;
+  // To simulate a valid TFLite Context.
   TfLiteContext context;
-  context.ReportError = ReportError;
-  std::vector<int> original_indices = {};
+  context.AddTensors = [](struct TfLiteContext*, int tensors_to_add,
+                          int* first_new_tensor_index) {
+    // The util should be adding exactly one tensor to the graph.
+    if (tensors_to_add != 1) {
+      return kTfLiteError;
+    }
+    // This ensures that the 'new tensor' is the second tensor in the vector
+    // above.
+    *first_new_tensor_index = 1;
+    return kTfLiteOk;
+  };
+  context.ResizeTensor = [](struct TfLiteContext*, TfLiteTensor* tensor,
+                            TfLiteIntArray* new_size) {
+    // Ensure dimensions are the same as the original tensor.
+    if (new_size->size != 2 || new_size->data[0] != 2 || new_size->data[1] != 3)
+      return kTfLiteError;
+    tensor->dims = new_size;
+    return kTfLiteOk;
+  };
+  context.tensors = tensors.data();
-  ASSERT_EQ(PruneContinuousSubsets(&context, 5, nullptr), kTfLiteError);
+  TfLiteTensor* new_tensor = nullptr;
+  int new_tensor_index = -1;
+  EXPECT_EQ(CreateNewTensorWithDifferentType(
+                &context, /*original_tensor_index=*/0,
+                /*new_type=*/kTfLiteUInt8, &new_tensor, &new_tensor_index),
+            kTfLiteOk);
+  EXPECT_EQ(new_tensor_index, 1);
+  EXPECT_NE(new_tensor, nullptr);
+  EXPECT_NE(new_tensor->dims, nullptr);
+  EXPECT_EQ(new_tensor->type, kTfLiteUInt8);
+  EXPECT_EQ(new_tensor->allocation_type, kTfLiteArenaRw);
-  ASSERT_EQ(PruneContinuousSubsets(&context, 0, &original_indices), kTfLiteOk);
-  ASSERT_TRUE(original_indices.empty());
-  ASSERT_EQ(PruneContinuousSubsets(&context, 2, &original_indices), kTfLiteOk);
-  ASSERT_TRUE(original_indices.empty());
-}
-TEST(UtilsTest, PruneContinuousSubsets_SingleSubset) {
-  TfLiteContext context;
-  std::vector<int> original_indices = {0, 1, 2, 3};
-  std::vector<int> indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 1, &indices), kTfLiteOk);
-  EXPECT_THAT(indices, ElementsAreArray({0, 1, 2, 3}));
-  indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 0, &indices), kTfLiteOk);
-  ASSERT_TRUE(indices.empty());
-  indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 2, &indices), kTfLiteOk);
-  EXPECT_THAT(indices, ElementsAreArray({0, 1, 2, 3}));
-}
-TEST(UtilsTest, PruneContinuousSubsets_MultipleSubsets) {
-  TfLiteContext context;
-  // 5 subsets: (0, 1), (3, 4, 5), (7), (10, 11), (19).
-  std::vector<int> original_indices = {0, 1, 3, 4, 5, 7, 10, 11, 19};
-  std::vector<int> indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 4, &indices), kTfLiteOk);
-  EXPECT_THAT(indices, ElementsAreArray({0, 1, 3, 4, 5, 7, 10, 11}));
-  // Only the longest subset is selected.
-  indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 1, &indices), kTfLiteOk);
-  EXPECT_THAT(indices, ElementsAreArray({3, 4, 5}));
-  indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 0, &indices), kTfLiteOk);
-  ASSERT_TRUE(indices.empty());
-  indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 1000, &indices), kTfLiteOk);
-  EXPECT_THAT(indices, ElementsAreArray({0, 1, 3, 4, 5, 7, 10, 11, 19}));
-}
-TEST(UtilsTest, PruneContinuousSubsets_UnsortedIndices) {
-  TfLiteContext context;
-  // 5 subsets: (0, 1), (3, 4, 5), (7), (10, 11), (19).
-  std::vector<int> original_indices = {5, 7, 4, 10, 11, 19, 0, 1, 3};
-  std::vector<int> indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 4, &indices), kTfLiteOk);
-  EXPECT_THAT(indices, ElementsAreArray({0, 1, 3, 4, 5, 7, 10, 11}));
-  // Only the longest subset is selected.
-  indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 1, &indices), kTfLiteOk);
-  EXPECT_THAT(indices, ElementsAreArray({3, 4, 5}));
-  indices = original_indices;
-  ASSERT_EQ(PruneContinuousSubsets(&context, 0, &indices), kTfLiteOk);
-  ASSERT_TRUE(indices.empty());
+  // Cleanup.
+  TfLiteIntArrayFree(tensors[0].dims);
+  TfLiteIntArrayFree(tensors[1].dims);
 }
 }  // namespace