Merge pull request from linux-on-ibm-z:tflite-verifier-fix

PiperOrigin-RevId: 346482388
Change-Id: Id3faba8f67a3e4807ebdcef0e31f332708ab4cc5
TensorFlower Gardener 2020-12-08 22:52:39 -08:00
commit 7544a9d00e
4 changed files with 54 additions and 7 deletions


@@ -2056,7 +2056,13 @@ TEST(NNAPIDelegate, LSHProjectionDense1DInputs) {
 
   m.Invoke();
 
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  // Hash returns differently on machines with different endianness
+  EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 1, 1, 1, 0));
+#else
   EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 0, 1, 0, 0));
+#endif
 }
 
 TEST(NNAPIDelegate, LSHProjectionSparse1DInputs) {
@@ -2067,7 +2073,13 @@ TEST(NNAPIDelegate, LSHProjectionSparse1DInputs) {
 
   m.Invoke();
 
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  // Hash returns differently on machines with different endianness
+  EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
+#else
   EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 1, 8 + 0));
+#endif
 }
 
 TEST(NNAPIDelegate, LSHProjectionSparse3DInputs) {
@@ -2080,7 +2092,13 @@ TEST(NNAPIDelegate, LSHProjectionSparse3DInputs) {
 
   m.Invoke();
 
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  // Hash returns differently on machines with different endianness
+  EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
+#else
   EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 2, 4 + 1, 8 + 1));
+#endif
 }
 
 class BaseActivationsOpModel : public SingleOpModelWithNNAPI {
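
Why the expected outputs fork on byte order: the LSH projection kernel hashes the raw bytes of its inputs (FarmHash in TFLite), and the same value has a different in-memory byte sequence on big-endian and little-endian hosts, so the hash, and with it the projected sign bits these tests assert on, legitimately differs. A minimal, dependency-free sketch of the effect (ByteHash below is an FNV-1a stand-in for FarmHash, not TFLite code):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Any hash that consumes raw bytes sees a different byte sequence for the
// same logical value on big-endian vs. little-endian hosts.
uint32_t ByteHash(const void* data, std::size_t len) {
  const unsigned char* p = static_cast<const unsigned char*>(data);
  uint32_t h = 2166136261u;  // FNV-1a offset basis
  for (std::size_t i = 0; i < len; ++i) h = (h ^ p[i]) * 16777619u;
  return h;
}

int main() {
  const int32_t value = 12345;  // 0x3039
  // In memory: 39 30 00 00 little-endian, 00 00 30 39 big-endian.
  // Same value, different bytes, different hash, and hence the two
  // ElementsAre(...) expectations in the tests above.
  std::printf("hash = %u\n", ByteHash(&value, sizeof(value)));
  return 0;
}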


@@ -37,7 +37,13 @@ constexpr int kOutputTensor = 0;
 
 TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input,
                           const TfLiteTensor* axis, TfLiteTensor* output) {
-  int axis_value = *GetTensorData<int>(axis);
+  int axis_value;
+  // Retrieve all 8 bytes when axis type is kTfLiteInt64 to avoid data loss.
+  if (axis->type == kTfLiteInt64) {
+    axis_value = static_cast<int>(*GetTensorData<int64_t>(axis));
+  } else {
+    axis_value = *GetTensorData<int>(axis);
+  }
   if (axis_value < 0) {
     axis_value += NumDimensions(input);
   }
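
The failure mode the new branch guards against: reading an int64 axis tensor through GetTensorData<int> returns only the first four bytes of the scalar. On little-endian hosts those are the low-order bytes, so small axis values came out right by accident; on big-endian hosts they are the high-order bytes, which are zero for any small value, silently dropping the axis. A standalone sketch of the two reads (standard library only, not TFLite code):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const int64_t axis = 1;  // an int64 axis tensor holding a small value

  // The old path, in effect: reinterpret the first 4 bytes as an int.
  int32_t first_word;
  std::memcpy(&first_word, &axis, sizeof(first_word));
  // Little-endian: first_word == 1 (low word first, accidentally correct).
  // Big-endian:    first_word == 0 (high word first, value lost).
  std::printf("first 4 bytes as int32: %d\n", first_word);

  // The patched path: read all 8 bytes, then narrow explicitly.
  const int32_t axis_value = static_cast<int32_t>(axis);
  std::printf("explicit narrowing:     %d\n", axis_value);
  return 0;
}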


@@ -87,7 +87,13 @@ TEST(LSHProjectionOpTest2, Dense1DInputs) {
 
   m.Invoke();
 
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  // Hash returns differently on machines with different endianness
+  EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 1, 1, 1, 0));
+#else
   EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 0, 1, 0, 0));
+#endif
 }
 
 TEST(LSHProjectionOpTest2, Sparse1DInputs) {
@@ -98,7 +104,13 @@ TEST(LSHProjectionOpTest2, Sparse1DInputs) {
 
   m.Invoke();
 
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  // Hash returns differently on machines with different endianness
+  EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
+#else
   EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 1, 8 + 0));
+#endif
 }
 
 TEST(LSHProjectionOpTest2, Sparse3DInputs) {
@@ -111,7 +123,13 @@ TEST(LSHProjectionOpTest2, Sparse3DInputs) {
 
   m.Invoke();
 
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  // Hash returns differently on machines with different endianness
+  EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
+#else
   EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 2, 4 + 1, 8 + 1));
+#endif
 }
 
 } // namespace
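
An aside on the guard these tests repeat: __BYTE_ORDER__ and __ORDER_BIG_ENDIAN__ are GCC/Clang predefined macros, which is why each use checks defined() before comparing them. On a C++20 toolchain the same test could be written without the preprocessor; a sketch, not part of this change:

#include <bit>

// Assumes C++20 <bit>. Evaluates at compile time, so it can drive
// if constexpr branches instead of #if/#else blocks.
constexpr bool kBigEndianHost = (std::endian::native == std::endian::big);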


@@ -50,8 +50,13 @@ void ReportError(ErrorReporter* error_reporter, const char* format, ...) {
   }
 }
 
 // Returns the int32_t value pointed by ptr.
-const uint32_t* GetIntPtr(const char* ptr) {
-  return reinterpret_cast<const uint32_t*>(ptr);
+const uint32_t GetIntPtr(const char* ptr) {
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+  return flatbuffers::EndianScalar(*reinterpret_cast<const uint32_t*>(ptr));
+#else
+  return *reinterpret_cast<const uint32_t*>(ptr);
+#endif
 }
 
 // Verifies flatbuffer format of the model contents and returns the in-memory
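
What flatbuffers::EndianScalar contributes here: FlatBuffers stores scalars little-endian on the wire, so the plain reinterpret_cast load was only correct on little-endian hosts. EndianScalar() is an identity there and a byte swap on big-endian hosts. A stand-in with the same behavior (assumes GCC/Clang for __builtin_bswap32 and the __BYTE_ORDER__ predefines; the memcpy additionally avoids the unaligned read the cast permits):

#include <cstdint>
#include <cstring>

inline uint32_t LoadLittleEndianU32(const char* ptr) {
  uint32_t v;
  std::memcpy(&v, ptr, sizeof(v));  // byte-wise load, alignment-safe
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  v = __builtin_bswap32(v);  // wire order (LE) to host order (BE)
#endif
  return v;
}
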
@@ -79,7 +84,7 @@ bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
   }
 
   const char* buffer_ptr = reinterpret_cast<const char*>(buffer.data()->data());
-  uint32_t num_strings = *GetIntPtr(buffer_ptr);
+  uint32_t num_strings = GetIntPtr(buffer_ptr);
   if (num_strings > kMaxNumString) {
     ReportError(error_reporter,
                 "String tensor %s has invalid num of string set: %d",
@@ -100,7 +105,7 @@ bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
   uint32_t prev_ptr = header_offsets;
   uint32_t offset = sizeof(int32_t);
 
-  if (*GetIntPtr(buffer_ptr + offset) != header_offsets) {
+  if (GetIntPtr(buffer_ptr + offset) != header_offsets) {
     ReportError(error_reporter,
                 "String tensor %s buffer initial offset must be: %d",
                 NameOrEmptyString(tensor.name()), header_offsets);
@@ -108,7 +113,7 @@ bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
   }
   offset += sizeof(int32_t);
   for (int i = 1, end = num_strings; i <= end; i++, offset += sizeof(int32_t)) {
-    int string_offset = *GetIntPtr(buffer_ptr + offset);
+    int string_offset = GetIntPtr(buffer_ptr + offset);
     if (string_offset < static_cast<int>(prev_ptr) ||
         string_offset > static_cast<int>(buffer_size)) {
       ReportError(error_reporter,
@@ -117,7 +122,7 @@ bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
       return false;
     }
   }
-  if (*GetIntPtr(buffer_ptr + offset - sizeof(int32_t)) != buffer_size) {
+  if (GetIntPtr(buffer_ptr + offset - sizeof(int32_t)) != buffer_size) {
     ReportError(error_reporter,
                 "String tensor %s buffer last offset must be %d",
                 NameOrEmptyString(tensor.name()), buffer_size);
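
For reference, the layout these offset checks walk, reconstructed from the verifier's own invariants: an int32 string count, then num_strings + 1 int32 offsets, then the concatenated string bytes, where the first offset equals the header size and the last offset equals the total buffer size. PackStringBuffer below is a hypothetical helper (not TFLite API) that produces a buffer satisfying those checks; it writes host-order int32s for brevity, whereas the real format is little-endian, which is what GetIntPtr now compensates for:

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// Layout: int32 num_strings | int32 offsets[num_strings + 1] | string bytes
// offsets[0]           == header size == (num_strings + 2) * sizeof(int32_t)
// offsets[num_strings] == total buffer size
std::vector<char> PackStringBuffer(const std::vector<std::string>& strings) {
  const int32_t n = static_cast<int32_t>(strings.size());
  std::vector<int32_t> header;
  header.push_back(n);
  int32_t offset = (n + 2) * static_cast<int32_t>(sizeof(int32_t));
  header.push_back(offset);            // initial offset: header size
  for (const auto& s : strings) {
    offset += static_cast<int32_t>(s.size());
    header.push_back(offset);          // final entry: buffer size
  }
  std::vector<char> buf(header.size() * sizeof(int32_t));
  std::memcpy(buf.data(), header.data(), buf.size());
  for (const auto& s : strings) buf.insert(buf.end(), s.begin(), s.end());
  return buf;
}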