TFLite GPU Delegate: Add error message printing when a test fails.

PiperOrigin-RevId: 253269168
A. Unique TensorFlower 2019-06-14 11:52:09 -07:00 committed by TensorFlower Gardener
parent 5f2291877d
commit 3f2192fb1a
21 changed files with 94 additions and 80 deletions
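
In short: SingleOpModel::Invoke used to return bool (swallowing the underlying Status), so a failing shader invocation reported nothing but "false". It now returns the Status itself, and the kernel tests assert on it with a new ASSERT_OK macro that matches the status message against the empty string, which makes gtest print the actual error text on failure. A minimal sketch of the mechanism follows; the macro mirrors the one added to gl/kernels/test_util.h below, and the call-site lines are illustrative only.

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/status.h"

// Mirrors the macro added to gl/kernels/test_util.h in this commit: assert
// that the Status carries an empty error message. When an invocation fails,
// the gtest failure output now contains the actual message (for example
// "Softmax is only supported for channels axis.") instead of a bare "false".
#ifndef ASSERT_OK
#define ASSERT_OK(x) ASSERT_THAT(x.message(), testing::StrEq(""));
#endif

// Illustrative call site (SingleOpModel::Invoke now returns Status, not bool):
//   before:  ASSERT_TRUE(model.Invoke(*NewAddNodeShader()));
//   after:   ASSERT_OK(model.Invoke(*NewAddNodeShader()));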


@ -571,6 +571,8 @@ cc_library(
"//tensorflow/lite/delegates/gpu/gl:object_manager",
"//tensorflow/lite/delegates/gpu/gl:runtime_options",
"//tensorflow/lite/delegates/gpu/gl/workgroups:default_calculator",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)


@ -49,7 +49,7 @@ TEST(AddTest, TwoInputTensorsOfTheSameShape) {
{augend, addend}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8}));
ASSERT_TRUE(model.PopulateTensor(1, {0.1, 0.2, 0.3, 0.5}));
ASSERT_TRUE(model.Invoke(*NewAddNodeShader()));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.9, 0.4, 1.0, 1.3}));
}
@ -69,7 +69,7 @@ TEST(AddTest, InputTensorAndScalar) {
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0}));
ASSERT_TRUE(model.Invoke(*NewAddNodeShader()));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.9, 0.3, 0.8, 0.9, 1.2, 2.1}));
}
@ -97,7 +97,7 @@ TEST(AddTest, InputTensorWithConstandBroadcast) {
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_TRUE(model.Invoke(*NewAddNodeShader()));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{11.0, 22.0, 13.0, 24.0, 15.0, 26.0, 17.0, 28.0}));
@ -126,7 +126,7 @@ TEST(AddTest, InputTensorWithRuntimeBroadcast) {
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_TRUE(model.PopulateTensor(1, {10.0, 20.0}));
ASSERT_TRUE(model.Invoke(*NewAddNodeShader()));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{11.0, 22.0, 13.0, 24.0, 15.0, 26.0, 17.0, 28.0}));


@ -51,7 +51,7 @@ TEST(ConcatTest, TwoInputTensorsByUnalignedChannel) {
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 3, 5, 7}));
ASSERT_TRUE(model.PopulateTensor(1, {2, 4, 6, 8}));
ASSERT_TRUE(model.Invoke(*NewConcatNodeShader()));
ASSERT_OK(model.Invoke(*NewConcatNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 2, 3, 4, 5, 6, 7, 8}));
}
@ -77,7 +77,7 @@ TEST(ConcatTest, TwoInputTensorsByAlignedChannel) {
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {5, 6, 7, 8}));
ASSERT_TRUE(model.Invoke(*NewAlignedConcatNodeShader()));
ASSERT_OK(model.Invoke(*NewAlignedConcatNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 2, 3, 4, 5, 6, 7, 8}));
}
@ -103,7 +103,7 @@ TEST(ConcatTest, TwoInputTensorsByHeight) {
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2}));
ASSERT_TRUE(model.PopulateTensor(1, {3, 4, 5, 6}));
ASSERT_TRUE(model.Invoke(*NewFlatConcatNodeShader()));
ASSERT_OK(model.Invoke(*NewFlatConcatNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 2, 3, 4, 5, 6}));
}
@ -129,7 +129,7 @@ TEST(ConcatTest, TwoInputTensorsByWidth) {
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {2, 3, 5, 6}));
ASSERT_TRUE(model.Invoke(*NewFlatConcatNodeShader()));
ASSERT_OK(model.Invoke(*NewFlatConcatNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 2, 3, 4, 5, 6}));
}


@ -63,7 +63,7 @@ TEST(ConvTest, O2H2W1I1Stride1x1Dilation1x1) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolutionNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {4, 8, 4, 8, 2, 4, 2, 4}));
}
@ -101,7 +101,7 @@ TEST(ConvTest, O1H2W2I1Stride1x1Dilation2x2) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1, 1, 1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolutionNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {10}));
}
@ -138,7 +138,7 @@ TEST(ConvTest, O1H3W3I1Stride1x1Dilation1x1) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolutionNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {11}));
}
@ -175,7 +175,7 @@ TEST(ConvTest, O2H1W1I2Stride1x1Dilation1x1) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolution1x1NodeShader()));
ASSERT_OK(model.Invoke(*NewConvolution1x1NodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {4, 8, 4, 8}));
}
@ -213,7 +213,7 @@ TEST(ConvTest, O1H1W1I1Stride2x2Dilation1x1) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 2, 0, 0, 0, 4, 0, 8}));
ASSERT_TRUE(model.Invoke(*NewConvolutionNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 8, 16}));
}


@ -64,7 +64,7 @@ TEST(DepthwiseConvTest, O4H1W1I2Strides1x1Dilation1x1) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 3}));
ASSERT_TRUE(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
ASSERT_OK(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 12, 16}));
}
@ -102,7 +102,7 @@ TEST(DepthwiseConvTest, O2H1W1I1Strides2x2Dilation1x1) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 1, 1, 0, 1, 1, 0, 1}));
ASSERT_TRUE(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
ASSERT_OK(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 3, 1, 3, 1, 3, 1, 3}));
}
@ -141,7 +141,7 @@ TEST(DepthwiseConvTest, O2H2W2I1Strides1x1Dilation2x2) {
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 1, 1, 0, 1, 1, 0, 1}));
ASSERT_TRUE(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
ASSERT_OK(model.Invoke(*NewDepthwiseConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {10, 26}));
}


@ -47,7 +47,7 @@ TEST_F(ElementwiseOneArgumentTest, Abs) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 6.2, 2.0, 4.0}));
}
@ -57,7 +57,7 @@ TEST_F(ElementwiseOneArgumentTest, Sin) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 3.1415926, -3.1415926, 1.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 0.0, 0.0, 0.841471}));
}
@ -67,7 +67,7 @@ TEST_F(ElementwiseOneArgumentTest, Cos) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 3.1415926, -3.1415926, 1}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, -1.0, -1.0, 0.540302}));
}
@ -77,7 +77,7 @@ TEST_F(ElementwiseOneArgumentTest, Log) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 3.1415926, 1.0, 1.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.14473, 0.0, 0.0}));
}
@ -87,7 +87,7 @@ TEST_F(ElementwiseOneArgumentTest, Sqrt) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.0, 1.414213, 2.0}));
}
@ -97,7 +97,7 @@ TEST_F(ElementwiseOneArgumentTest, Rsqrt) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 4.0, 9.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 0.707106, 0.5, 0.333333}));
}
@ -107,7 +107,7 @@ TEST_F(ElementwiseOneArgumentTest, Square) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 0.5, -3.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 4.0, 0.25, 9.0}));
}
@ -117,7 +117,7 @@ TEST_F(ElementwiseOneArgumentTest, Sigmoid) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.0, 2.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.5, 0.002473, 0.880797, 0.982014}));
}
@ -127,7 +127,7 @@ TEST_F(ElementwiseOneArgumentTest, Tanh) {
SingleOpModel model({ToString(op_type), {}}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.0, 2.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, -0.999987, 0.964027, 0.999329}));
}
@ -152,7 +152,7 @@ TEST_F(ElementwiseTwoArgumentsTest, Sub) {
{GetTensorRef(0), GetTensorRef(1)}, {GetTensorRef(2)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, 3.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.0, -8.2, -1.0, 0.0}));
}
@ -163,7 +163,7 @@ TEST_F(ElementwiseTwoArgumentsTest, Div) {
{GetTensorRef(0), GetTensorRef(1)}, {GetTensorRef(2)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, -0.5, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, -3.1, -4.0, 1.0}));
}
@ -174,7 +174,7 @@ TEST_F(ElementwiseTwoArgumentsTest, Pow) {
{GetTensorRef(0), GetTensorRef(1)}, {GetTensorRef(2)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 2.0, 3.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 1.0, 8.0, 256.0}));
}
@ -185,7 +185,7 @@ TEST_F(ElementwiseTwoArgumentsTest, SquaredDiff) {
{GetTensorRef(0), GetTensorRef(1)}, {GetTensorRef(2)});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, 2.0, 2.0, 4.0}));
ASSERT_TRUE(model.PopulateTensor(1, {1.0, 1.0, 5.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewElementwiseNodeShader(op_type)));
ASSERT_OK(model.Invoke(*NewElementwiseNodeShader(op_type)));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 1.0, 9.0, 0.0}));
}


@ -58,7 +58,7 @@ TEST(FullyConnectedTest, MatrixByVectorMultiplication) {
SingleOpModel model({ToString(OperationType::FULLY_CONNECTED), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2}));
ASSERT_TRUE(model.Invoke(*NewFullyConnectedNodeShader()));
ASSERT_OK(model.Invoke(*NewFullyConnectedNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {6, 13, 20, 27}));
}


@ -58,7 +58,7 @@ TEST(LstmTest, Input2x2x1) {
{input, prev_state}, {output_state, output_activation});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {5, 6, 7, 8}));
ASSERT_TRUE(model.Invoke(*NewLstmNodeShader()));
ASSERT_OK(model.Invoke(*NewLstmNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2.5, 3.0, 3.5, 4.0}));
EXPECT_THAT(


@ -56,7 +56,7 @@ TEST(MaxUnpoolingTest, Kernel2x2Stride2x2) {
{input, indices}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {0, 0, 0, 0}));
ASSERT_TRUE(model.Invoke(*NewMaxUnpoolingNodeShader()));
ASSERT_OK(model.Invoke(*NewMaxUnpoolingNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{1, 0, 2, 0, 0, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0}));


@ -46,7 +46,7 @@ TEST(MulTest, Scalar) {
SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewMultiplyScalarNodeShader()));
ASSERT_OK(model.Invoke(*NewMultiplyScalarNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 6, 8}));
}
@ -70,7 +70,7 @@ TEST(MulTest, Linear) {
SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewMultiplyScalarNodeShader()));
ASSERT_OK(model.Invoke(*NewMultiplyScalarNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 6, 6, 12}));
}
@ -94,7 +94,7 @@ TEST(ApplyMaskTest, MaskChannel1) {
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {2, 3}));
ASSERT_TRUE(model.Invoke(*NewApplyMaskNodeShader()));
ASSERT_OK(model.Invoke(*NewApplyMaskNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 9, 12}));
}
@ -118,7 +118,7 @@ TEST(ApplyMaskTest, MaskChannelEqualsToInputChannel) {
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.PopulateTensor(1, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewApplyMaskNodeShader()));
ASSERT_OK(model.Invoke(*NewApplyMaskNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 4, 9, 16}));
}


@ -51,7 +51,7 @@ void TestPadOperation(const HWC& prepend, const HWC& append,
SingleOpModel model({ToString(OperationType::PAD), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0}));
ASSERT_TRUE(model.Invoke(*NewPadNodeShader()));
ASSERT_OK(model.Invoke(*NewPadNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), expected));
}


@ -62,7 +62,7 @@ TEST(PoolingTest, MaxKernel2x2Stride2x2WithIndices) {
{input}, {output, indices});
ASSERT_TRUE(model.PopulateTensor(
0, {1, 2, 1, 2, 3, 4, 3, 4, 7, 8, 7, 8, 5, 6, 5, 6}));
ASSERT_TRUE(model.Invoke(*NewPoolingNodeShader()));
ASSERT_OK(model.Invoke(*NewPoolingNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {4, 4, 8, 8}));
// Indices tensor is a vector<float>, but these float values should be treated
// as integers, that's why special matcher IntNear() is used.
@ -91,7 +91,7 @@ TEST(PoolingTest, MaxKernel2x2Stride2x2WithoutIndices) {
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(
0, {1, 2, 1, 2, 3, 4, 3, 4, 7, 8, 7, 8, 5, 6, 5, 6}));
ASSERT_TRUE(model.Invoke(*NewPoolingNodeShader()));
ASSERT_OK(model.Invoke(*NewPoolingNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {4, 4, 8, 8}));
}
@ -117,7 +117,7 @@ TEST(PoolingTest, AverageKernel2x2Stride2x2) {
{output});
ASSERT_TRUE(model.PopulateTensor(
0, {1, 1, 2, 2, 1, 1, 2, 2, 3, 3, 4, 4, 3, 3, 4, 4}));
ASSERT_TRUE(model.Invoke(*NewPoolingNodeShader()));
ASSERT_OK(model.Invoke(*NewPoolingNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 2, 3, 4}));
}


@ -50,7 +50,7 @@ TEST(PReluTest, LinearAlphaNoClip) {
SingleOpModel model({ToString(OperationType::PRELU), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {-1.0, -2.0, 1.0, 2.0}));
ASSERT_TRUE(model.Invoke(*NewPReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-2, -4, 1, 2}));
}
@ -76,7 +76,7 @@ TEST(PReluTest, LinearAlphaWithClip) {
SingleOpModel model({ToString(OperationType::PRELU), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {-1.0, -2.0, 1.0, 2.0}));
ASSERT_TRUE(model.Invoke(*NewPReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-2, -4, 1, 1}));
}
@ -102,7 +102,7 @@ TEST(PReluTest, 3DAlphaNoClip) {
SingleOpModel model({ToString(op_type), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -1.0, 2.0, -3.0}));
ASSERT_TRUE(model.Invoke(*NewPReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0, -2, 2, -6}));
}
@ -128,7 +128,7 @@ TEST(PReluTest, 3DAlphaWithClip) {
SingleOpModel model({ToString(op_type), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -1.0, 2.0, -3.0}));
ASSERT_TRUE(model.Invoke(*NewPReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0, -2, 1, -6}));
}


@ -50,7 +50,7 @@ TEST_F(ReluTest, Smoke) {
SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
ASSERT_TRUE(model.Invoke(*NewReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 0.0, 2.0, 8.0}));
}
@ -63,7 +63,7 @@ TEST_F(ReluTest, ClipOnly) {
SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
ASSERT_TRUE(model.Invoke(*NewReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0.0, 0.0, 2.0, 6.0}));
}
@ -76,7 +76,7 @@ TEST_F(ReluTest, AlphaOnly) {
SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
ASSERT_TRUE(model.Invoke(*NewReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 8.0}));
}
@ -89,7 +89,7 @@ TEST_F(ReluTest, ClipAndAlpha) {
SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
{GetTensorRef(1)});
ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
ASSERT_TRUE(model.Invoke(*NewReLUNodeShader()));
ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 6.0}));
}


@ -47,7 +47,7 @@ TEST(Reshape, 1x2x3To3x2x1) {
SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4, 5, 6}));
ASSERT_TRUE(model.Invoke(*NewReshapeNodeShader()));
ASSERT_OK(model.Invoke(*NewReshapeNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 2, 3, 4, 5, 6}));
}
@ -69,7 +69,7 @@ TEST(Reshape, 3x1x2To2x1x3) {
SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4, 5, 6}));
ASSERT_TRUE(model.Invoke(*NewReshapeNodeShader()));
ASSERT_OK(model.Invoke(*NewReshapeNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1, 2, 3, 4, 5, 6}));
}
@ -91,7 +91,7 @@ TEST(Reshape, 1x1x4To2x2x1) {
SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewReshapeNodeShader()));
ASSERT_OK(model.Invoke(*NewReshapeNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 2, 3, 4}));
}
@ -112,7 +112,9 @@ TEST(Reshape, BatchIsUnsupported) {
SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
EXPECT_FALSE(model.Invoke(*NewReshapeNodeShader()));
ASSERT_THAT(
model.Invoke(*NewReshapeNodeShader()).message(),
testing::HasSubstr("Only identical batch dimension is supported"));
}
} // namespace


@ -49,7 +49,7 @@ TEST(SliceTest, Identity) {
SingleOpModel model({ToString(OperationType::SLICE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewSliceNodeShader()));
ASSERT_OK(model.Invoke(*NewSliceNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 2, 3, 4}));
}
@ -72,7 +72,7 @@ TEST(SliceTest, NegativeEnds) {
SingleOpModel model({ToString(OperationType::SLICE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewSliceNodeShader()));
ASSERT_OK(model.Invoke(*NewSliceNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 2, 3, 4}));
}
@ -95,7 +95,7 @@ TEST(SliceTest, NegativeEndsNonZeroStarts) {
SingleOpModel model({ToString(OperationType::SLICE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewSliceNodeShader()));
ASSERT_OK(model.Invoke(*NewSliceNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {3}));
}
@ -118,7 +118,7 @@ TEST(SliceTest, StridesByHeight) {
SingleOpModel model({ToString(OperationType::SLICE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewSliceNodeShader()));
ASSERT_OK(model.Invoke(*NewSliceNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 3}));
}
@ -141,7 +141,7 @@ TEST(SliceTest, StridesByWidth) {
SingleOpModel model({ToString(OperationType::SLICE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewSliceNodeShader()));
ASSERT_OK(model.Invoke(*NewSliceNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4}));
}
@ -164,7 +164,7 @@ TEST(SliceTest, StridesByChannels) {
SingleOpModel model({ToString(OperationType::SLICE), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
ASSERT_TRUE(model.Invoke(*NewSliceNodeShader()));
ASSERT_OK(model.Invoke(*NewSliceNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {3}));
}


@ -48,7 +48,7 @@ TEST(SoftmaxTest, WorksForChannelsAxis) {
SingleOpModel model({ToString(OperationType::SOFT_MAX), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.1, 0.2}));
ASSERT_TRUE(model.Invoke(*NewSoftMaxNodeShader()));
ASSERT_OK(model.Invoke(*NewSoftMaxNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 1, 1, 1}));
}
@ -69,7 +69,9 @@ TEST(SoftmaxTest, DoesNotWorkForHeightAxis) {
SingleOpModel model({ToString(OperationType::SOFT_MAX), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
EXPECT_FALSE(model.Invoke(*NewSoftMaxNodeShader()));
ASSERT_THAT(
model.Invoke(*NewSoftMaxNodeShader()).message(),
testing::HasSubstr("Softmax is only supported for channels axis."));
}
TEST(SoftmaxTest, DoesNotWorkForWidthAxis) {
@ -89,7 +91,9 @@ TEST(SoftmaxTest, DoesNotWorkForWidthAxis) {
SingleOpModel model({ToString(OperationType::SOFT_MAX), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4}));
EXPECT_FALSE(model.Invoke(*NewSoftMaxNodeShader()));
ASSERT_THAT(
model.Invoke(*NewSoftMaxNodeShader()).message(),
testing::HasSubstr("Softmax is only supported for channels axis."));
}
} // namespace


@ -68,9 +68,9 @@ bool SingleOpModel::PopulateTensor(int index, std::vector<float>&& data) {
return true;
}
Status SingleOpModel::InvokeInternal(const CompilationOptions& compile_options,
const RuntimeOptions& runtime_options,
const NodeShader& shader) {
Status SingleOpModel::Invoke(const CompilationOptions& compile_options,
const RuntimeOptions& runtime_options,
const NodeShader& shader) {
std::unique_ptr<EglEnvironment> env;
RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
@ -128,6 +128,10 @@ Status SingleOpModel::InvokeInternal(const CompilationOptions& compile_options,
return OkStatus();
}
Status SingleOpModel::Invoke(const NodeShader& shader) {
return Invoke(CompilationOptions(), RuntimeOptions(), shader);
}
} // namespace gl
} // namespace gpu
} // namespace tflite


@ -19,6 +19,8 @@ limitations under the License.
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
@ -27,6 +29,10 @@ limitations under the License.
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/runtime_options.h"
#ifndef ASSERT_OK
#define ASSERT_OK(x) ASSERT_THAT(x.message(), testing::StrEq(""));
#endif
namespace tflite {
namespace gpu {
namespace gl {
@ -42,9 +48,10 @@ class SingleOpModel {
bool PopulateTensor(int index, std::vector<float>&& data);
bool Invoke(const NodeShader& shader) {
return InvokeInternal(CompilationOptions(), RuntimeOptions(), shader).ok();
}
Status Invoke(const NodeShader& shader);
Status Invoke(const CompilationOptions& compile_options,
const RuntimeOptions& runtime_options,
const NodeShader& shader);
const std::vector<float>& GetOutput(int index) const {
return outputs_[index].data;
@ -54,11 +61,6 @@ class SingleOpModel {
GraphFloat32 graph_;
std::vector<TensorFloat32> inputs_;
std::vector<TensorFloat32> outputs_;
private:
Status InvokeInternal(const CompilationOptions& compile_options,
const RuntimeOptions& runtime_options,
const NodeShader& shader);
};
} // namespace gl


@ -63,7 +63,7 @@ TEST(TransposeConvTest, O2H2W1I1Stride1x1DAdjacent1x1) {
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolutionTransposedNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2, 4, 2, 4, 4, 8, 4, 8}));
}
@ -101,7 +101,7 @@ TEST(TransposeConvTest, O1H2W2I1Stride1x1Adjacent2x2) {
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1, 1, 1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolutionTransposedNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1}));
}
@ -138,7 +138,7 @@ TEST(TransposeConvTest, O1H3W3I1Stride1x1Adjacent1x1) {
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolutionTransposedNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {7}));
}
@ -175,7 +175,7 @@ TEST(TransposeConvTest, O2H1W1I2Stride1x1Dilation1x1) {
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_TRUE(model.Invoke(*NewConvolutionTransposedNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {4, 8, 4, 8}));
}
@ -213,7 +213,7 @@ TEST(TransposeConvTest, O1H1W1I1Stride2x2Dilation1x1) {
{ToString(OperationType::CONVOLUTION_TRANSPOSED), std::move(attr)},
{input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 2, 0, 0, 0, 4, 0, 8}));
ASSERT_TRUE(model.Invoke(*NewConvolutionTransposedNodeShader()));
ASSERT_OK(model.Invoke(*NewConvolutionTransposedNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2}));
}


@ -49,7 +49,7 @@ TEST(UpsamplingBilinearTest, 1x1x2To2x2x2) {
SingleOpModel model({ToString(OperationType::UPSAMPLE_2D), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0}));
ASSERT_TRUE(model.Invoke(*NewUpsamplingNodeShader()));
ASSERT_OK(model.Invoke(*NewUpsamplingNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0}));
@ -74,7 +74,7 @@ TEST(UpsamplingBilinearTest, 1x2x1To1x4x1) {
SingleOpModel model({ToString(OperationType::UPSAMPLE_2D), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 4.0}));
ASSERT_TRUE(model.Invoke(*NewUpsamplingNodeShader()));
ASSERT_OK(model.Invoke(*NewUpsamplingNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.5, 4.0, 4.0}));
}
@ -98,7 +98,7 @@ TEST(UpsamplingBilinearTest, 2x2x1To4x4x1) {
SingleOpModel model({ToString(OperationType::UPSAMPLE_2D), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0, 4.0, 6.0, 8.0}));
ASSERT_TRUE(model.Invoke(*NewUpsamplingNodeShader()));
ASSERT_OK(model.Invoke(*NewUpsamplingNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0, 2.5, 4.0, 4.0, 3.5, 4.75, 6.0, 6.0, 6.0,