diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc b/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc
index efaf39390d9..e59343df7b6 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc
@@ -15,10 +15,9 @@ limitations under the License.
 
 #include "tensorflow/lite/delegates/gpu/gl/kernels/softmax.h"
 
-#include
-#include
-#include
+#include
 #include
+#include
 #include
 
 #include "absl/memory/memory.h"
@@ -33,6 +32,13 @@ namespace gpu {
 namespace gl {
 namespace {
 
+float4 GetMask(int num_channels) {
+  float4 mask(0.0f);
+  const int remainder = num_channels % 4 == 0 ? 4 : num_channels % 4;
+  for (int i = 0; i < remainder; ++i) mask[i] = 1.0f;
+  return mask;
+}
+
 class Softmax : public NodeShader {
  public:
   Status GenerateCode(const GenerationContext& ctx,
@@ -42,37 +48,117 @@ class Softmax : public NodeShader {
     const auto& attr = absl::any_cast<SoftmaxAttributes>(
         ctx.node->operation.attributes);
     if (input->tensor.shape != output->tensor.shape) {
-      return InvalidArgumentError("Input and output shape does not match");
+      return InvalidArgumentError("Input and output shapes do not match.");
     }
     if (attr.axis != Axis::CHANNELS) {
       return UnimplementedError("Softmax is only supported for channels axis.");
     }
+    return input->tensor.shape.h == 1 && input->tensor.shape.w == 1
+               ? GenerateCodeFor1x1(ctx, generated_code)
+               : GenerateCodeGeneral(ctx, generated_code);
+  }
 
-    float4 mask(0.0f);
-    const int channels = output->tensor.shape.c;
-    const int reminder = (channels % 4 == 0) ? 4 : channels % 4;
-    for (int i = 0; i < reminder; ++i) {
-      mask[i] = 1.0f;
+ private:
+  Status GenerateCodeFor1x1(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const {
+    const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
+    const int depth = IntegralDivideRoundUp(output->tensor.shape.c, 4);
+    std::vector shared_variables = {
+        {"partial_sum", std::vector<float4>(8)},
+    };
+    std::vector uniform_parameters = {
+        {"depth", depth},
+        {"depth_div_32", IntegralDivideRoundUp(depth, 32)},
+        {"mask", GetMask(output->tensor.shape.c)},
+    };
+    std::string source_code = R"(
+  highp vec4 kOnes = vec4(1.0);
+  highp float sum = 0.0;
+  int offset = 0;
+  int s = 0;
+  int tid = int(gl_LocalInvocationID.x);
+  do {
+    int z = offset + tid;
+    if (z < $depth$) {
+      highp vec4 mask_temp = z == $depth$ - 1 ? $mask$ : kOnes;
+      highp vec4 src = $input_data_0[0, 0, z]$;
+      sum += dot(mask_temp, exp(src));
+      offset += 32;
     }
+    s++;
+  } while (s < $depth_div_32$);
+
+  partial_sum[tid / 4][tid % 4] = sum;
+
+  memoryBarrierShared();
+  barrier();
+
+  if (tid == 0) {
+    sum = dot(kOnes, partial_sum[0]);
+    sum += dot(kOnes, partial_sum[1]);
+    sum += dot(kOnes, partial_sum[2]);
+    sum += dot(kOnes, partial_sum[3]);
+    sum += dot(kOnes, partial_sum[4]);
+    sum += dot(kOnes, partial_sum[5]);
+    sum += dot(kOnes, partial_sum[6]);
+    sum += dot(kOnes, partial_sum[7]);
+    partial_sum[0][0] = 1.0 / sum;
+  }
+
+  memoryBarrierShared();
+  barrier();
+
+  sum = partial_sum[0][0];
+
+  offset = 0;
+  s = 0;
+  do {
+    int z = offset + tid;
+    if (z < $depth$) {
+      highp vec4 src = $input_data_0[0, 0, z]$;
+      highp vec4 temp = exp(src) * sum;
+      $output_data_0[0, 0, z]$ = temp;
+      offset += 32;
+    }
+    s++;
+  } while (s < $depth_div_32$);
+)";
+    *generated_code = {
+        /*parameters=*/std::move(uniform_parameters),
+        /*objects=*/{},
+        /*shared_variables=*/std::move(shared_variables),
+        /*workload=*/uint3(32, 1, 1),
+        /*workgroup=*/uint3(32, 1, 1),
+        /*source_code=*/std::move(source_code),
+        /*input=*/IOStructure::ONLY_DEFINITIONS,
+        /*output=*/IOStructure::ONLY_DEFINITIONS,
+    };
+    return OkStatus();
+  }
+
+  Status GenerateCodeGeneral(const GenerationContext& ctx,
+                             GeneratedCode* generated_code) const {
+    const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
     std::vector parameters = {
         {"src_depth", IntegralDivideRoundUp(output->tensor.shape.c, 4)},
-        {"mask", mask},
+        {"mask", GetMask(output->tensor.shape.c)},
     };
-    std::string source = R"(
+    std::string source_code = R"(
+  highp vec4 kOnes = vec4(1.0);
   highp float sum = 0.0;
   for (int d = 0; d < $src_depth$ - 1; ++d) {
-    highp vec4 v = $input_data_0[gid.x, gid.y, d]$;
-    sum += dot(vec4(1.0), exp(v));
+    highp vec4 src = $input_data_0[gid.x, gid.y, d]$;
+    sum += dot(kOnes, exp(src));
   }
   {
     int d = $src_depth$ - 1;
-    highp vec4 v = $input_data_0[gid.x, gid.y, d]$;
-    sum += dot($mask$, exp(v));
+    highp vec4 src = $input_data_0[gid.x, gid.y, d]$;
+    sum += dot($mask$, exp(src));
   }
   for (int d = 0; d < $src_depth$; ++d) {
-    highp vec4 v = $input_data_0[gid.x, gid.y, d]$;
-    vec4 temp_sum = exp(v) / sum;
+    highp vec4 src = $input_data_0[gid.x, gid.y, d]$;
+    highp vec4 temp_sum = exp(src) / sum;
     $output_data_0[gid.x, gid.y, d] = temp_sum$;
   }
 )";
@@ -82,7 +168,7 @@ class Softmax : public NodeShader {
         /*shared_variables=*/{},
         /*workload=*/uint3(output->tensor.shape.w, output->tensor.shape.h, 1),
         /*workgroup=*/uint3(),
-        /*source_code=*/std::move(source),
+        /*source_code=*/std::move(source_code),
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::ONLY_DEFINITIONS,
     };
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/softmax_test.cc b/tensorflow/lite/delegates/gpu/gl/kernels/softmax_test.cc
index 2e031c6db68..1707e1efb8f 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/softmax_test.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/softmax_test.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/lite/delegates/gpu/gl/kernels/softmax.h" +#include #include #include @@ -47,9 +48,10 @@ TEST(SoftmaxTest, Softmax) { SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); - ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.1, 0.2})); + ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader())); - EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 1, 1, 1})); + EXPECT_THAT(model.GetOutput(0), + Pointwise(FloatNear(1e-6f), {1.0f, 1.0f, 1.0f, 1.0f})); } TEST(SoftmaxTest, DoesNotWorkForHeightAxis) { @@ -68,7 +70,7 @@ TEST(SoftmaxTest, DoesNotWorkForHeightAxis) { SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); - ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4})); + ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok()); } @@ -88,7 +90,7 @@ TEST(SoftmaxTest, DoesNotWorkForWidthAxis) { SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); - ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4})); + ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok()); } @@ -106,17 +108,17 @@ TEST(SoftmaxTest, Softmax1x1) { SoftmaxAttributes attr; attr.axis = Axis::CHANNELS; - const double sum = - std::exp(0.1) + std::exp(0.2) + std::exp(0.3) + std::exp(0.4); + const float sum = + std::exp(0.1f) + std::exp(0.2f) + std::exp(0.3f) + std::exp(0.4f); SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); - ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4})); + ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader())); - EXPECT_THAT( - model.GetOutput(0), - Pointwise(FloatNear(1e-6), {std::exp(0.1) / sum, std::exp(0.2) / sum, - std::exp(0.3) / sum, std::exp(0.4) / sum})); + EXPECT_THAT(model.GetOutput(0), + Pointwise(FloatNear(1e-6f), + {std::exp(0.1f) / sum, std::exp(0.2f) / sum, + std::exp(0.3f) / sum, std::exp(0.4f) / sum})); } } // namespace