TFLite GPU OpenGL: Add specialization of SOFTMAX 1x1.

About 15x speedup.

PiperOrigin-RevId: 262675867
Juhyun Lee 2019-08-09 19:07:02 -07:00 committed by TensorFlower Gardener
parent 9e88516e6d
commit e6dc56ced2
3 changed files with 113 additions and 23 deletions
tensorflow/lite/delegates/gpu/gl


@@ -371,6 +371,11 @@ std::string VariableAccessor::GetConstDeclarations() const {
   // with index.
   std::string declarations;
   for (const auto& variable : name_to_variable_) {
+    // Skip shared variables.
+    if (shared_variables_.find(variable.second.name) !=
+        shared_variables_.end()) {
+      continue;
+    }
     const auto& value = variable.second.value;
     if (IsVariableLength(value)) {
       absl::StrAppend(&declarations, "const ", GetVariableType(value), " ",
tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc

@@ -15,10 +15,9 @@ limitations under the License.
 #include "tensorflow/lite/delegates/gpu/gl/kernels/softmax.h"
 #include <algorithm>
 #include <cstdint>
 #include <cstring>
 #include <memory>
 #include <string>
 #include <utility>
 #include <vector>
 #include "absl/memory/memory.h"
@@ -33,6 +32,13 @@ namespace gpu {
 namespace gl {
 namespace {

+float4 GetMask(int num_channels) {
+  float4 mask(0.0f);
+  const int remainder = num_channels % 4 == 0 ? 4 : num_channels % 4;
+  for (int i = 0; i < remainder; ++i) mask[i] = 1.0f;
+  return mask;
+}
+
 class Softmax : public NodeShader {
  public:
   Status GenerateCode(const GenerationContext& ctx,
@@ -42,24 +48,101 @@ class Softmax : public NodeShader {
     const auto& attr = absl::any_cast<const SoftmaxAttributes&>(
         ctx.node->operation.attributes);
     if (input->tensor.shape != output->tensor.shape) {
-      return InvalidArgumentError("Input and output shape does not match");
+      return InvalidArgumentError("Input and output shapes do not match.");
     }
     if (attr.axis != Axis::CHANNELS) {
       return UnimplementedError("Softmax is only supported for channels axis.");
     }
+    return input->tensor.shape.h == 1 && input->tensor.shape.w == 1
+               ? GenerateCodeFor1x1(ctx, generated_code)
+               : GenerateCodeGeneral(ctx, generated_code);
+  }
-    float4 mask(0.0f);
-    const int channels = output->tensor.shape.c;
-    const int reminder = (channels % 4 == 0) ? 4 : channels % 4;
-    for (int i = 0; i < reminder; ++i) {
-      mask[i] = 1.0f;
-    }
+
+ private:
+  Status GenerateCodeFor1x1(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const {
+    const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
+    const int depth = IntegralDivideRoundUp(output->tensor.shape.c, 4);
+    std::vector<Variable> shared_variables = {
+        {"partial_sum", std::vector<float4>(8)},
+    };
+    std::vector<Variable> uniform_parameters = {
+        {"depth", depth},
+        {"depth_div_32", IntegralDivideRoundUp(depth, 32)},
+        {"mask", GetMask(output->tensor.shape.c)},
+    };
+    std::string source_code = R"(
+  highp float sum = 0.0f;
+  int offset = 0;
+  int s = 0;
+  int tid = int(gl_LocalInvocationID.x);
+  do {
+    int z = offset + tid;
+    if (z < $depth$) {
+      vec4 mask_temp = z == $depth$ - 1 ? $mask$ : vec4(1.0f);
+      vec4 src = $input_data_0[0, 0, z]$;
+      sum += dot(mask_temp, exp(src));
+      offset += 32;
+    }
+    s++;
+  } while (s < $depth_div_32$);
+  partial_sum[tid / 4][tid % 4] = sum;
+  memoryBarrierShared();
+  barrier();
+  if (tid == 0) {
+    sum = dot(vec4(1.0f), partial_sum[0]);
+    sum += dot(vec4(1.0f), partial_sum[1]);
+    sum += dot(vec4(1.0f), partial_sum[2]);
+    sum += dot(vec4(1.0f), partial_sum[3]);
+    sum += dot(vec4(1.0f), partial_sum[4]);
+    sum += dot(vec4(1.0f), partial_sum[5]);
+    sum += dot(vec4(1.0f), partial_sum[6]);
+    sum += dot(vec4(1.0f), partial_sum[7]);
+    partial_sum[0][0] = 1.0 / sum;
+  }
+  memoryBarrierShared();
+  barrier();
+  sum = partial_sum[0][0];
+  offset = 0;
+  s = 0;
+  do {
+    int z = offset + tid;
+    if (z < $depth$) {
+      vec4 temp = exp($input_data_0[0, 0, z]$) * sum;
+      $output_data_0[0, 0, z]$ = temp;
+      offset += 32;
+    }
+    s++;
+  } while (s < $depth_div_32$);
+)";
+    *generated_code = {
+        /*parameters=*/std::move(uniform_parameters),
+        /*objects=*/{},
+        /*shared_variables=*/std::move(shared_variables),
+        /*workload=*/uint3(32, 1, 1),
+        /*workgroup=*/uint3(32, 1, 1),
+        /*source_code=*/std::move(source_code),
+        /*input=*/IOStructure::ONLY_DEFINITIONS,
+        /*output=*/IOStructure::ONLY_DEFINITIONS,
+    };
+    return OkStatus();
+  }
+
+  Status GenerateCodeGeneral(const GenerationContext& ctx,
+                             GeneratedCode* generated_code) const {
+    const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
     std::vector<Variable> parameters = {
         {"src_depth", IntegralDivideRoundUp(output->tensor.shape.c, 4)},
-        {"mask", mask},
+        {"mask", GetMask(output->tensor.shape.c)},
     };
-    std::string source = R"(
+    std::string source_code = R"(
 highp float sum = 0.0;
 for (int d = 0; d < $src_depth$ - 1; ++d) {
   sum += dot(vec4(1.0), exp($input_data_0[gid.x, gid.y, d]$));
@@ -79,7 +162,7 @@ class Softmax : public NodeShader {
         /*shared_variables=*/{},
         /*workload=*/uint3(output->tensor.shape.w, output->tensor.shape.h, 1),
         /*workgroup=*/uint3(),
-        /*source_code=*/std::move(source),
+        /*source_code=*/std::move(source_code),
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::ONLY_DEFINITIONS,
     };
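
Context note, not part of the commit: the 1x1 path is a two-pass workgroup reduction. Thirty-two threads stride over the depth = ceil(C/4) vec4 slices accumulating masked partial sums of exp(), thread 0 folds the eight shared vec4 partials into one reciprocal, and a second strided pass normalizes. The do/while runs the same $depth_div_32$ trip count on every thread, keeping control flow uniform ahead of the barrier() calls. A minimal CPU sketch of the same computation, with illustrative names only:

// CPU emulation of the 1x1 shader's reduction; channels = 6 exercises the
// GetMask() path, since the last vec4 slice carries two padded lanes.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  const int channels = 6;
  const int depth = (channels + 3) / 4;  // IntegralDivideRoundUp(channels, 4)
  std::vector<float> src = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f};
  src.resize(depth * 4, 0.0f);  // zero-padded out to whole vec4 slices
  // GetMask(6) == (1, 1, 0, 0): only two lanes of the last slice are valid.
  const int remainder = channels % 4 == 0 ? 4 : channels % 4;
  // Pass 1: each of the 32 "threads" sums exp() over a strided set of slices.
  float partial_sum[32] = {0.0f};
  for (int tid = 0; tid < 32; ++tid) {
    for (int z = tid; z < depth; z += 32) {
      for (int i = 0; i < 4; ++i) {
        const bool padded = (z == depth - 1) && (i >= remainder);
        if (!padded) partial_sum[tid] += std::exp(src[z * 4 + i]);
      }
    }
  }
  // Reduction (thread 0 in the shader): fold the partials into one reciprocal.
  float sum = 0.0f;
  for (float p : partial_sum) sum += p;
  const float inv_sum = 1.0f / sum;
  // Pass 2: normalize; the six valid outputs sum to 1.
  for (int c = 0; c < channels; ++c) {
    std::printf("softmax[%d] = %f\n", c, std::exp(src[c]) * inv_sum);
  }
  return 0;
}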

tensorflow/lite/delegates/gpu/gl/kernels/softmax_test.cc

@@ -15,6 +15,7 @@ limitations under the License.
 #include "tensorflow/lite/delegates/gpu/gl/kernels/softmax.h"
+#include <cmath>
 #include <vector>
 #include <gmock/gmock.h>
@@ -47,9 +48,10 @@ TEST(SoftmaxTest, Softmax) {
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.1, 0.2}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader()));
-  EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 1, 1, 1}));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6f), {1.0f, 1.0f, 1.0f, 1.0f}));
 }
@@ -68,7 +70,7 @@ TEST(SoftmaxTest, DoesNotWorkForHeightAxis) {
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok());
 }
@@ -88,7 +90,7 @@ TEST(SoftmaxTest, DoesNotWorkForWidthAxis) {
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok());
 }
@@ -106,17 +108,17 @@ TEST(SoftmaxTest, Softmax1x1) {
   SoftmaxAttributes attr;
   attr.axis = Axis::CHANNELS;
-  const double sum =
-      std::exp(0.1) + std::exp(0.2) + std::exp(0.3) + std::exp(0.4);
+  const float sum =
+      std::exp(0.1f) + std::exp(0.2f) + std::exp(0.3f) + std::exp(0.4f);
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader()));
-  EXPECT_THAT(
-      model.GetOutput(0),
-      Pointwise(FloatNear(1e-6), {std::exp(0.1) / sum, std::exp(0.2) / sum,
-                                  std::exp(0.3) / sum, std::exp(0.4) / sum}));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6f),
+                        {std::exp(0.1f) / sum, std::exp(0.2f) / sum,
+                         std::exp(0.3f) / sum, std::exp(0.4f) / sum}));
 }

 }  // namespace
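
For reference (worked numbers, not in the commit): the Softmax1x1 expectation evaluates to approximately (0.2138, 0.2363, 0.2612, 0.2887), which sums to 1; the FloatNear(1e-6f) tolerance leaves room for float rounding in the shader's exp() and the reciprocal multiply.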