TFLite GPU OpenGL: Add specialization of SOFTMAX 1x1.
About 15x speedup.

PiperOrigin-RevId: 262675867
parent 9e88516e6d · commit e6dc56ced2
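The change routes softmax over tensors with H = W = 1 (the common fully connected / classifier-head shape) to a dedicated shader: a single 32-thread workgroup reduces exp() over the channel axis through shared memory, instead of launching one invocation per output pixel. Below is a minimal CPU-side sketch of the algorithm that shader implements; Softmax1x1Reference is a hypothetical name for illustration, and the scalar loop elides the vec4 slicing and remainder mask that the real shader uses.

#include <cmath>
#include <cstddef>
#include <vector>

// CPU sketch of what the generated 1x1 shader computes. Pass 1: each of the
// 32 "threads" accumulates exp(x) over a strided slice of the channel axis
// (the shader keeps these partials in 8 shared vec4s, i.e. 32 floats).
// A single thread then reduces the partials and publishes 1/sum. Pass 2:
// every thread rescales its slice. Illustration only, not delegate code.
std::vector<float> Softmax1x1Reference(const std::vector<float>& input) {
  constexpr int kThreads = 32;  // mirrors workgroup = uint3(32, 1, 1)
  float partial_sum[kThreads] = {0.0f};
  // Pass 1: thread t sums exp() over elements t, t + 32, t + 64, ...
  for (int tid = 0; tid < kThreads; ++tid) {
    for (std::size_t i = tid; i < input.size(); i += kThreads) {
      partial_sum[tid] += std::exp(input[i]);
    }
  }
  // The shader's "if (tid == 0)" block: reduce and store the reciprocal.
  float sum = 0.0f;
  for (int tid = 0; tid < kThreads; ++tid) sum += partial_sum[tid];
  const float inv_sum = 1.0f / sum;
  // Pass 2: exp(x) * (1 / sum) for every element.
  std::vector<float> output(input.size());
  for (std::size_t i = 0; i < input.size(); ++i) {
    output[i] = std::exp(input[i]) * inv_sum;
  }
  return output;
}

Like the shader, the sketch skips the usual max-subtraction stabilization and recomputes exp() in the second pass instead of caching it, trading ALU work for shared memory.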
tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.cc

@@ -371,6 +371,11 @@ std::string VariableAccessor::GetConstDeclarations() const {
   // with index.
   std::string declarations;
   for (const auto& variable : name_to_variable_) {
+    // Skip shared variables.
+    if (shared_variables_.find(variable.second.name) !=
+        shared_variables_.end()) {
+      continue;
+    }
     const auto& value = variable.second.value;
     if (IsVariableLength(value)) {
       absl::StrAppend(&declarations, "const ", GetVariableType(value), " ",
tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc

@@ -15,10 +15,9 @@ limitations under the License.

 #include "tensorflow/lite/delegates/gpu/gl/kernels/softmax.h"

-#include <algorithm>
-#include <cstdint>
-#include <cstring>
+#include <memory>
 #include <string>
+#include <utility>
 #include <vector>

 #include "absl/memory/memory.h"
@@ -33,6 +32,13 @@ namespace gpu {
 namespace gl {
 namespace {

+float4 GetMask(int num_channels) {
+  float4 mask(0.0f);
+  const int remainder = num_channels % 4 == 0 ? 4 : num_channels % 4;
+  for (int i = 0; i < remainder; ++i) mask[i] = 1.0f;
+  return mask;
+}
+
 class Softmax : public NodeShader {
  public:
   Status GenerateCode(const GenerationContext& ctx,
@@ -42,24 +48,101 @@ class Softmax : public NodeShader {
     const auto& attr = absl::any_cast<const SoftmaxAttributes&>(
         ctx.node->operation.attributes);
     if (input->tensor.shape != output->tensor.shape) {
-      return InvalidArgumentError("Input and output shape does not match");
+      return InvalidArgumentError("Input and output shapes do not match.");
     }
     if (attr.axis != Axis::CHANNELS) {
       return UnimplementedError("Softmax is only supported for channels axis.");
     }
-    float4 mask(0.0f);
-    const int channels = output->tensor.shape.c;
-    const int reminder = (channels % 4 == 0) ? 4 : channels % 4;
-    for (int i = 0; i < reminder; ++i) {
-      mask[i] = 1.0f;
-    }
+    return input->tensor.shape.h == 1 && input->tensor.shape.w == 1
+               ? GenerateCodeFor1x1(ctx, generated_code)
+               : GenerateCodeGeneral(ctx, generated_code);
   }
+
+ private:
+  Status GenerateCodeFor1x1(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const {
+    const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
+    const int depth = IntegralDivideRoundUp(output->tensor.shape.c, 4);
+    std::vector<Variable> shared_variables = {
+        {"partial_sum", std::vector<float4>(8)},
+    };
+    std::vector<Variable> uniform_parameters = {
+        {"depth", depth},
+        {"depth_div_32", IntegralDivideRoundUp(depth, 32)},
+        {"mask", GetMask(output->tensor.shape.c)},
+    };
+    std::string source_code = R"(
+  highp float sum = 0.0f;
+  int offset = 0;
+  int s = 0;
+  int tid = int(gl_LocalInvocationID.x);
+  do {
+    int z = offset + tid;
+    if (z < $depth$) {
+      vec4 mask_temp = z == $depth$ - 1 ? $mask$ : vec4(1.0f);
+      vec4 src = $input_data_0[0, 0, z]$;
+      sum += dot(mask_temp, exp(src));
+      offset += 32;
+    }
+    s++;
+  } while (s < $depth_div_32$);
+
+  partial_sum[tid / 4][tid % 4] = sum;
+
+  memoryBarrierShared();
+  barrier();
+
+  if (tid == 0) {
+    sum = dot(vec4(1.0f), partial_sum[0]);
+    sum += dot(vec4(1.0f), partial_sum[1]);
+    sum += dot(vec4(1.0f), partial_sum[2]);
+    sum += dot(vec4(1.0f), partial_sum[3]);
+    sum += dot(vec4(1.0f), partial_sum[4]);
+    sum += dot(vec4(1.0f), partial_sum[5]);
+    sum += dot(vec4(1.0f), partial_sum[6]);
+    sum += dot(vec4(1.0f), partial_sum[7]);
+    partial_sum[0][0] = 1.0 / sum;
+  }
+
+  memoryBarrierShared();
+  barrier();
+
+  sum = partial_sum[0][0];
+
+  offset = 0;
+  s = 0;
+  do {
+    int z = offset + tid;
+    if (z < $depth$) {
+      vec4 temp = exp($input_data_0[0, 0, z]$) * sum;
+      $output_data_0[0, 0, z]$ = temp;
+      offset += 32;
+    }
+    s++;
+  } while (s < $depth_div_32$);
+)";
+    *generated_code = {
+        /*parameters=*/std::move(uniform_parameters),
+        /*objects=*/{},
+        /*shared_variables=*/std::move(shared_variables),
+        /*workload=*/uint3(32, 1, 1),
+        /*workgroup=*/uint3(32, 1, 1),
+        /*source_code=*/std::move(source_code),
+        /*input=*/IOStructure::ONLY_DEFINITIONS,
+        /*output=*/IOStructure::ONLY_DEFINITIONS,
+    };
+    return OkStatus();
+  }
+
+  Status GenerateCodeGeneral(const GenerationContext& ctx,
+                             GeneratedCode* generated_code) const {
+    const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
     std::vector<Variable> parameters = {
         {"src_depth", IntegralDivideRoundUp(output->tensor.shape.c, 4)},
-        {"mask", mask},
+        {"mask", GetMask(output->tensor.shape.c)},
     };

-    std::string source = R"(
+    std::string source_code = R"(
   highp float sum = 0.0;
   for (int d = 0; d < $src_depth$ - 1; ++d) {
     sum += dot(vec4(1.0), exp($input_data_0[gid.x, gid.y, d]$));
@@ -79,7 +162,7 @@ class Softmax : public NodeShader {
         /*shared_variables=*/{},
         /*workload=*/uint3(output->tensor.shape.w, output->tensor.shape.h, 1),
         /*workgroup=*/uint3(),
-        /*source_code=*/std::move(source),
+        /*source_code=*/std::move(source_code),
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::ONLY_DEFINITIONS,
     };
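A note on GetMask(), which both code paths now share: channels are stored as IntegralDivideRoundUp(c, 4) vec4 slices, so when c is not a multiple of 4 the last slice carries zero-padded lanes, and each of those would otherwise add exp(0) = 1 to the softmax denominator. A standalone restatement follows, with std::array standing in for the delegate's float4 type (an assumption for illustration):

#include <array>

// Restatement of GetMask() with std::array in place of float4.
// For num_channels = 6 the tensor occupies two vec4 slices and the
// remainder is 6 % 4 = 2, so the mask is {1, 1, 0, 0}: the two padded
// lanes of the last slice are zeroed out of the exp() sum.
std::array<float, 4> GetMaskSketch(int num_channels) {
  std::array<float, 4> mask = {0.0f, 0.0f, 0.0f, 0.0f};
  const int remainder = num_channels % 4 == 0 ? 4 : num_channels % 4;
  for (int i = 0; i < remainder; ++i) mask[i] = 1.0f;
  return mask;
}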
tensorflow/lite/delegates/gpu/gl/kernels/softmax_test.cc

@@ -15,6 +15,7 @@ limitations under the License.

 #include "tensorflow/lite/delegates/gpu/gl/kernels/softmax.h"

+#include <cmath>
 #include <vector>

 #include <gmock/gmock.h>
@@ -47,9 +48,10 @@ TEST(SoftmaxTest, Softmax) {

   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.1, 0.2}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader()));
-  EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 1, 1, 1}));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6f), {1.0f, 1.0f, 1.0f, 1.0f}));
 }

 TEST(SoftmaxTest, DoesNotWorkForHeightAxis) {
@@ -68,7 +70,7 @@ TEST(SoftmaxTest, DoesNotWorkForHeightAxis) {

   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok());
 }

@@ -88,7 +90,7 @@ TEST(SoftmaxTest, DoesNotWorkForWidthAxis) {

   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok());
 }

@@ -106,17 +108,17 @@ TEST(SoftmaxTest, Softmax1x1) {
   SoftmaxAttributes attr;
   attr.axis = Axis::CHANNELS;

-  const double sum =
-      std::exp(0.1) + std::exp(0.2) + std::exp(0.3) + std::exp(0.4);
+  const float sum =
+      std::exp(0.1f) + std::exp(0.2f) + std::exp(0.3f) + std::exp(0.4f);

   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input},
                       {output});
-  ASSERT_TRUE(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
+  ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader()));
-  EXPECT_THAT(
-      model.GetOutput(0),
-      Pointwise(FloatNear(1e-6), {std::exp(0.1) / sum, std::exp(0.2) / sum,
-                                  std::exp(0.3) / sum, std::exp(0.4) / sum}));
+  EXPECT_THAT(model.GetOutput(0),
+              Pointwise(FloatNear(1e-6f),
+                        {std::exp(0.1f) / sum, std::exp(0.2f) / sum,
+                         std::exp(0.3f) / sum, std::exp(0.4f) / sum}));
 }

 }  // namespace
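For reference, the Softmax1x1 expectation evaluates to sum = e^0.1 + e^0.2 + e^0.3 + e^0.4 ≈ 1.1052 + 1.2214 + 1.3499 + 1.4918 ≈ 5.1683, so the expected output is roughly {0.2138, 0.2363, 0.2612, 0.2887}, which sums to 1 as a softmax should. Moving the reference math from double to float keeps it in the same precision as the GPU path, which is presumably why the FloatNear(1e-6f) tolerance can stay this tight.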