ConcatXY converted to new style.

Added support for concatenation along the Batch and Depth axes.

PiperOrigin-RevId: 316999295
Change-Id: I94f2168f2861790b3a30c79b2b3476aa44c55748
Raman Sarokin 2020-06-17 17:29:06 -07:00 committed by TensorFlower Gardener
parent b1933d67e5
commit 274a0f944e
2 changed files with 99 additions and 67 deletions
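
The core of the new-style kernel is a coordinate walk along the concatenation axis: the generated code keeps a running coordinate, tests it against each source's extent on that axis, and subtracts that extent before moving on to the next source. The snippet below is a minimal CPU-side sketch of that walk; it is not part of the patch, the sizes are made up, and it uses plain C++ in place of the generated OpenCL.

// Hypothetical CPU analogue of the coordinate walk emitted by
// GetConcatKernelCode() in the diff below. Illustrative only.
#include <cstdio>
#include <vector>

int main() {
  // Extents of three sources along the concat axis (e.g. Batch sizes 2, 1, 3).
  const std::vector<int> src_extents = {2, 1, 3};
  const int dst_extent = 2 + 1 + 3;

  for (int dst = 0; dst < dst_extent; ++dst) {
    // Same walk as the generated kernel: start from the destination coordinate
    // and subtract each source extent until the coordinate lands inside one.
    int coord = dst;
    for (size_t i = 0; i < src_extents.size(); ++i) {
      if (coord >= 0 && coord < src_extents[i]) {
        std::printf("dst %d -> src_tensor_%zu, local coord %d\n", dst, i, coord);
      }
      coord -= src_extents[i];
    }
  }
  return 0;
}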

View File

@@ -15,7 +15,9 @@ limitations under the License.
 #include "tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.h"
 
+#include <map>
 #include <string>
+#include <vector>
 
 #include "tensorflow/lite/delegates/gpu/cl/kernels/util.h"
 #include "tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.h"
@@ -27,51 +29,93 @@ namespace gpu {
 namespace cl {
 namespace {
 
-std::string GetConcatKernelCode(
-    const OperationDef& op_def, int tensors_count,
-    const std::vector<ElementwiseOperation*>& linked_operations) {
-  std::vector<TensorCodeGenerator> srcs(tensors_count);
-  for (int i = 0; i < tensors_count; ++i) {
-    const std::string tensor_name = "src_data_" + std::to_string(i);
-    const std::string width = "src_size_" + std::to_string(i) + ".x";
-    const std::string height = "src_size_" + std::to_string(i) + ".y";
-    srcs[i] =
-        TensorCodeGenerator(tensor_name, WHSPoint{width, height, "dst_size.z"},
-                            op_def.src_tensors[i]);
+std::string GetConcatKernelCode(const OperationDef& op_def,
+                                const ConcatAttributes& attr, Arguments* args) {
+  std::vector<std::string> tensor_names(op_def.src_tensors.size());
+  for (int i = 0; i < op_def.src_tensors.size(); ++i) {
+    tensor_names[i] = "src_tensor_" + std::to_string(i);
+    args->AddObjectRef(
+        tensor_names[i], AccessType::READ,
+        absl::make_unique<TensorDescriptor>(op_def.src_tensors[0]));
+  }
+  args->AddObjectRef(
+      "dst_tensor", AccessType::WRITE,
+      absl::make_unique<TensorDescriptor>(op_def.dst_tensors[0]));
+
+  std::map<Axis, std::string> axis_to_selector = {
+      {Axis::WIDTH, "Width"},   {Axis::HEIGHT, "Height"},
+      {Axis::DEPTH, "Depth"},   {Axis::CHANNELS, "Channels"},
+      {Axis::BATCH, "Batch"},
+  };
+  std::map<Axis, std::string> axis_to_coord = {
+      {Axis::WIDTH, "X"},    {Axis::HEIGHT, "Y"}, {Axis::DEPTH, "D"},
+      {Axis::CHANNELS, "S"}, {Axis::BATCH, "B"},
+  };
+
+  std::vector<std::string> src_coords;
+  std::vector<std::string> dst_coords;
+  for (auto axis :
+       {Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH, Axis::CHANNELS, Axis::BATCH}) {
+    if (op_def.src_tensors[0].HasAxis(axis) && axis != Axis::BATCH) {
+      if (axis == attr.axis) {
+        src_coords.push_back("coord");
+      } else {
+        src_coords.push_back(axis_to_coord[axis]);
+      }
+    }
+    if (op_def.dst_tensors[0].HasAxis(axis)) {
+      dst_coords.push_back(axis_to_coord[axis]);
+    }
+  }
+  std::string src_coord = src_coords[0];
+  for (int i = 1; i < src_coords.size(); ++i) {
+    src_coord += ", " + src_coords[i];
+  }
+  std::string dst_coord = dst_coords[0];
+  for (int i = 1; i < dst_coords.size(); ++i) {
+    dst_coord += ", " + dst_coords[i];
   }
-  TensorCodeGenerator dst("dst_data",
-                          WHSPoint{"dst_size.x", "dst_size.y", "dst_size.z"},
-                          op_def.dst_tensors[0]);
 
   std::string c = GetCommonDefines(op_def.precision);
   c += "__kernel void main_function(\n";
-  for (const auto& src : srcs) {
-    c += src.GetDeclaration(AccessType::READ) + ",\n";
+  c += "$0) {\n";
+  if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
+    c += "  int linear_id_0 = get_global_id(0);\n";
+    c += "  int X = linear_id_0 / args.dst_tensor.Batch();\n";
+    c += "  int B = linear_id_0 % args.dst_tensor.Batch();\n";
+  } else {
+    c += "  int X = get_global_id(0);\n";
   }
-  c += dst.GetDeclaration(AccessType::WRITE);
-  c += GetArgsDeclaration(linked_operations);
-  for (int i = 0; i < tensors_count; ++i) {
-    const std::string uniform_name = "src_size_" + std::to_string(i);
-    c += "    int4 " + uniform_name + ",\n";
+  if (op_def.dst_tensors[0].HasAxis(Axis::DEPTH)) {
+    c += "  int linear_id_1 = get_global_id(1);\n";
+    c += "  int Y = linear_id_1 / args.dst_tensor.Depth();\n";
+    c += "  int D = linear_id_1 % args.dst_tensor.Depth();\n";
+  } else {
+    c += "  int Y = get_global_id(1);\n";
   }
-  c += "    int4 dst_size \n";
-  c += ") {\n";
-  c += "  int X = get_global_id(0);\n";
-  c += "  int Y = get_global_id(1);\n";
-  c += "  int Z = get_global_id(2);\n";
-  c += "  if (Z >= dst_size.z) return;\n";
-  for (int i = 0; i < tensors_count; ++i) {
-    const std::string size_name = "src_size_" + std::to_string(i);
-    c += "  if (X < " + size_name + ".x && Y < " + size_name + ".y) { \n";
-    c += "    FLT4 result = " + srcs[i].ReadWHS("X", "Y", "Z") + ";\n";
-    c += "    int dst_x = X + " + size_name + ".z;\n";
-    c += "    int dst_y = Y + " + size_name + ".w;\n";
-    const LinkingContext context{"result", "dst_x", "dst_y", "Z"};
-    c += PostProcess(linked_operations, context);
-    c += "    " + dst.WriteWHS("result", "dst_x", "dst_y", "Z");
+  c += "  int S = get_global_id(2);\n";
+  c += "  if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
+       "S >= args.dst_tensor.Slices()) { \n";
+  c += "    return; \n";
+  c += "  } \n";
+  c += "  FLT4 result = (FLT4)(0.0f);\n";
+  c += "  int coord = " + axis_to_coord[attr.axis] + ";\n";
+  for (int i = 0; i < op_def.src_tensors.size(); ++i) {
+    const std::string field =
+        "args." + tensor_names[i] + "." + axis_to_selector[attr.axis] + "()";
+    c += "  if (coord >= 0 && coord < " + field + ") { \n";
+    if (op_def.src_tensors[i].HasAxis(Axis::BATCH)) {
+      if (attr.axis == Axis::BATCH) {
+        c += "  args." + tensor_names[i] + ".SetBatchRef(coord);\n";
+      } else {
+        c += "  args." + tensor_names[i] + ".SetBatchRef(B);\n";
+      }
+    }
+    c += "    result = args." + tensor_names[i] + ".Read(" + src_coord + ");\n";
     c += "  } \n";
+    c += "  coord -= " + field + ";\n";
   }
+  c += "  args.dst_tensor.Write(result, " + dst_coord + ");\n";
   c += "}\n";
   return c;
 }
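
For concreteness, the following program prints roughly the kernel string GetConcatKernelCode would assemble for two BHWC sources concatenated along Axis::BATCH, hand-traced from the generator above. It is an approximation: the GetCommonDefines prelude is omitted, whitespace and line wrapping differ, and the $0 placeholder and the args.* accessors are only resolved later by the Arguments machinery, so the embedded text is not compilable OpenCL as shown.

// Hand-traced approximation of the generated kernel string; not produced by
// running the patch, shown only to illustrate the emitted structure.
#include <cstdio>

int main() {
  const char* kernel = R"(
__kernel void main_function(
$0) {
  int linear_id_0 = get_global_id(0);
  int X = linear_id_0 / args.dst_tensor.Batch();
  int B = linear_id_0 % args.dst_tensor.Batch();
  int Y = get_global_id(1);
  int S = get_global_id(2);
  if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() ||
      S >= args.dst_tensor.Slices()) {
    return;
  }
  FLT4 result = (FLT4)(0.0f);
  int coord = B;
  if (coord >= 0 && coord < args.src_tensor_0.Batch()) {
    args.src_tensor_0.SetBatchRef(coord);
    result = args.src_tensor_0.Read(X, Y, S);
  }
  coord -= args.src_tensor_0.Batch();
  if (coord >= 0 && coord < args.src_tensor_1.Batch()) {
    args.src_tensor_1.SetBatchRef(coord);
    result = args.src_tensor_1.Read(X, Y, S);
  }
  coord -= args.src_tensor_1.Batch();
  args.dst_tensor.Write(result, X, Y, S, B);
})";
  std::printf("%s\n", kernel);
  return 0;
}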
@@ -97,46 +141,32 @@ ConcatXY& ConcatXY::operator=(ConcatXY&& operation) {
 }
 
 absl::Status ConcatXY::Compile(const CreationContext& creation_context) {
-  const auto code =
-      GetConcatKernelCode(definition_, tensors_count_, linked_operations_);
+  std::string code = GetConcatKernelCode(definition_, attr_, &args_);
+  std::string element_wise_code;
+  RETURN_IF_ERROR(
+      MergeOperations(linked_operations_, &args_, &element_wise_code));
+  RETURN_IF_ERROR(args_.TransformToCLCode(creation_context.device->GetInfo(),
+                                          {{"dst_tensor", element_wise_code}},
+                                          &code));
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
 absl::Status ConcatXY::BindArguments() {
-  kernel_.ResetBindingCounter();
-  for (int i = 0; i < tensors_count_; ++i) {
-    RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[i]->GetMemoryPtr()));
-  }
-  RETURN_IF_ERROR(kernel_.SetMemoryAuto(dst_[0]->GetMemoryPtrForWriting()));
-  RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
-  int x_offset = 0;
-  int y_offset = 0;
-  for (int i = 0; i < tensors_count_; ++i) {
-    const int width = src_[i]->Width() * src_[i]->Batch();
-    const int height = src_[i]->Height();
+  for (int i = 0; i < definition_.src_tensors.size(); ++i) {
     RETURN_IF_ERROR(
-        kernel_.SetBytesAuto(int4(width, height, x_offset, y_offset)));
-    x_offset += attr_.axis == Axis::WIDTH ? width : 0;
-    y_offset += attr_.axis == Axis::HEIGHT ? height : 0;
+        args_.SetObjectRef("src_tensor_" + std::to_string(i), src_[i]));
   }
-  RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return absl::OkStatus();
+  RETURN_IF_ERROR(args_.SetObjectRef("dst_tensor", dst_[0]));
+  RETURN_IF_ERROR(SetArguments(linked_operations_, &args_));
+  return args_.Bind(kernel_.kernel());
 }
 
 int3 ConcatXY::GetGridSize() const {
-  int max_src_width = 0;
-  int max_src_height = 0;
-  for (int i = 0; i < tensors_count_; ++i) {
-    max_src_width = std::max(max_src_width, src_[i]->Width());
-    max_src_height = std::max(max_src_height, src_[i]->Height());
-  }
-  const int grid_x = max_src_width * dst_[0]->Batch();
-  const int grid_y = max_src_height;
+  const int grid_x = dst_[0]->Width() * dst_[0]->Batch();
+  const int grid_y = dst_[0]->Height() * dst_[0]->Depth();
   const int grid_z = dst_[0]->Slices();
   return int3(grid_x, grid_y, grid_z);
 }
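
The new grid covers batch and depth by folding them into the first two dispatch dimensions (grid_x = Width * Batch, grid_y = Height * Depth), and the kernel recovers the individual coordinates with the linear_id divisions shown earlier. A small self-contained check of that decomposition, with made-up sizes, is sketched below.

// Sketch (not from the patch): verifies that the grid sizing in GetGridSize()
// and the linear-id decomposition emitted by GetConcatKernelCode() visit each
// (X, B) and (Y, D) destination coordinate exactly once.
#include <cassert>
#include <vector>

int main() {
  const int width = 5, batch = 3, height = 4, depth = 2;

  // grid_x = dst.Width() * dst.Batch(); grid_y = dst.Height() * dst.Depth().
  const int grid_x = width * batch;
  const int grid_y = height * depth;

  std::vector<int> visited_xb(width * batch, 0);
  for (int linear_id_0 = 0; linear_id_0 < grid_x; ++linear_id_0) {
    const int x = linear_id_0 / batch;  // int X = linear_id_0 / Batch();
    const int b = linear_id_0 % batch;  // int B = linear_id_0 % Batch();
    ++visited_xb[x * batch + b];
  }
  for (int count : visited_xb) assert(count == 1);

  std::vector<int> visited_yd(height * depth, 0);
  for (int linear_id_1 = 0; linear_id_1 < grid_y; ++linear_id_1) {
    const int y = linear_id_1 / depth;  // int Y = linear_id_1 / Depth();
    const int d = linear_id_1 % depth;  // int D = linear_id_1 % Depth();
    ++visited_yd[y * depth + d];
  }
  for (int count : visited_yd) assert(count == 1);
  return 0;
}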

View File

@@ -105,8 +105,10 @@ absl::Status SelectConcat(const ConcatAttributes& attr,
       *ptr = absl::make_unique<ConcatZ>(std::move(operation));
       return absl::OkStatus();
     }
-    case Axis::WIDTH:
-    case Axis::HEIGHT: {
+    case Axis::BATCH:
+    case Axis::DEPTH:
+    case Axis::HEIGHT:
+    case Axis::WIDTH: {
       ConcatXY operation = CreateConcatXY(op_def, attr, channels.size());
       *ptr = absl::make_unique<ConcatXY>(std::move(operation));
       return absl::OkStatus();