Add TensorFlow Lite Hexagon Delegate, which accelerates graph inference via Hexagon NN on Snapdragon Hexagon DSPs

PiperOrigin-RevId: 285512192
Change-Id: Id0f1887dff0c0605507495691dbccae07ce8dea9
Author: Karim Nosir (2019-12-13 19:02:35 -08:00)
Committer: TensorFlower Gardener
parent 0505a9e2cb
commit 8e7ed7ee5a
69 changed files with 6123 additions and 0 deletions

@@ -0,0 +1,110 @@
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
load("//tensorflow/lite:build_def.bzl", "tflite_copts", "tflite_linkopts")
package(
default_visibility = [
"//visibility:public",
],
licenses = ["notice"], # Apache 2.0
)
cc_library(
name = "hexagon_implementation",
srcs = ["hexagon_implementation.cc"],
hdrs = [
"hexagon_implementation.h",
"hexagon_nn_interface.h",
],
tags = [
"manual",
"nobuilder",
],
deps = [
"//tensorflow/lite:minimal_logging",
"//tensorflow/lite/experimental/delegates/hexagon/hexagon_nn:hexagon_nn_header",
"//tensorflow/lite/kernels/internal:compatibility",
],
)
cc_library(
name = "hexagon_delegate_kernel",
srcs = [
"hexagon_delegate.h",
"hexagon_delegate_kernel.cc",
],
hdrs = ["hexagon_delegate_kernel.h"],
tags = [
"manual",
"nobuilder",
],
deps = [
":hexagon_implementation",
":utils",
"//tensorflow/lite:kernel_api",
"//tensorflow/lite/c:common",
"//tensorflow/lite/experimental/delegates/hexagon/builders:op_builder",
"//tensorflow/lite/experimental/delegates/hexagon/hexagon_nn:hexagon_nn_header",
"//tensorflow/lite/schema:schema_fbs",
"@hexagon_nn//:hexagon_nn_ops",
],
)
cc_library(
name = "hexagon_delegate",
srcs = ["hexagon_delegate.cc"],
hdrs = ["hexagon_delegate.h"],
tags = [
"manual",
"nobuilder",
],
deps = [
":hexagon_delegate_kernel",
":hexagon_implementation",
":utils",
"//tensorflow/lite:kernel_api",
"//tensorflow/lite/c:common",
],
)
cc_library(
name = "utils",
srcs = ["utils.cc"],
hdrs = ["utils.h"],
copts = tflite_copts(),
tags = [
"manual",
"nobuilder",
],
deps = [
"//tensorflow/lite:kernel_api",
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels:kernel_util",
],
)
cc_test(
name = "utils_test",
srcs = ["utils_test.cc"],
linkopts = tflite_linkopts() + ["-lm"],
deps = [
":utils",
"//tensorflow/lite/c:common",
"@com_google_googletest//:gtest_main",
],
)
exports_files(["version_script.lds"])

@@ -0,0 +1,99 @@
# Hexagon Delegate
Experimental delegate that uses the Hexagon SDK to offload processing to the
Qualcomm DSP.
Note that only quantized models are supported, since the DSP is efficient
with quantized data; all op support is therefore for quantized versions.
Usage:
- Add a dependency on the hexagon_delegate rule.
- Code change example:
```
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
// Assuming the shared libraries are under "/data/local/tmp/".
// If the files are packaged with the native libs in an Android app, this
// will typically be the path returned by
// "getContext().getApplicationInfo().nativeLibraryDir".
const char library_directory_path[] = "/data/local/tmp/";
TfLiteHexagonInitWithPath(library_directory_path); // Needed once at startup.
::tflite::TfLiteHexagonDelegateOptions params = {0};
// 'delegate_ptr' needs to outlive the interpreter. For example, if the use
// case requires resizing inputs or anything else that can trigger
// re-applying delegates, then 'delegate_ptr' must outlive the interpreter.
auto* delegate_ptr = ::tflite::TfLiteHexagonDelegateCreate(&params);
Interpreter::TfLiteDelegatePtr delegate(delegate_ptr,
[](TfLiteDelegate* delegate) {
::tflite::TfLiteHexagonDelegateDelete(delegate);
});
interpreter->ModifyGraphWithDelegate(delegate.get());
TfLiteHexagonTearDown(); // Needed once at the end of app/DSP usage.
```
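`ModifyGraphWithDelegate` returns a `TfLiteStatus`, so it is worth checking the
result: if delegation fails, the interpreter keeps running on CPU. A minimal
sketch of that check, using only the calls shown above:

```
// Sketch: verify that the delegate was actually applied.
if (interpreter->ModifyGraphWithDelegate(delegate.get()) != kTfLiteOk) {
  // Delegation failed (e.g., unsupported device); execution stays on CPU.
  // Log and/or fall back as appropriate for the app.
}
```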
* Shared libraries:
  - 'libhexagon_interface.so', which holds the interface that the delegate
    uses. It must be available if you linked the hexagon_delegate library to
    TFLite. You can either load it from a shell by overriding
    LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"path to the .so",
    or package it inside your APK so that it is available at runtime.
  - 'libhexagon_nn_skel(_v65/_v66).so', which holds the DSP code.
    Use TfLiteHexagonInitWithPath(..) and provide the path to the directory
    which holds the shared libraries for Hexagon NN on the device.
    If you are using TfLiteHexagonInit() instead, you will need to set the
    environment variable "ADSP_LIBRARY_PATH" to
    "path_to_the_lib;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp"
    (see the sketch below). Note that the separator here is ';', not ':'.
    You can push all 3 skel files, and the library will pick the one needed
    based on the device at runtime. Or, if you are sure which one the device
    needs, push only that one.
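For the TfLiteHexagonInit() route, the environment variable can also be set
from native code before initialization. A minimal sketch, assuming the skel
libraries were pushed to "/data/local/tmp" (the path is illustrative):

```
#include <cstdlib>  // for setenv

// Sketch: point the DSP loader at the pushed skel libraries, then init.
// The ';' separator and the system fallback paths follow the note above.
setenv("ADSP_LIBRARY_PATH",
       "/data/local/tmp;/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp",
       /*overwrite=*/1);
TfLiteHexagonInit();  // Needed once at startup.
```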
## Supported Ops
Hexagon only supports ops that have inputs/outputs of <= 4 dimensions.
The following operations have been implemented, with a few constraints that
are verified in `IsNodeSupportedByHexagon`:
* Add
* ArgMax
* ArgMin
* AveragePool2D:
* Constraints:
- No Activation
* Concat
* Conv2D:
* Constraints:
- stride width/height <= 3
* DepthwiseConv2D:
* Constraints:
- Filter width == 3
- depth_multiplier == 1
- dilation only supported when stride == 1
- Otherwise, stride height/width <= 3
* FullyConnected (without any activation)
* L2Normalization (without any activation)
* Logistic (aka Sigmoid)
* MaxPool2D (without any activation) (b/129276536)
* Mul (without any activation) (b/129276536)
* Neg
* Pad: Only supports 0 padding (b/139277813)
* Relu
* Relu6
* Reshape
* Resize Bilinear:
* Constraints:
- Requested size <= 65 (b/143105433)
* Resize Nearest Neighbor
* SoftMax
* Split
* Sub
* Tanh
* Transpose
* TransposeConv2D:
* Constraints:
- stride height/width <= 3
- dilation height/width == 1

@@ -0,0 +1,78 @@
package(
default_visibility = [
"//visibility:public",
],
licenses = ["notice"], # Apache 2.0
)
cc_library(
name = "op_builder",
srcs = [
"activation_builder.cc",
"arg_min_max_builder.cc",
"arithmetic_builder.cc",
"concat_builder.cc",
"conv_2d_builder.cc",
"l2_normalization_builder.cc",
"matmul_builder.cc",
"neg_op_builder.cc",
"op_builder.cc",
"pad_builder.cc",
"pool_2d_builder.cc",
"reduce_builder.cc",
"reshape_builder.cc",
"resize_bilinear_builder.cc",
"resize_nearest_neighbor_builder.cc",
"softmax_builder.cc",
"split_builder.cc",
"transpose_builder.cc",
"transpose_conv_2d_builder.cc",
],
hdrs = [
"activation_builder.h",
"arg_min_max_builder.h",
"arithmetic_builder.h",
"concat_builder.h",
"conv_2d_builder.h",
"l2_normalization_builder.h",
"matmul_builder.h",
"neg_op_builder.h",
"op_builder.h",
"pad_builder.h",
"pool_2d_builder.h",
"reduce_builder.h",
"reshape_builder.h",
"resize_bilinear_builder.h",
"resize_nearest_neighbor_builder.h",
"softmax_builder.h",
"split_builder.h",
"transpose_builder.h",
"transpose_conv_2d_builder.h",
],
tags = [
"manual",
"nobuilder",
],
deps = [
":op_factory",
"//tensorflow/lite:kernel_api",
"//tensorflow/lite/c:common",
"//tensorflow/lite/experimental/delegates/hexagon:hexagon_implementation",
"//tensorflow/lite/experimental/delegates/hexagon/hexagon_nn:hexagon_nn_header",
"//tensorflow/lite/kernels:kernel_util",
"//tensorflow/lite/kernels:padding",
"//tensorflow/lite/kernels/internal:optimized_base",
"@hexagon_nn//:hexagon_nn_ops",
],
)
cc_library(
name = "op_factory",
hdrs = ["op_factory.h"],
tags = [
"manual",
"nobuilder",
],
deps = [
],
)

@@ -0,0 +1,87 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/activation_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ActivationOpBuilder::PopulateSubGraph(
const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int scalar_shape[] = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
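  // Hexagon NN quantized ops take the float min/max of each tensor as extra
  // scalar inputs; they are derived here from the TFLite quantization params
  // and wrapped in scalar const nodes.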
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
auto* input_min_const = graph_builder_->AddConstNodeWithData(
scalar_shape, reinterpret_cast<char*>(&input_min_), sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
scalar_shape, reinterpret_cast<char*>(&input_max_), sizeof(input_max_));
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
if (op_node_.op_type == OP_QuantizedReluX_8) {
auto* relu_value_const = graph_builder_->AddConstNodeWithData(
scalar_shape, reinterpret_cast<char*>(&relu_value_),
sizeof(relu_value_));
AddInput(TensorID(relu_value_const->GetID(), 0));
}
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus ActivationOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ActivationOpBuilder::~ActivationOpBuilder() {}
OpBuilder* CreateActivationBuilder(GraphBuilder* graph_builder, int op_type) {
return new ActivationOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

@@ -0,0 +1,52 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ACTIVATION_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ACTIVATION_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ActivationOpBuilder : public OpBuilder {
public:
explicit ActivationOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
explicit ActivationOpBuilder(GraphBuilder* graph_builder, int op_type,
int relu_value)
: OpBuilder(graph_builder, op_type), relu_value_(relu_value) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ActivationOpBuilder() override;
private:
TensorID node_output_;
float input_min_, input_max_;
float relu_value_ = 6;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ACTIVATION_BUILDER_H_

@@ -0,0 +1,98 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/arg_min_max_builder.h"
#include <limits>
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ArgMinMaxOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
if (inputs->size != 2) {
context->ReportError(context, "Expecting 2 inputs %d != 2\n", inputs->size);
return kTfLiteError;
}
// Input data tensor.
int input_tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[input_tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(input_tensor_id));
// Axis tensor.
const int axis_tensor_id = inputs->data[1];
const auto& axis = context->tensors[axis_tensor_id];
if (axis.allocation_type != kTfLiteMmapRo) {
context->ReportError(context,
"Axis tensor doesn't have correct allocation type: %s",
axis.name);
return kTfLiteError;
}
int axis_value = axis.data.i32[0];
if (axis_value < 0) {
axis_value += input_tensor.dims->size;
}
auto* input_axis_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&axis_value), sizeof(int32_t));
AddInput(TensorID(input_axis_const->GetID(), 0));
// Compute Min/Max
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Output Node
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
return kTfLiteOk;
}
TfLiteStatus ArgMinMaxOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ArgMinMaxOpBuilder::~ArgMinMaxOpBuilder() {}
OpBuilder* CreateArgMinMaxOpBuilder(GraphBuilder* graph_builder, int op_type) {
return new ArgMinMaxOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

@@ -0,0 +1,46 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ARG_MIN_MAX_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ARG_MIN_MAX_BUILDER_H_
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ArgMinMaxOpBuilder : public OpBuilder {
public:
explicit ArgMinMaxOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ArgMinMaxOpBuilder() override;
private:
TensorID node_output_;
float input_min_, input_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ARG_MIN_MAX_BUILDER_H_

@@ -0,0 +1,127 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/arithmetic_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ArithmeticOpBuilder::PopulateSubGraph(
const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
int tensor_id;
// First input data tensor.
tensor_id = inputs->data[0];
const auto& input1_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input1_tensor, &input1_min_, &input1_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input1_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input1_min_),
sizeof(input1_min_));
auto* input1_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input1_max_),
sizeof(input1_max_));
// Second input data tensor.
tensor_id = inputs->data[1];
const auto& input2_tensor = context->tensors[tensor_id];
// TODO(karimnosseir): Have this as util to generalize to all ops.
if (input2_tensor.allocation_type == kTfLiteMmapRo) {
auto* const_input_node =
graph_builder_->AddConstNodeWithData(tensor_id, input2_tensor);
graph_builder_->AddTensorWithID(tensor_id, const_input_node->GetID(), 0);
}
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input2_tensor, &input2_min_, &input2_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input2_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input2_min_),
sizeof(input2_min_));
auto* input2_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input2_max_),
sizeof(input2_max_));
// Min/max values for input tensors.
AddInput(TensorID(input1_min_const->GetID(), 0));
AddInput(TensorID(input1_max_const->GetID(), 0));
AddInput(TensorID(input2_min_const->GetID(), 0));
AddInput(TensorID(input2_max_const->GetID(), 0));
// Output min/max as inputs, only if it's an Add node.
if (op_node_.op_type == OP_QuantizedAdd_8p8to8) {
output_min_ = 0;
output_max_ = 0;
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
context->tensors[outputs->data[0]], &output_min_, &output_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
if (output_max_ != 0) {
auto* output_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&output_min_),
sizeof(output_min_));
auto* output_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&output_max_),
sizeof(output_max_));
AddInput(TensorID(output_min_const->GetID(), 0));
AddInput(TensorID(output_max_const->GetID(), 0));
}
}
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus ArithmeticOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ArithmeticOpBuilder::~ArithmeticOpBuilder() {}
OpBuilder* CreateArithmeticBuilder(GraphBuilder* graph_builder, int op_type) {
return new ArithmeticOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

@@ -0,0 +1,49 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ARITHMETIC_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ARITHMETIC_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ArithmeticOpBuilder : public OpBuilder {
public:
explicit ArithmeticOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ArithmeticOpBuilder() override;
private:
TensorID node_output_;
float input1_min_, input1_max_, input2_min_, input2_max_, output_min_,
output_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_ARITHMETIC_BUILDER_H_

@@ -0,0 +1,133 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/concat_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ConcatOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
// Only axis 3 is supported.
const TfLiteConcatenationParams* concat_params =
reinterpret_cast<const TfLiteConcatenationParams*>(builtin_data_);
auto* axis_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, (char*)&concat_params->axis,
sizeof(concat_params->axis));
AddInput(TensorID(axis_const->GetID(), 0));
int tensor_id;
// Input data tensors.
input_minima_.reserve(inputs->size);
input_maxima_.reserve(inputs->size);
for (int i = 0; i < inputs->size; ++i) {
tensor_id = inputs->data[i];
float data_min, data_max;
const auto& data_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
data_tensor, &data_min, &data_max, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
input_minima_.push_back(data_min);
input_maxima_.push_back(data_max);
}
// Minima tensors.
for (int i = 0; i < input_minima_.size(); ++i) {
auto* data_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_minima_[i]),
sizeof(input_minima_[i]));
AddInput(TensorID(data_min_const->GetID(), 0));
}
// Maxima tensors.
for (int i = 0; i < input_minima_.size(); ++i) {
auto* data_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_maxima_[i]),
sizeof(input_maxima_[i]));
AddInput(TensorID(data_max_const->GetID(), 0));
}
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
// We requantize the output from concat to the range expected by TFLite.
// Otherwise, we see accuracy issues for cases where the inputs have different
// min/max bounds.
TensorID concat_out = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
const auto& concat_out_min = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
const auto& concat_out_max = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
// Output min/max for requantization.
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
context->tensors[outputs->data[0]], &output_min_, &output_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* output_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, (char*)&output_min_, sizeof(output_min_));
auto* output_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, (char*)&output_max_, sizeof(output_max_));
auto* requantize_op = graph_builder_->AddNode();
requantize_op->SetOpType(OP_Requantize_8to8);
requantize_op->AddInput(concat_out);
requantize_op->AddInput(concat_out_min);
requantize_op->AddInput(concat_out_max);
requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
node_output_ =
requantize_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
requantize_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
requantize_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus ConcatOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ConcatOpBuilder::~ConcatOpBuilder() {}
OpBuilder* CreateConcatBuilder(GraphBuilder* graph_builder, int op_type) {
return new ConcatOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

@@ -0,0 +1,50 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_CONCAT_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_CONCAT_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ConcatOpBuilder : public OpBuilder {
public:
explicit ConcatOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ConcatOpBuilder() override;
private:
TensorID node_output_;
std::vector<float> input_minima_;
std::vector<float> input_maxima_;
float output_min_, output_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_CONCAT_BUILDER_H_

@@ -0,0 +1,378 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/conv_2d_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
namespace {
// Dilated Depthwise Convolution performs SpaceToBatchND & BatchToSpaceND before
// and after the op respectively.
// This helper computes the paddings param for SpaceToBatchND and crops param
// for BatchToSpaceND.
//
// Inspired by tf.nn.with_space_to_batch & tf.required_space_to_batch_paddings.
void ComputeSpaceToBatchParams(int input_height, int input_width,
int weights_height, int weights_width,
const std::vector<int>& dilation_factors_h_w,
const TfLitePadding padding_type,
std::vector<int>* paddings,
std::vector<int>* crops) {
// Base paddings depend on padding applied to the Depthwise Conv op.
// 4-element array: {top, bottom, left, right}.
std::vector<int> base_paddings(4, 0);
if (padding_type == kTfLitePaddingSame) {
const int dilated_weights_h =
dilation_factors_h_w[0] * (weights_height - 1) + 1;
const int dilated_weights_w =
dilation_factors_h_w[1] * (weights_width - 1) + 1;
base_paddings[0] = (dilated_weights_h - 1) / 2;
base_paddings[1] = dilated_weights_h - 1 - (dilated_weights_h - 1) / 2;
base_paddings[2] = (dilated_weights_w - 1) / 2;
base_paddings[3] = dilated_weights_w - 1 - (dilated_weights_w - 1) / 2;
}
// paddings represents {pad_top, pad_bottom, pad_left, pad_right}.
paddings->resize(4, 0);
// crops represents {crop_top, crop_bottom, crop_left, crop_right}.
crops->resize(4, 0);
// Logic for computing paddings & crops follows.
// Taken from tf.required_space_to_batch_paddings, but without array
// operations since we only deal with 2 dimensions.
int pad_start_h = base_paddings[0];
int pad_start_w = base_paddings[2];
int orig_pad_end_h = base_paddings[1];
int orig_pad_end_w = base_paddings[3];
int full_input_h = input_height + pad_start_h + orig_pad_end_h;
int full_input_w = input_width + pad_start_w + orig_pad_end_w;
int pad_end_extra_h =
(dilation_factors_h_w[0] - full_input_h % dilation_factors_h_w[0]) %
dilation_factors_h_w[0];
int pad_end_extra_w =
(dilation_factors_h_w[1] - full_input_w % dilation_factors_h_w[1]) %
dilation_factors_h_w[1];
int pad_end_h = orig_pad_end_h + pad_end_extra_h;
int pad_end_w = orig_pad_end_w + pad_end_extra_w;
// Assign values.
(*paddings)[0] = pad_start_h;
(*paddings)[1] = pad_end_h;
(*paddings)[2] = pad_start_w;
(*paddings)[3] = pad_end_w;
(*crops)[0] = 0;
(*crops)[1] = pad_end_extra_h;
(*crops)[2] = 0;
(*crops)[3] = pad_end_extra_w;
}
// Computes output dimensions for the SpaceToBatchND op used in the dilated
// Depthwise Conv case.
// space_to_batch_paddings should be in format {top, bottom, left, right}.
// These are computed from the documentation for SpaceToBatchND_8's output.
void PopulateSpaceToBatchOutputDims(
int input_batch_size, int input_height_size, int input_width_size,
int input_depth_size, const std::vector<int>& dilation_factors_h_w,
const std::vector<int>& space_to_batch_paddings,
std::vector<int>* space_to_batch_output_dims) {
// Batches.
space_to_batch_output_dims->push_back(
input_batch_size * dilation_factors_h_w[0] * dilation_factors_h_w[1]);
// Height.
space_to_batch_output_dims->push_back((space_to_batch_paddings[0] +
input_height_size +
space_to_batch_paddings[1]) /
dilation_factors_h_w[0]);
// Width.
space_to_batch_output_dims->push_back((space_to_batch_paddings[2] +
input_width_size +
space_to_batch_paddings[3]) /
dilation_factors_h_w[1]);
// Depth.
space_to_batch_output_dims->push_back(input_depth_size);
}
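// Worked example (illustrative): for a 1x9x9xD input with a 3x3 filter,
// dilation factors {2, 2} and SAME padding, the dilated filter is 5x5, so
// base_paddings = {2, 2, 2, 2} and the fully padded input is 13x13. The extra
// end-padding that makes this divisible by the dilation factors yields
// paddings = {2, 3, 2, 3} and crops = {0, 1, 0, 1}; SpaceToBatchND then
// produces a 4x7x7xD tensor that feeds the VALID-padded conv.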
} // namespace
TfLiteStatus Conv2dOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static std::vector<int> quant_bound_shape = {1, 1, 1, 1};
static std::vector<int> dilation_factors_shape = {1, 1, 1, 2};
static std::vector<int> paddings_shape = {1, 1, 2, 2};
// Input data tensor.
const auto& data_tensor = context->tensors[inputs->data[0]];
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
data_tensor, &data_min_, &data_max_, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* data_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&data_min_, sizeof(data_min_));
auto* data_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&data_max_, sizeof(data_max_));
// Gather information about the Convolution operations.
TfLitePadding padding_type = kTfLitePaddingUnknown;
int stride_height = 0;
int stride_width = 0;
bool is_dilated_depthwise_conv = false;
if (op_node_.op_type == OP_Supernode_8x8p32to8) {
const TfLiteConvParams* conv_params =
reinterpret_cast<const TfLiteConvParams*>(builtin_data_);
stride_height = conv_params->stride_height;
stride_width = conv_params->stride_width;
padding_type = conv_params->padding;
} else if (op_node_.op_type == OP_DepthwiseSupernode_8x8p32to8) {
const TfLiteDepthwiseConvParams* conv_params =
reinterpret_cast<const TfLiteDepthwiseConvParams*>(builtin_data_);
stride_height = conv_params->stride_height;
stride_width = conv_params->stride_width;
padding_type = conv_params->padding;
// We only support dilation for DepthwiseConv.
if (conv_params->dilation_height_factor > 1 ||
conv_params->dilation_width_factor > 1) {
is_dilated_depthwise_conv = true;
dilation_factors_h_w_.push_back(conv_params->dilation_height_factor);
dilation_factors_h_w_.push_back(conv_params->dilation_width_factor);
}
}
// Weights tensor
const auto& weights_tensor = context->tensors[inputs->data[1]];
if (weights_tensor.allocation_type != kTfLiteMmapRo) {
context->ReportError(
context, "Weights tensor doesn't have correct allocation type: %s",
weights_tensor.name);
return kTfLiteError;
}
int weights_batch_size, weights_height_size, weights_width_size,
weights_depth_size;
// Hexagon lib expects the weight tensor in HWCN, TFLite uses NHWC.
// Transpose NHWC -> HWCN
GetDims(&weights_batch_size, &weights_height_size, &weights_width_size,
&weights_depth_size, weights_tensor.dims);
weight_shape_ = {weights_height_size, weights_width_size, weights_depth_size,
weights_batch_size};
RuntimeShape nhwc_shape({weights_batch_size, weights_height_size,
weights_width_size, weights_depth_size});
RuntimeShape hwcn_shape({weights_height_size, weights_width_size,
weights_depth_size, weights_batch_size});
std::vector<uint8_t> hwcn(NumElements(&weights_tensor));
TransposeParams transpose_params;
transpose_params.perm_count = 4;
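  // With perm = {1, 2, 3, 0}, output dimension i is taken from input
  // dimension perm[i], so the NHWC weights are written out as HWCN.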
transpose_params.perm[0] = 1;
transpose_params.perm[1] = 2;
transpose_params.perm[2] = 3;
transpose_params.perm[3] = 0;
optimized_ops::Transpose<uint8_t>(transpose_params, nhwc_shape,
weights_tensor.data.uint8, hwcn_shape,
hwcn.data());
// Quantization params for Weights tensor.
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(weights_tensor, &weights_min_, &weights_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* weights_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&weights_min_, sizeof(weights_min_));
auto* weights_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&weights_max_, sizeof(weights_max_));
auto* const_weights_node = graph_builder_->AddConstNodeWithData(
weight_shape_.data(), (char*)hwcn.data(), hwcn.size() * sizeof(hwcn[0]));
graph_builder_->AddTensorWithID(inputs->data[1], const_weights_node->GetID(),
0);
// Stride node.
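  // Hexagon encodes the strides in the const node's shape
  // ({1, stride_height, stride_width, 1}); the node's data is unused, hence
  // the dummy value.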
static int dummy = 0;
stride_shape_ = {1, stride_height, stride_width, 1};
auto* stride_node = graph_builder_->AddConstNodeWithData(
stride_shape_.data(), (char*)&dummy, sizeof(dummy));
// Output dimensions.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
// Output min/max.
// TODO(b/129276536): Add support for other activations here. Current
// implementation assumes None/Relu.
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
context->tensors[outputs->data[0]], &output_min_, &output_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* output_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&output_min_, sizeof(output_min_));
auto* output_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&output_max_, sizeof(output_max_));
// Bias node.
const auto& bias_tensor = context->tensors[inputs->data[2]];
auto* bias_data_node =
graph_builder_->AddConstNodeWithData(inputs->data[2], bias_tensor);
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
bias_tensor, &bias_min_, &bias_max_, std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max()));
auto* bias_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&bias_min_, sizeof(bias_min_));
auto* bias_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), (char*)&bias_max_, sizeof(bias_max_));
if (is_dilated_depthwise_conv) {
// For dilated Depthwise Conv, we convert this node into SpaceToBatchND, and
// then chain Supernode & BatchToSpaceND after it.
int input_batch_size, input_height_size, input_width_size, input_depth_size;
GetDims(&input_batch_size, &input_height_size, &input_width_size,
&input_depth_size, data_tensor.dims);
ComputeSpaceToBatchParams(
input_height_size, input_width_size, weights_height_size,
weights_width_size, dilation_factors_h_w_, padding_type,
&space_to_batch_paddings_, &batch_to_space_crops_);
auto* dilation_factors_const = graph_builder_->AddConstNodeWithData(
dilation_factors_shape.data(), (char*)dilation_factors_h_w_.data(),
dilation_factors_h_w_.size() * sizeof(stride_height));
auto* paddings_const = graph_builder_->AddConstNodeWithData(
paddings_shape.data(), (char*)space_to_batch_paddings_.data(),
space_to_batch_paddings_.size() * sizeof(stride_height));
auto* crops_const = graph_builder_->AddConstNodeWithData(
paddings_shape.data(), (char*)batch_to_space_crops_.data(),
batch_to_space_crops_.size() * sizeof(stride_height));
// 1. SpaceToBatch.
SetOpType(OP_SpaceToBatchND_8);
AddInput(graph_builder_->GetHexagonTensorId(inputs->data[0]));
AddInput(TensorID(dilation_factors_const->GetID(), 0));
AddInput(TensorID(paddings_const->GetID(), 0));
AddInput(TensorID(data_min_const->GetID(), 0));
AddInput(TensorID(data_max_const->GetID(), 0));
std::vector<int> space_to_batch_output_dims;
PopulateSpaceToBatchOutputDims(
input_batch_size, input_height_size, input_width_size, input_depth_size,
dilation_factors_h_w_, space_to_batch_paddings_,
&space_to_batch_output_dims);
TensorID space_to_batch_op_out =
AddOutput(sizeof(uint8_t), 4, space_to_batch_output_dims);
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
// 2. Depthwise Conv.
auto* conv_op = graph_builder_->AddNode();
conv_op->SetOpType(OP_DepthwiseSupernode_8x8p32to8);
conv_op->AddInput(space_to_batch_op_out);
conv_op->AddInput(TensorID(const_weights_node->GetID(), 0));
conv_op->AddInput(TensorID(data_min_const->GetID(), 0));
conv_op->AddInput(TensorID(data_max_const->GetID(), 0));
conv_op->AddInput(TensorID(weights_min_const->GetID(), 0));
conv_op->AddInput(TensorID(weights_max_const->GetID(), 0));
conv_op->AddInput(TensorID(stride_node->GetID(), 0));
conv_op->AddInput(TensorID(bias_data_node->GetID(), 0));
conv_op->AddInput(TensorID(bias_min_const->GetID(), 0));
conv_op->AddInput(TensorID(bias_max_const->GetID(), 0));
conv_op->AddInput(TensorID(output_min_const->GetID(), 0));
conv_op->AddInput(TensorID(output_max_const->GetID(), 0));
// The padding is handled by the SpaceToBatch/BatchToSpace ops surrounding
// this node. Hence, this op's padding remains VALID only.
// tf.nn.with_space_to_batch's docs state the following pattern:
// """
// batch_to_space_nd(
// op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
// num_spatial_dims,
// "VALID")
// adjusted_dilation_rate,
// adjusted_crops)
// """
conv_op->SetPaddingType(NN_PAD_VALID);
// These dimensions are probably a little excessive, but they upper-bound
// the possible output from DepthwiseConv.
// TODO(b/139955809): Find better bounds?
TensorID conv_output = conv_op->AddOutput(
sizeof(uint8_t), 4,
{output_batch_size * dilation_factors_h_w_[0] *
dilation_factors_h_w_[1],
output_height_size, output_width_size, output_depth_size});
conv_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
conv_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
// 3. BatchToSpace.
auto* batch_to_space_op = graph_builder_->AddNode();
batch_to_space_op->SetOpType(OP_BatchToSpaceND_8);
batch_to_space_op->AddInput(conv_output);
batch_to_space_op->AddInput(TensorID(dilation_factors_const->GetID(), 0));
batch_to_space_op->AddInput(TensorID(crops_const->GetID(), 0));
batch_to_space_op->AddInput(TensorID(output_min_const->GetID(), 0));
batch_to_space_op->AddInput(TensorID(output_max_const->GetID(), 0));
node_output_ =
batch_to_space_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
batch_to_space_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
batch_to_space_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
} else {
// Standard case.
// Padding type.
if (padding_type == kTfLitePaddingSame) {
SetPaddingType(NN_PAD_SAME);
} else if (padding_type == kTfLitePaddingValid) {
SetPaddingType(NN_PAD_VALID);
}
// Inputs
AddInput(graph_builder_->GetHexagonTensorId(inputs->data[0]));
AddInput(TensorID(const_weights_node->GetID(), 0));
AddInput(TensorID(data_min_const->GetID(), 0));
AddInput(TensorID(data_max_const->GetID(), 0));
AddInput(TensorID(weights_min_const->GetID(), 0));
AddInput(TensorID(weights_max_const->GetID(), 0));
AddInput(TensorID(stride_node->GetID(), 0));
AddInput(TensorID(bias_data_node->GetID(), 0));
AddInput(TensorID(bias_min_const->GetID(), 0));
AddInput(TensorID(bias_max_const->GetID(), 0));
AddInput(TensorID(output_min_const->GetID(), 0));
AddInput(TensorID(output_max_const->GetID(), 0));
// Outputs
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
}
return kTfLiteOk;
}
TfLiteStatus Conv2dOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
Conv2dOpBuilder::~Conv2dOpBuilder() {}
OpBuilder* CreateConv2DBuilder(GraphBuilder* graph_builder, int op_type) {
return new Conv2dOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

@@ -0,0 +1,57 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_CONV_2D_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_CONV_2D_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class Conv2dOpBuilder : public OpBuilder {
public:
explicit Conv2dOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~Conv2dOpBuilder();
private:
TensorID node_output_;
std::vector<float> transposed_weights_;
std::vector<int> stride_shape_;
std::vector<int> weight_shape_;
float data_min_, data_max_, weights_min_, weights_max_, bias_min_, bias_max_,
output_min_, output_max_;
// Only used for dilated Depthwise Conv.
std::vector<int> dilation_factors_h_w_;
std::vector<int> space_to_batch_paddings_;
std::vector<int> batch_to_space_crops_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_CONV_2D_BUILDER_H_

@@ -0,0 +1,83 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/l2_normalization_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus L2NormalizationOpBuilder::PopulateSubGraph(
const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus L2NormalizationOpBuilder::RegisterOutputs(
const TfLiteIntArray* outputs, TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
L2NormalizationOpBuilder::~L2NormalizationOpBuilder() {}
OpBuilder* CreateL2NormalizationBuilder(GraphBuilder* graph_builder,
int op_type) {
return new L2NormalizationOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

@@ -0,0 +1,48 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_L2_NORMALIZATION_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_L2_NORMALIZATION_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class L2NormalizationOpBuilder : public OpBuilder {
public:
explicit L2NormalizationOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~L2NormalizationOpBuilder() override;
private:
TensorID node_output_;
float input_min_, input_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_L2_NORMALIZATION_BUILDER_H_

@@ -0,0 +1,211 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/matmul_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
// The TFLite 'Fully-connected' quantized op corresponds to the following
// subgraph in Hexagon:
// Data (uint8), Weights (const, uint8) => MatMul => MatMul out (int32)
// Bias (const, int32) => Quantize => Bias (uint8)
// MatMul out (int32) => Quantize => MatMul out (uint8)
// MatMul out (uint8), Bias (uint8) => QuantizedAdd => Output (uint8)
// TODO(b/129276536): Add activation support.
TfLiteStatus MatMulOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
// Data tensor.
int data_tensor_id = inputs->data[0];
const auto& data_tensor = context->tensors[data_tensor_id];
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
data_tensor, &data_min_, &data_max_, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* data_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&data_min_),
sizeof(data_min_));
auto* data_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&data_max_),
sizeof(data_max_));
// Weights vector.
int weights_tensor_id = inputs->data[1];
const auto& weights_tensor = context->tensors[weights_tensor_id];
// TODO(srjoglekar): Abstract out.
if (weights_tensor.allocation_type != kTfLiteMmapRo) {
context->ReportError(
context, "Weights tensor doesn't have correct allocation type: %s",
weights_tensor.name);
return kTfLiteError;
}
int batch_size, height_size, width_size, depth_size;
// Hexagon lib expects the weight tensor in NHCW, TFLite uses NHWC.
// Transpose NHWC -> NHCW
GetDims(&batch_size, &height_size, &width_size, &depth_size,
weights_tensor.dims);
weights_shape_ = {batch_size, height_size, depth_size, width_size};
RuntimeShape nhwc_shape({batch_size, height_size, width_size, depth_size});
RuntimeShape nhcw_shape({batch_size, height_size, depth_size, width_size});
std::vector<uint8_t> nhcw(NumElements(&weights_tensor));
TransposeParams transpose_params;
transpose_params.perm_count = 4;
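  // With perm = {0, 1, 3, 2}, the last two dimensions are swapped, turning
  // the NHWC weights into the NHCW layout noted above.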
transpose_params.perm[0] = 0;
transpose_params.perm[1] = 1;
transpose_params.perm[2] = 3;
transpose_params.perm[3] = 2;
optimized_ops::Transpose<uint8_t>(transpose_params, nhwc_shape,
weights_tensor.data.uint8, nhcw_shape,
nhcw.data());
auto* const_weights_node = graph_builder_->AddConstNodeWithData(
weights_shape_.data(), reinterpret_cast<char*>(nhcw.data()),
weights_tensor.bytes);
graph_builder_->AddTensorWithID(weights_tensor_id,
const_weights_node->GetID(), 0);
ComputeMinAndMaxQuantValues(weights_tensor, &weights_min_, &weights_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
auto* weights_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&weights_min_),
sizeof(weights_min_));
auto* weights_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&weights_max_),
sizeof(weights_max_));
// Data and weight tensors in required order.
AddInput(graph_builder_->GetHexagonTensorId(data_tensor_id));
AddInput(graph_builder_->GetHexagonTensorId(weights_tensor_id));
AddInput(TensorID(data_min_const->GetID(), 0));
AddInput(TensorID(data_max_const->GetID(), 0));
AddInput(TensorID(weights_min_const->GetID(), 0));
AddInput(TensorID(weights_max_const->GetID(), 0));
// Outputs for the MatMul node, which are in int32 format.
// Output shape should still be the same.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
const auto& matmul_out = AddOutput(sizeof(int32_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
const auto& matmul_out_min = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
const auto& matmul_out_max = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
// Quantize the MatMul output to quint8.
auto* quantize_matmul_op = graph_builder_->AddNode();
quantize_matmul_op->SetOpType(OP_QuantizeDownAndShrinkRange_32to8);
quantize_matmul_op->AddInput(matmul_out);
quantize_matmul_op->AddInput(matmul_out_min);
quantize_matmul_op->AddInput(matmul_out_max);
const auto& quantized_matmul_out =
quantize_matmul_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
const auto& quantized_matmul_out_min =
quantize_matmul_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
const auto& quantized_matmul_out_max =
quantize_matmul_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
// Bias tensor.
int bias_tensor_id = inputs->data[2];
const auto& bias_tensor = context->tensors[bias_tensor_id];
auto* const_bias_node =
graph_builder_->AddConstNodeWithData(bias_tensor_id, bias_tensor);
graph_builder_->AddTensorWithID(bias_tensor_id, const_bias_node->GetID(), 0);
ComputeMinAndMaxQuantValues(bias_tensor, &bias_min_, &bias_max_,
std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max());
auto* bias_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&bias_min_),
sizeof(bias_min_));
auto* bias_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&bias_max_),
sizeof(bias_max_));
// Quantize bias
auto* quantize_bias_op = graph_builder_->AddNode();
quantize_bias_op->SetOpType(OP_QuantizeDownAndShrinkRange_32to8);
quantize_bias_op->AddInput(
graph_builder_->GetHexagonTensorId(bias_tensor_id));
quantize_bias_op->AddInput(TensorID(bias_min_const->GetID(), 0));
quantize_bias_op->AddInput(TensorID(bias_max_const->GetID(), 0));
const auto& quantized_bias_out =
quantize_bias_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
const auto& quantized_bias_out_min =
quantize_bias_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
const auto& quantized_bias_out_max =
quantize_bias_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
// Output min/max.
ComputeMinAndMaxQuantValues(context->tensors[outputs->data[0]], &output_min_,
&output_max_, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
auto* output_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&output_min_),
sizeof(output_min_));
auto* output_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&output_max_),
sizeof(output_max_));
// MatMul + Bias.
auto* bias_add_op = graph_builder_->AddNode();
bias_add_op->SetOpType(OP_QuantizedAdd_8p8to8);
bias_add_op->AddInput(quantized_matmul_out);
bias_add_op->AddInput(quantized_bias_out);
bias_add_op->AddInput(quantized_matmul_out_min);
bias_add_op->AddInput(quantized_matmul_out_max);
bias_add_op->AddInput(quantized_bias_out_min);
bias_add_op->AddInput(quantized_bias_out_max);
bias_add_op->AddInput(TensorID(output_min_const->GetID(), 0));
bias_add_op->AddInput(TensorID(output_max_const->GetID(), 0));
node_output_ = bias_add_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
bias_add_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
bias_add_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus MatMulOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
MatMulOpBuilder::~MatMulOpBuilder() {}
OpBuilder* CreateMatMulBuilder(GraphBuilder* graph_builder, int op_type) {
return new MatMulOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite
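
The weight shuffle above (NHWC to NHCW) is the one piece of CPU-side data movement in this builder. Below is a minimal standalone sketch of the same permutation, for illustration only; the delegate itself uses `optimized_ops::Transpose` with `perm = {0, 1, 3, 2}` as shown above.

```
#include <cstdint>
#include <vector>

// Reorders an NHWC uint8 buffer into NHCW, i.e. applies perm = {0, 1, 3, 2}.
// Mirrors the shuffle MatMulOpBuilder performs before handing weights to
// Hexagon; illustration only.
std::vector<uint8_t> TransposeNhwcToNhcw(const std::vector<uint8_t>& nhwc,
                                         int batch, int height, int width,
                                         int depth) {
  std::vector<uint8_t> nhcw(nhwc.size());
  for (int b = 0; b < batch; ++b) {
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        for (int d = 0; d < depth; ++d) {
          // Source index in NHWC layout, destination index in NHCW layout.
          const int src = ((b * height + h) * width + w) * depth + d;
          const int dst = ((b * height + h) * depth + d) * width + w;
          nhcw[dst] = nhwc[src];
        }
      }
    }
  }
  return nhcw;
}
```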

View File

@ -0,0 +1,51 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_MATMUL_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_MATMUL_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class MatMulOpBuilder : public OpBuilder {
public:
explicit MatMulOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~MatMulOpBuilder() override;
private:
TensorID node_output_;
std::vector<int> weights_shape_, bias_shape_;
std::vector<float> transposed_weights_;
float data_min_, data_max_, weights_min_, weights_max_, bias_min_, bias_max_,
output_min_, output_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_MATMUL_BUILDER_H_

View File

@ -0,0 +1,69 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/neg_op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus NegOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int scalar_shape[] = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
auto* input_min_const = graph_builder_->AddConstNodeWithData(
scalar_shape, reinterpret_cast<char*>(&input_min_), sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
scalar_shape, reinterpret_cast<char*>(&input_max_), sizeof(input_max_));
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus NegOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
OpBuilder* CreateNegOpBuilder(GraphBuilder* graph_builder, int op_type) {
return new NegOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,44 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_NEG_OP_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_NEG_OP_BUILDER_H_
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class NegOpBuilder : public OpBuilder {
public:
explicit NegOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
private:
TensorID node_output_;
float input_min_, input_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_NEG_OP_BUILDER_H_

View File

@ -0,0 +1,207 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_factory.h"
namespace tflite {
namespace delegates {
namespace hexagon {
OpBuilder* GraphBuilder::CreateOpBuilderFromTfLiteOp(int op_type) {
switch (op_type) {
case kTfLiteBuiltinAdd:
return CreateArithmeticBuilder(this, OP_QuantizedAdd_8p8to8);
case kTfLiteBuiltinArgMax:
return CreateArgMinMaxOpBuilder(this, OP_ArgMax_8toInt32);
case kTfLiteBuiltinArgMin:
return CreateArgMinMaxOpBuilder(this, OP_ArgMin_8);
case kTfLiteBuiltinMul:
return CreateArithmeticBuilder(this, OP_QuantizedMul_8x8to8);
case kTfLiteBuiltinSub:
return CreateArithmeticBuilder(this, OP_QuantizedSub_8p8to8);
case kTfLiteBuiltinMean:
return CreateReduceBuilder(this, OP_QuantizedMean_8);
case kTfLiteBuiltinSum:
return CreateReduceBuilder(this, OP_QuantizedSum_8to32);
case kTfLiteBuiltinPad:
return CreatePadBuilder(this, OP_QuantizedPad_8);
case kTfLiteBuiltinFullyConnected:
return CreateMatMulBuilder(this, OP_QuantizedMatMul_8x8to32);
case kTfLiteBuiltinAveragePool2d:
return CreatePool2DBuilder(this, OP_QuantizedAvgPool_8);
case kTfLiteBuiltinMaxPool2d:
return CreatePool2DBuilder(this, OP_QuantizedMaxPool_8);
case kTfLiteBuiltinConcatenation:
return CreateConcatBuilder(this, OP_QuantizedConcat_8);
case kTfLiteBuiltinConv2d:
return CreateConv2DBuilder(this, OP_Supernode_8x8p32to8);
case kTfLiteBuiltinTransposeConv:
return CreateTransposeConv2DBuilder(
this, OP_QuantizedTransposeConv2d_8x8p32to8);
case kTfLiteBuiltinDepthwiseConv2d:
return CreateConv2DBuilder(this, OP_DepthwiseSupernode_8x8p32to8);
case kTfLiteBuiltinReshape:
return CreateReshapeBuilder(this, OP_Reshape);
case kTfLiteBuiltinSoftmax:
return CreateSoftmaxBuilder(this, OP_QuantizedSoftmax_8);
case kTfLiteBuiltinResizeNearestNeighbor:
return CreateResizeNearestNeighborBuilder(this,
OP_ResizeNearestNeighbor_8);
case kTfLiteBuiltinL2Normalization:
return CreateL2NormalizationBuilder(this, OP_L2Normalize_8);
case kTfLiteBuiltinRelu:
return CreateActivationBuilder(this, OP_QuantizedRelu_8);
case kTfLiteBuiltinRelu6:
return CreateActivationBuilder(this, OP_QuantizedReluX_8);
case kTfLiteBuiltinTanh:
return CreateActivationBuilder(this, OP_QuantizedTanh_8);
case kTfLiteBuiltinLogistic:
return CreateActivationBuilder(this, OP_QuantizedSigmoid_8);
case kTfLiteBuiltinSplit:
return CreateSplitBuilder(this, OP_QuantizedSplit_8);
case kTfLiteBuiltinResizeBilinear:
return CreateResizeBilinearOpBuilder(this, OP_QuantizedResizeBilinear_8);
case kTfLiteBuiltinNeg:
return CreateNegOpBuilder(this, OP_QuantizedNeg_8);
case kTfLiteBuiltinTranspose:
return CreateTransposeBuilder(this, OP_Transpose_8);
default:
context_->ReportError(context_, "Op not supported: %d", op_type);
return nullptr;
}
}
OpBuilder* GraphBuilder::AddConstNodeWithData(const int shape[], char* data,
int data_size) {
builders_.emplace_back(new OpBuilder(this, OP_Const));
builders_.back()->SetConstNode();
builders_.back()->SetNodeId(builders_.size());
int error = hexagon_nn_->hexagon_nn_append_const_node(
graph_id_, builders_.size(), shape[0], shape[1], shape[2], shape[3],
reinterpret_cast<const uint8_t*>(data), data_size);
if (error != 0) {
context_->ReportError(context_, "Error adding const node with shape id: %d",
(int)builders_.size());
return nullptr;
}
return builders_.back().get();
}
OpBuilder* GraphBuilder::AddConstNodeWithData(int tensor_id,
const TfLiteTensor& tensor) {
builders_.emplace_back(new OpBuilder(this, OP_Const));
const int node_id = builders_.size();
builders_.back()->SetConstNode();
builders_.back()->SetNodeId(node_id);
int batch_size, height_size, width_size, depth_size;
GetDims(&batch_size, &height_size, &width_size, &depth_size, tensor.dims);
int error = hexagon_nn_->hexagon_nn_append_const_node(
graph_id_, node_id, batch_size, height_size, width_size, depth_size,
reinterpret_cast<const uint8_t*>(tensor.data.raw), tensor.bytes);
if (error != 0) {
context_->ReportError(
context_, "Failed to add const node for tensor with id: %d", tensor_id);
return nullptr;
}
AddTensorWithID(tensor_id, node_id, 0);
return builders_.back().get();
}
void GraphBuilder::AddInputTensors(const TfLiteIntArray* input_tensors,
TfLiteContext* context) {
builders_.emplace_back(new OpBuilder(this, OP_INPUT));
builders_.back()->SetNodeId(builders_.size());
// We need to track num_inputs since not all input_tensors are actual input
// data. Some are constants.
int num_inputs = 0;
for (int i = 0; i < input_tensors->size; ++i) {
const int tensor_id = input_tensors->data[i];
const auto& tensor = context->tensors[tensor_id];
if (tensor.allocation_type != kTfLiteMmapRo) {
AddTensorWithID(tensor_id, builders_.size(), num_inputs);
builders_.back()->AddOutput(tensor.dims);
++num_inputs;
}
}
}
void GraphBuilder::AddOutputTensors(const TfLiteIntArray* output_tensors,
TfLiteContext* context) {
builders_.emplace_back(new OpBuilder(this, OP_OUTPUT));
builders_.back()->SetNodeId(builders_.size());
for (int i = 0; i < output_tensors->size; ++i) {
const int tensor_id = output_tensors->data[i];
builders_.back()->AddInput(GetHexagonTensorId(tensor_id));
}
}
OpBuilder::TensorID OpBuilder::AddOutput(const TfLiteIntArray* dims) {
op_node_.outputs.push_back(hexagon_nn_output());
op_node_.outputs.back().elementsize = sizeof(float);
op_node_.outputs.back().rank = 4;
// TODO(karimnosseir): What is a good way to estimate the max size?
int batch_size, height_size, width_size, depth_size;
GetDims(&batch_size, &height_size, &width_size, &depth_size, dims);
auto& max_sizes = op_node_.outputs.back().max_sizes;
max_sizes[0] = batch_size;
max_sizes[1] = height_size;
max_sizes[2] = width_size;
max_sizes[3] = depth_size;
return TensorID(GetID(), op_node_.outputs.size() - 1);
}
OpBuilder::TensorID OpBuilder::AddOutput(
int elementsize, int rank, const std::vector<int>& max_sizes_vect) {
op_node_.outputs.push_back(hexagon_nn_output());
op_node_.outputs.back().elementsize = elementsize;
op_node_.outputs.back().rank = rank;
auto& max_sizes = op_node_.outputs.back().max_sizes;
for (int i = 0; i < max_sizes_vect.size(); ++i) {
max_sizes[i] = max_sizes_vect[i];
}
return TensorID(GetID(), op_node_.outputs.size() - 1);
}
const OpNode* OpBuilder::Build() {
for (const auto& id : input_ids_) {
op_node_.inputs.push_back(hexagon_nn_input());
op_node_.inputs.back().src_id = id.first;
op_node_.inputs.back().output_idx = id.second;
}
return &op_node_;
}
OpBuilder* GraphBuilder::AddNode() {
OpBuilder* op = new OpBuilder(this, OP_Nop);
builders_.emplace_back(op);
op->SetNodeId(builders_.size());
return op;
}
OpBuilder* GraphBuilder::AddNodeFromTfLiteOp(int op_type, TfLiteNode* node) {
OpBuilder* op = CreateOpBuilderFromTfLiteOp(op_type);
builders_.emplace_back(op);
op->SetNodeId(builders_.size());
op->SetBuiltinData(node->builtin_data);
op->SetTfLiteNode(node);
return op;
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite
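
Every builder in these files repeats one idiom for quantization bounds: compute the float min/max for a tensor, wrap each scalar in a {1, 1, 1, 1} const node, and wire it in as an input. A condensed sketch of that idiom follows, assuming a valid `GraphBuilder* graph_builder_` and an `OpBuilder* op` supplied by the surrounding builder code; the values are illustrative.

```
// Idiom used throughout the builders: wrap a scalar float in a 1x1x1x1
// const node and feed it to an op as a (node_id, output_idx) TensorID.
// Assumes graph_builder_ and op come from the enclosing builder.
static int quant_bound_shape[] = {1, 1, 1, 1};
float input_min = -1.0f, input_max = 1.0f;  // Illustrative values.

auto* min_const = graph_builder_->AddConstNodeWithData(
    quant_bound_shape, reinterpret_cast<char*>(&input_min),
    sizeof(input_min));
auto* max_const = graph_builder_->AddConstNodeWithData(
    quant_bound_shape, reinterpret_cast<char*>(&input_max),
    sizeof(input_max));
op->AddInput(OpBuilder::TensorID(min_const->GetID(), 0));
op->AddInput(OpBuilder::TensorID(max_const->GetID(), 0));
```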

View File

@ -0,0 +1,277 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_OP_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_OP_BUILDER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "hexagon/hexagon_nn_ops.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_implementation.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
namespace tflite {
namespace delegates {
namespace hexagon {
struct OpNode {
std::vector<hexagon_nn_input> inputs;
std::vector<hexagon_nn_output> outputs;
// Value from the Enum of Ops in hexagon_nn_ops
int op_type;
hexagon_nn_padding_type padding_type = NN_PAD_NA;
// Id of node in the Hexagon graph.
int node_id = -1;
};
class GraphBuilder;
class OpBuilder {
public:
OpBuilder(GraphBuilder* graph_builder, int hexagon_op_type)
: graph_builder_(graph_builder) {
op_node_.op_type = hexagon_op_type;
}
// A tensor is identified in the graph using a pair of IDs:
// (node ID, output tensor ID), i.e. the node producing this tensor and
// the index of the tensor in that node's output list.
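// For example, TensorID(5, 0) refers to output 0 of the node whose ID is 5.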
using TensorID = std::pair<int, int>;
virtual ~OpBuilder() {}
// TODO(karimnosseir): Do we need the builder pattern, or are these setters
// too few to be worth it?
void SetOpType(int op_type) { op_node_.op_type = op_type; }
void SetNodeId(int node_id) { op_node_.node_id = node_id; }
void SetConstNode() { op_node_.op_type = OP_Const; }
void SetPaddingType(hexagon_nn_padding_type padding_type) {
op_node_.padding_type = padding_type;
}
void SetBuiltinData(void* builtin_data) { builtin_data_ = builtin_data; }
bool IsConstNode() const { return op_node_.op_type == OP_Const; }
void print() {}
const OpNode* Build();
void AddInput(const TensorID& tensor_id) { input_ids_.push_back(tensor_id); }
TensorID AddOutput(const TfLiteIntArray* dims);
TensorID AddOutput(int elementsize, int rank,
const std::vector<int>& max_sizes);
int GetID() const { return op_node_.node_id; }
int GetOpType() const { return op_node_.op_type; }
void SetTfLiteNode(const TfLiteNode* node) { tflite_node_ = node; }
virtual TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
return kTfLiteOk;
}
virtual TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
return kTfLiteOk;
}
protected:
// Helper method to fetch dimensions.
// TODO(karimnosseir): Move to a shared place.
void GetDims(int* batch_size, int* height_size, int* width_size,
int* depth_size, const TfLiteIntArray* dims) {
int* dim[] = {batch_size, height_size, width_size, depth_size};
for (int i = 0; i < 4; ++i) *(dim[i]) = 1;
for (int i = 4 - dims->size; i < 4; ++i) {
*dim[i] = dims->data[i - (4 - dims->size)];
}
}
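// For example, a 2-D TFLite tensor with dims {3, 5} right-aligns to the
// 4-D Hexagon layout as batch = 1, height = 1, width = 3, depth = 5.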
template <typename T>
TfLiteStatus ComputeMinAndMaxQuantValues(const TfLiteTensor& tensor,
float* min, float* max, T min_value,
T max_value) {
*min = 0;
*max = 0;
const TfLiteQuantization& quant = tensor.quantization;
if (quant.type != TfLiteQuantizationType::kTfLiteAffineQuantization) {
printf("Tensor not quantized: %s\n", tensor.name);
return kTfLiteError;
}
const TfLiteAffineQuantization* params =
static_cast<const TfLiteAffineQuantization*>(quant.params);
if (params->quantized_dimension != 0) {
printf("Quantized dimensions not 0 for tensor: %s\n", tensor.name);
return kTfLiteError;
}
float scale = params->scale->data[0];
float zero_point = static_cast<float>(params->zero_point->data[0]);
*min = scale * (static_cast<float>(min_value) - zero_point);
*max = scale * (static_cast<float>(max_value) - zero_point);
return kTfLiteOk;
}
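// Worked example (illustrative numbers, not from any model): for a uint8
// tensor with scale = 0.5 and zero_point = 128,
//   *min = 0.5 * (0   - 128) = -64.0
//   *max = 0.5 * (255 - 128) =  63.5
// i.e. the float interval spanned by the quantized range under
// real_value = scale * (quantized_value - zero_point).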
OpNode op_node_;
// Inputs to the current op. Each pair identifies a single output from
// another node (node_id, output_id).
std::vector<TensorID> input_ids_;
// Pointer to the graph builder.
GraphBuilder* graph_builder_ = nullptr;
// Data needed by this node.
void* builtin_data_ = nullptr;
// TODO(karimnosseir): Currently we only use it for getting output
// size. Can we avoid passing it?
const TfLiteNode* tflite_node_ = nullptr;
};
class GraphBuilder {
public:
GraphBuilder(const HexagonNN* hexagon_nn, TfLiteContext* context,
int graph_id)
: hexagon_nn_(hexagon_nn), context_(context), graph_id_(graph_id) {}
// Returns a per-op builder. 'op_type' is the TfLite BuiltinOperator.
OpBuilder* AddNodeFromTfLiteOp(int op_type, TfLiteNode* node);
// Add a node to the graph. The caller is responsible for setting the
// correct data in the op.
OpBuilder* AddNode();
// Add const node that provides the data held by 'tensor'.
OpBuilder* AddConstNodeWithData(int tensor_id, const TfLiteTensor& tensor);
// Same as above, but takes the shape of the tensor that will hold the data.
OpBuilder* AddConstNodeWithData(const int shape[], char* data, int data_size);
OpBuilder* CreateOpBuilderFromTfLiteOp(int op_type);
// Construct Input node with 'input_tensors' as output.
void AddInputTensors(const TfLiteIntArray* input_tensors,
TfLiteContext* context);
// Construct Output node with 'output_tensors' as input.
void AddOutputTensors(const TfLiteIntArray* output_tensors,
TfLiteContext* context);
// Returns tensor id inside Hexagon graph.
OpBuilder::TensorID GetHexagonTensorId(int tflite_tensor_index) {
if (!HasTensor(tflite_tensor_index)) {
printf("Could not find tensor id: %d\n", tflite_tensor_index);
// Return invalid ID.
return OpBuilder::TensorID(-1, -1);
}
return tensors_[tflite_tensor_index];
}
// Returns true if this tensor was previously added to the graph.
bool HasTensor(int tflite_tensor_index) {
if (tensors_.size() <= tflite_tensor_index) {
return false;
}
// The first field is the node ID; ID 0 is reserved, so anything > 0
// marks a correctly initialized entry.
return tensors_[tflite_tensor_index].first != 0;
}
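// For example, after AddTensorWithID(7, 3, 1), HasTensor(7) is true and
// GetHexagonTensorId(7) returns TensorID(3, 1).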
void AddDebugNode() {}
void Build() {
for (int i = 0; i < builders_.size(); ++i) {
if (builders_[i]->IsConstNode()) {
continue;
}
const OpNode* op_node = builders_[i]->Build();
int error = hexagon_nn_->hexagon_nn_append_node(
graph_id_, op_node->node_id, op_node->op_type, op_node->padding_type,
op_node->inputs.data(), op_node->inputs.size(),
op_node->outputs.data(), op_node->outputs.size());
if (error != 0) {
printf("Error adding node: id:%d, op_type:%d\n", op_node->node_id,
op_node->op_type);
}
}
}
void print() {
printf("------------------------------\n");
std::vector<unsigned char> buf(10000);
hexagon_nn_->hexagon_nn_snpprint(graph_id_, buf.data(), buf.size());
printf("%s", buf.data());
printf("------------------------------\n");
fflush(stdout);
}
// Add new tensor mapping to the tensor list.
bool AddTensorWithID(int tflite_tensor_id, int hexagon_node_id,
int hexagon_node_output_id) {
if (HasTensor(tflite_tensor_id)) {
return false;
}
if (tensors_.size() <= tflite_tensor_id) {
tensors_.resize(tflite_tensor_id + 1);
}
tensors_[tflite_tensor_id] =
OpBuilder::TensorID(hexagon_node_id, hexagon_node_output_id);
return true;
}
int GetOpTypeId(int node_id) {
if (node_id > builders_.size()) {
return -1;
}
return builders_[node_id - 1]->GetOpType();
}
private:
// Helper method to fetch dimensions.
// TODO(karimnosseir): Move this method to shared place.
void GetDims(int* batch_size, int* height_size, int* width_size,
int* depth_size, const TfLiteIntArray* dims) {
int* dim[] = {batch_size, height_size, width_size, depth_size};
for (int i = 0; i < 4; ++i) *(dim[i]) = 1;
for (int i = 4 - dims->size; i < 4; ++i) {
*dim[i] = dims->data[i - (4 - dims->size)];
}
}
const HexagonNN* hexagon_nn_ = nullptr;
TfLiteContext* context_ = nullptr;
int graph_id_ = -1;
std::vector<std::unique_ptr<OpBuilder>> builders_;
// The index in the vector is the TFLite tensor index; the value is the
// tensor's ID in the Hexagon graph.
std::vector<OpBuilder::TensorID> tensors_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_OP_BUILDER_H_
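
Taken together, `OpBuilder` and `GraphBuilder` give the delegate kernel a small graph-assembly API. A rough sketch of how a delegate kernel might drive it is shown below; `hexagon_nn`, `context`, `graph_id`, the partition's `nodes`, `op_types`, and `num_nodes`, and the input/output tensor arrays are all assumed to come from the surrounding delegate code, and error handling is omitted.

```
// Sketch only: identifiers below (hexagon_nn, nodes, op_types, ...) are
// assumed to be provided by the delegate kernel.
GraphBuilder builder(hexagon_nn, context, graph_id);

// 1. INPUT node producing the partition's non-constant input tensors.
builder.AddInputTensors(input_tensors, context);

// 2. One OpBuilder per TFLite node in the delegated partition.
for (int i = 0; i < num_nodes; ++i) {
  OpBuilder* op = builder.AddNodeFromTfLiteOp(op_types[i], nodes[i]);
  op->PopulateSubGraph(nodes[i]->inputs, nodes[i]->outputs, context);
  op->RegisterOutputs(nodes[i]->outputs, context);
}

// 3. OUTPUT node consuming the partition's outputs, then append all
//    non-const nodes to the Hexagon graph via hexagon_nn_append_node.
builder.AddOutputTensors(output_tensors, context);
builder.Build();
```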

View File

@ -0,0 +1,51 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_OP_FACTORY_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_OP_FACTORY_H_
namespace tflite {
namespace delegates {
namespace hexagon {
class GraphBuilder;
class OpBuilder;
OpBuilder* CreateArgMinMaxOpBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateActivationBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateArithmeticBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateMatMulBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateConcatBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateConv2DBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateTransposeConv2DBuilder(GraphBuilder* graph_builder,
int op_type);
OpBuilder* CreatePool2DBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateReshapeBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateSoftmaxBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateReduceBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreatePadBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateResizeNearestNeighborBuilder(GraphBuilder* graph_builder,
int op_type);
OpBuilder* CreateL2NormalizationBuilder(GraphBuilder* graph_builder,
int op_type);
OpBuilder* CreateSplitBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateResizeBilinearOpBuilder(GraphBuilder* graph_builder,
int op_type);
OpBuilder* CreateNegOpBuilder(GraphBuilder* graph_builder, int op_type);
OpBuilder* CreateTransposeBuilder(GraphBuilder* graph_builder, int op_type);
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_OP_FACTORY_H_

View File

@ -0,0 +1,97 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/pad_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus PadOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
// Min/max values for input tensor.
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Padding tensor.
tensor_id = inputs->data[1];
const auto& padding_tensor = context->tensors[tensor_id];
if (padding_tensor.allocation_type == kTfLiteMmapRo) {
// If the padding input is a constant, bake it into the Hexagon graph as a
// Const node.
auto* const_padding_node =
graph_builder_->AddConstNodeWithData(tensor_id, padding_tensor);
AddInput(TensorID(const_padding_node->GetID(), 0));
} else {
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
}
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus PadOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
PadOpBuilder::~PadOpBuilder() {}
OpBuilder* CreatePadBuilder(GraphBuilder* graph_builder, int op_type) {
return new PadOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,48 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_PAD_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_PAD_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class PadOpBuilder : public OpBuilder {
public:
explicit PadOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~PadOpBuilder() override;
private:
TensorID node_output_;
float input_min_, input_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_PAD_BUILDER_H_

View File

@ -0,0 +1,136 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/pool_2d_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus Pool2dOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static std::vector<int> quant_bound_shape = {1, 1, 1, 1};
// Input data tensor.
int tensor_id = inputs->data[0];
const auto& data_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
data_tensor, &data_min_, &data_max_, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* data_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), reinterpret_cast<char*>(&data_min_),
sizeof(data_min_));
auto* data_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), reinterpret_cast<char*>(&data_max_),
sizeof(data_max_));
AddInput(TensorID(data_min_const->GetID(), 0));
AddInput(TensorID(data_max_const->GetID(), 0));
const TfLitePoolParams* pool_params =
reinterpret_cast<const TfLitePoolParams*>(builtin_data_);
// Padding type.
if (pool_params->padding == kTfLitePaddingSame) {
SetPaddingType(NN_PAD_SAME);
} else if (pool_params->padding == kTfLitePaddingValid) {
SetPaddingType(NN_PAD_VALID);
}
// Pooling window (filter) width/height as inputs.
static int dummy = 0;
filter_shape_ = {1, pool_params->filter_height, pool_params->filter_width, 1};
auto* filter_node = graph_builder_->AddConstNodeWithData(
filter_shape_.data(), reinterpret_cast<char*>(&dummy), sizeof(dummy));
AddInput(TensorID(filter_node->GetID(), 0));
// Stride width/height as inputs.
stride_shape_ = {1, pool_params->stride_height, pool_params->stride_width, 1};
auto* stride_node = graph_builder_->AddConstNodeWithData(
stride_shape_.data(), reinterpret_cast<char*>(&dummy), sizeof(dummy));
AddInput(TensorID(stride_node->GetID(), 0));
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
if (op_node_.op_type == OP_QuantizedMaxPool_8) {
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
} else {
// Hexagon's AvgPool output has different min/max bounds than what TFLite
// expects. Therefore, we add a Requantize op to correct the ranges.
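// Conceptually, Requantize_8to8 maps each uint8 value q from the
// producer's range [in_min, in_max] onto [out_min, out_max]:
//   x  = in_min + q * (in_max - in_min) / 255.0
//   q' = saturate(round((x - out_min) * 255.0 / (out_max - out_min)))
// (Illustrative formula; the exact rounding and saturation are done
// inside Hexagon NN.)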
TensorID pool_out = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
const auto& pool_out_min = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
const auto& pool_out_max = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
// Output min/max for requantization.
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
context->tensors[outputs->data[0]], &output_min_, &output_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* output_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), reinterpret_cast<char*>(&output_min_),
sizeof(output_min_));
auto* output_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape.data(), reinterpret_cast<char*>(&output_max_),
sizeof(output_max_));
auto* requantize_op = graph_builder_->AddNode();
requantize_op->SetOpType(OP_Requantize_8to8);
requantize_op->AddInput(pool_out);
requantize_op->AddInput(pool_out_min);
requantize_op->AddInput(pool_out_max);
requantize_op->AddInput(TensorID(output_min_const->GetID(), 0));
requantize_op->AddInput(TensorID(output_max_const->GetID(), 0));
node_output_ =
requantize_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
requantize_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
requantize_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
}
return kTfLiteOk;
}
TfLiteStatus Pool2dOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
Pool2dOpBuilder::~Pool2dOpBuilder() {}
OpBuilder* CreatePool2DBuilder(GraphBuilder* graph_builder, int op_type) {
return new Pool2dOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,50 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_POOL_2D_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_POOL_2D_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class Pool2dOpBuilder : public OpBuilder {
public:
explicit Pool2dOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~Pool2dOpBuilder() override;
private:
TensorID node_output_;
std::vector<int> stride_shape_;
std::vector<int> filter_shape_;
float data_min_, data_max_, output_min_, output_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_POOL_2D_BUILDER_H_

View File

@ -0,0 +1,119 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/reduce_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ReduceOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
// Min/max values for input tensor.
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Axes tensor should be constant.
tensor_id = inputs->data[1];
const auto& axes_tensor = context->tensors[tensor_id];
if (axes_tensor.allocation_type == kTfLiteMmapRo) {
// If the axes input is a constant, bake it into the Hexagon graph as a
// Const node.
auto* const_axes_node =
graph_builder_->AddConstNodeWithData(tensor_id, axes_tensor);
AddInput(TensorID(const_axes_node->GetID(), 0));
} else {
context->ReportError(context, "Reduction op doesn't have constant axis");
return kTfLiteError;
}
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
// Hexagon's sum-reduction outputs int32, so we shrink it down to uint8.
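// OP_QuantizeDownAndShrinkRange_32to8 consumes the int32 data plus its
// float min/max and emits uint8 data with a tightened min/max derived
// from the values actually present (analogous to TensorFlow's
// QuantizeDownAndShrinkRange; exact behavior is defined by Hexagon NN).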
if (op_node_.op_type == OP_QuantizedSum_8to32) {
const auto& reduce_out = AddOutput(sizeof(int32_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
const auto& reduce_out_min = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
const auto& reduce_out_max = AddOutput(sizeof(float), 4, {1, 1, 1, 1});
auto* quantize_output_op = graph_builder_->AddNode();
quantize_output_op->SetOpType(OP_QuantizeDownAndShrinkRange_32to8);
quantize_output_op->AddInput(reduce_out);
quantize_output_op->AddInput(reduce_out_min);
quantize_output_op->AddInput(reduce_out_max);
node_output_ =
quantize_output_op->AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
quantize_output_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
quantize_output_op->AddOutput(sizeof(float), 4, {1, 1, 1, 1});
} else {
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
}
return kTfLiteOk;
}
TfLiteStatus ReduceOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ReduceOpBuilder::~ReduceOpBuilder() {}
OpBuilder* CreateReduceBuilder(GraphBuilder* graph_builder, int op_type) {
return new ReduceOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,48 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_REDUCE_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_REDUCE_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ReduceOpBuilder : public OpBuilder {
public:
explicit ReduceOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ReduceOpBuilder() override;
private:
TensorID node_output_;
float input_min_, input_max_, output_min_, output_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_REDUCE_BUILDER_H_

View File

@ -0,0 +1,122 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/reshape_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
namespace {
void PopulateOutputShapeFromTensor(const TfLiteTensor* shape_tensor,
std::vector<int>* output_shape) {
for (int i = 0; i < shape_tensor->dims->data[0]; ++i) {
output_shape->push_back(shape_tensor->data.i32[i]);
}
}
void PopulateShapeFromParam(const TfLiteReshapeParams* params,
std::vector<int>* output_shape) {
// Reads the output shape from the `TfLiteReshapeParams` builtin data; used
// as a fallback when no usable shape tensor is available.
int num_dimensions = params->num_dimensions;
if (num_dimensions == 1 && params->shape[0] == 0) {
// Legacy tflite models use a shape parameter of [0] to indicate scalars,
// so adjust accordingly. TODO(b/111614235): Allow zero-sized buffers during
// toco conversion.
num_dimensions = 0;
}
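// For example, a legacy model with params->shape == {0} and
// num_dimensions == 1 produces an empty output_shape, i.e. a scalar.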
for (int i = 0; i < num_dimensions; ++i) {
output_shape->push_back(params->shape[i]);
}
}
} // namespace
TfLiteStatus ReshapeOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Input data tensor.
AddInput(graph_builder_->GetHexagonTensorId(inputs->data[0]));
// Output shape.
TfLiteTensor* shape_tensor;
bool output_shape_is_dynamic = false;
if (inputs->size == 2) {
shape_tensor = &context->tensors[inputs->data[1]];
bool is_shape_tensor =
(shape_tensor->dims->size == 1 && shape_tensor->type == kTfLiteInt32);
// If tensor shape is dynamic, pass it along directly.
if (shape_tensor->allocation_type != kTfLiteMmapRo && is_shape_tensor) {
output_shape_is_dynamic = true;
AddInput(graph_builder_->GetHexagonTensorId(inputs->data[1]));
}
if (!is_shape_tensor) {
shape_tensor = nullptr;
}
}
if (!output_shape_is_dynamic) {
if (shape_tensor) {
PopulateOutputShapeFromTensor(shape_tensor, &output_shape_);
} else {
const TfLiteReshapeParams* reshape_params =
reinterpret_cast<const TfLiteReshapeParams*>(builtin_data_);
PopulateShapeFromParam(reshape_params, &output_shape_);
}
int num_elements_in_shape = static_cast<int>(output_shape_.size());
output_shape_shape_ = {1, 1, 1, num_elements_in_shape};
auto* shape_node = graph_builder_->AddConstNodeWithData(
output_shape_shape_.data(),
reinterpret_cast<char*>(output_shape_.data()),
sizeof(int) * num_elements_in_shape);
AddInput(TensorID(shape_node->GetID(), 0));
}
// Hexagon output for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
return kTfLiteOk;
}
TfLiteStatus ReshapeOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ReshapeOpBuilder::~ReshapeOpBuilder() {}
OpBuilder* CreateReshapeBuilder(GraphBuilder* graph_builder, int op_type) {
return new ReshapeOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,49 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESHAPE_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESHAPE_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ReshapeOpBuilder : public OpBuilder {
public:
explicit ReshapeOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ReshapeOpBuilder() override;
private:
TensorID node_output_;
std::vector<int> output_shape_;
std::vector<int> output_shape_shape_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESHAPE_BUILDER_H_

View File

@ -0,0 +1,106 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/resize_bilinear_builder.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ResizeBilinearOpBuilder::PopulateSubGraph(
const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
if (inputs->size != 2) {
context->ReportError(context, "Expecting 2 inputs %d != 2\n", inputs->size);
return kTfLiteError;
}
// Input data tensor.
int input_tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[input_tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(input_tensor_id));
const auto& size_tensor = context->tensors[inputs->data[1]];
if (!IsConstantTensor(&size_tensor)) {
context->ReportError(context,
"Hexagon Delegate doesn't support dynamic shape.\n");
return kTfLiteError;
}
// dims tensor.
const int dims_shape[] = {1, 1, 1, 2};
std::vector<int> dims = {size_tensor.data.i32[0], size_tensor.data.i32[1]};
auto* dims_const = graph_builder_->AddConstNodeWithData(
dims_shape, reinterpret_cast<char*>(dims.data()),
sizeof(int) * dims.size());
AddInput(TensorID(dims_const->GetID(), 0));
// Input min/max
TF_LITE_ENSURE_OK(context, ComputeMinAndMaxQuantValues(
input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Align Corners
const TfLiteResizeBilinearParams* params =
reinterpret_cast<const TfLiteResizeBilinearParams*>(builtin_data_);
int align_corners = params->align_corners ? 1 : 0;
auto* align_corners_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&align_corners),
sizeof(align_corners));
AddInput(TensorID(align_corners_const->GetID(), 0));
// Output
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
auto resize_bilinear_out = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
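  // The two scalar float outputs above carry the op's output min/max, as in
  // the other op builders in this delegate.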
node_output_ = resize_bilinear_out;
return kTfLiteOk;
}
TfLiteStatus ResizeBilinearOpBuilder::RegisterOutputs(
const TfLiteIntArray* outputs, TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ResizeBilinearOpBuilder::~ResizeBilinearOpBuilder() {}
OpBuilder* CreateResizeBilinearOpBuilder(GraphBuilder* graph_builder,
int op_type) {
return new ResizeBilinearOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,46 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESIZE_BILINEAR_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESIZE_BILINEAR_BUILDER_H_
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ResizeBilinearOpBuilder : public OpBuilder {
public:
explicit ResizeBilinearOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ResizeBilinearOpBuilder() override;
private:
TensorID node_output_;
float input_min_, input_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESIZE_BILINEAR_BUILDER_H_

View File

@ -0,0 +1,107 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/resize_nearest_neighbor_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus ResizeNearestNeighborOpBuilder::PopulateSubGraph(
const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
// Output dimensions tensor.
tensor_id = inputs->data[1];
const auto& output_dim_tensor = context->tensors[tensor_id];
if (output_dim_tensor.allocation_type == kTfLiteMmapRo) {
// If the output dimensions input is a constant, bake it into the Hexagon
// graph as a Const node.
auto* const_output_dim_node =
graph_builder_->AddConstNodeWithData(tensor_id, output_dim_tensor);
AddInput(TensorID(const_output_dim_node->GetID(), 0));
} else {
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
}
// Min/max values for input tensor.
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Align corners.
const TfLiteResizeNearestNeighborParams* params =
reinterpret_cast<const TfLiteResizeNearestNeighborParams*>(builtin_data_);
align_corners_ = params->align_corners;
auto* align_corners_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&align_corners_),
sizeof(align_corners_));
AddInput(TensorID(align_corners_const->GetID(), 0));
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus ResizeNearestNeighborOpBuilder::RegisterOutputs(
const TfLiteIntArray* outputs, TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
ResizeNearestNeighborOpBuilder::~ResizeNearestNeighborOpBuilder() {}
OpBuilder* CreateResizeNearestNeighborBuilder(GraphBuilder* graph_builder,
int op_type) {
return new ResizeNearestNeighborOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,50 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESIZE_NEAREST_NEIGHBOR_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESIZE_NEAREST_NEIGHBOR_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class ResizeNearestNeighborOpBuilder : public OpBuilder {
public:
explicit ResizeNearestNeighborOpBuilder(GraphBuilder* graph_builder,
int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~ResizeNearestNeighborOpBuilder() override;
private:
TensorID node_output_;
float input_min_, input_max_;
bool align_corners_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_RESIZE_NEAREST_NEIGHBOR_BUILDER_H_

View File

@ -0,0 +1,89 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/softmax_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus SoftmaxOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static std::vector<int> quant_bound_shape = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
  auto* input_min_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&input_min_),
      sizeof(input_min_));
  auto* input_max_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&input_max_),
      sizeof(input_max_));
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// beta value
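  // Beta scales the logits before normalization:
  //   softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j)
  // with beta == 1.0 giving the conventional softmax.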
const TfLiteSoftmaxParams* softmax_params =
reinterpret_cast<const TfLiteSoftmaxParams*>(builtin_data_);
beta_value_ = softmax_params->beta;
  auto* beta_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&beta_value_),
      sizeof(beta_value_));
AddInput(TensorID(beta_const->GetID(), 0));
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus SoftmaxOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
SoftmaxOpBuilder::~SoftmaxOpBuilder() {}
OpBuilder* CreateSoftmaxBuilder(GraphBuilder* graph_builder, int op_type) {
return new SoftmaxOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,49 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_SOFTMAX_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_SOFTMAX_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class SoftmaxOpBuilder : public OpBuilder {
public:
explicit SoftmaxOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
  ~SoftmaxOpBuilder() override;
private:
TensorID node_output_;
float beta_value_ = 1.0f;
float input_min_, input_max_, output_min_, output_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_SOFTMAX_BUILDER_H_

View File

@ -0,0 +1,107 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/split_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus SplitOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
const int input_tensor_id = inputs->data[1];
const auto& input_tensor = context->tensors[input_tensor_id];
// Axis tensor.
const int axis_tensor_id = inputs->data[0];
const auto& axis = context->tensors[axis_tensor_id];
if (axis.allocation_type != kTfLiteMmapRo) {
context->ReportError(context,
"Axis tensor doesn't have correct allocation type: %s",
axis.name);
return kTfLiteError;
}
// We pad Hexagon tensor dimensions with 1 if dims.size < 4.
  // (4 - input_tensor.dims->size) helps map the input axis value in such
// cases.
int axis_value = axis.data.i32[0] + (4 - input_tensor.dims->size);
if (axis_value < 0) {
axis_value += input_tensor.dims->size;
}
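  // e.g. a rank-2 input split along axis 1 maps to Hexagon axis 3 once the
  // shape is padded out to {1, 1, d0, d1}.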
auto* input_axis_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&axis_value), sizeof(int32_t));
AddInput(TensorID(input_axis_const->GetID(), 0));
// Input data tensor & min/max.
AddInput(graph_builder_->GetHexagonTensorId(input_tensor_id));
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Output data tensors.
for (int i = 0; i < outputs->size; ++i) {
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[i]].dims);
TensorID output = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
node_outputs_.push_back(output);
}
// For Hexagon output min/max.
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus SplitOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
for (int i = 0; i < node_outputs_.size(); ++i) {
graph_builder_->AddTensorWithID(outputs->data[i], node_outputs_[i].first,
node_outputs_[i].second);
}
return kTfLiteOk;
}
SplitOpBuilder::~SplitOpBuilder() {}
OpBuilder* CreateSplitBuilder(GraphBuilder* graph_builder, int op_type) {
return new SplitOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,49 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_SPLIT_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_SPLIT_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class SplitOpBuilder : public OpBuilder {
public:
explicit SplitOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
~SplitOpBuilder() override;
private:
std::vector<TensorID> node_outputs_;
float input_min_;
float input_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_SPLIT_BUILDER_H_

View File

@ -0,0 +1,85 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/transpose_builder.h"
#include <stdint.h>
#include <limits>
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus TransposeOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
static int quant_bound_shape[] = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[0];
const auto& input_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
// permutation tensor.
tensor_id = inputs->data[1];
const auto& control_tensor = context->tensors[tensor_id];
if (control_tensor.allocation_type == kTfLiteMmapRo) {
auto* const_control_tensor_node =
graph_builder_->AddConstNodeWithData(tensor_id, control_tensor);
AddInput(TensorID(const_control_tensor_node->GetID(), 0));
} else {
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
}
TF_LITE_ENSURE_STATUS(
ComputeMinAndMaxQuantValues(input_tensor, &input_min_, &input_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
auto* input_min_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_min_),
sizeof(input_min_));
auto* input_max_const = graph_builder_->AddConstNodeWithData(
quant_bound_shape, reinterpret_cast<char*>(&input_max_),
sizeof(input_max_));
// Min/max values for input tensor.
AddInput(TensorID(input_min_const->GetID(), 0));
AddInput(TensorID(input_max_const->GetID(), 0));
// Hexagon outputs for this node.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus TransposeOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
OpBuilder* CreateTransposeBuilder(GraphBuilder* graph_builder, int op_type) {
return new TransposeOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,43 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TRANSPOSE_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TRANSPOSE_BUILDER_H_
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class TransposeOpBuilder : public OpBuilder {
public:
explicit TransposeOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
private:
TensorID node_output_;
float input_min_, input_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TRANSPOSE_BUILDER_H_

View File

@ -0,0 +1,169 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/builders/transpose_conv_2d_builder.h"
#include <stdint.h>
#include <limits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace delegates {
namespace hexagon {
TfLiteStatus TransposeConv2dOpBuilder::PopulateSubGraph(
const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
TfLiteContext* context) {
static std::vector<int> quant_bound_shape = {1, 1, 1, 1};
int tensor_id;
// Input data tensor.
tensor_id = inputs->data[2];
const auto& data_tensor = context->tensors[tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(tensor_id));
TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
data_tensor, &data_min_, &data_max_, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max()));
  auto* data_min_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&data_min_),
      sizeof(data_min_));
  auto* data_max_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&data_max_),
      sizeof(data_max_));
// Weights tensor
tensor_id = inputs->data[1];
const auto& weights_tensor = context->tensors[tensor_id];
if (weights_tensor.allocation_type != kTfLiteMmapRo) {
context->ReportError(
context, "Weights tensor doesn't have correct allocation type: %s",
weights_tensor.name);
return kTfLiteError;
}
int filter_batch_size, filter_height_size, filter_width_size,
filter_depth_size;
GetDims(&filter_batch_size, &filter_height_size, &filter_width_size,
&filter_depth_size, weights_tensor.dims);
weight_shape_ = {filter_batch_size, filter_height_size, filter_width_size,
filter_depth_size};
  auto* const_weights_node = graph_builder_->AddConstNodeWithData(
      weight_shape_.data(), weights_tensor.data.raw, weights_tensor.bytes);
graph_builder_->AddTensorWithID(tensor_id, const_weights_node->GetID(), 0);
AddInput(TensorID(const_weights_node->GetID(), 0));
ComputeMinAndMaxQuantValues(weights_tensor, &weights_min_, &weights_max_,
std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
  auto* weights_min_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&weights_min_),
      sizeof(weights_min_));
  auto* weights_max_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&weights_max_),
      sizeof(weights_max_));
// Min/max inputs for data & weights tensors.
AddInput(TensorID(data_min_const->GetID(), 0));
AddInput(TensorID(data_max_const->GetID(), 0));
AddInput(TensorID(weights_min_const->GetID(), 0));
AddInput(TensorID(weights_max_const->GetID(), 0));
// Output dims are required to compute padding.
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
// Hexagon TransposeConv requires an explicit padding tensor. So we compute
// the same using stride, input & output info.
const TfLiteTransposeConvParams* params =
reinterpret_cast<const TfLiteTransposeConvParams*>(builtin_data_);
int unused_output_height, unused_output_width;
TfLitePaddingValues padding = ComputePaddingHeightWidth(
params->stride_height, params->stride_width, 1, 1, output_height_size,
output_width_size, filter_height_size, filter_width_size, params->padding,
&unused_output_height, &unused_output_width);
std::vector<int> padding_tensor = {padding.height, padding.height,
padding.width, padding.width};
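  // e.g. with stride 1, a 3x3 filter and SAME padding, the computed height
  // and width padding are both 1, so padding_tensor is {1, 1, 1, 1}.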
std::vector<int> padding_tensor_shape = {1, 1, 2, 2};
  auto* padding_const = graph_builder_->AddConstNodeWithData(
      padding_tensor_shape.data(),
      reinterpret_cast<char*>(padding_tensor.data()), sizeof(int) * 4);
AddInput(TensorID(padding_const->GetID(), 0));
// Stride shape.
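  // Hexagon reads the strides from this const node's shape
  // ({1, stride_h, stride_w, 1}); the node's data is not consumed, so a
  // dummy scalar backs it.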
int stride_height = params->stride_height;
int stride_width = params->stride_width;
static int dummy = 0;
stride_shape_ = {1, stride_height, stride_width, 1};
  auto* stride_node = graph_builder_->AddConstNodeWithData(
      stride_shape_.data(), reinterpret_cast<char*>(&dummy), sizeof(dummy));
AddInput(TensorID(stride_node->GetID(), 0));
// TFLite's TransposeConv doesn't have a bias input, so we just feed in 0s.
std::vector<int> bias_data(output_depth_size);
// Hexagon's conv ops require bias as a [1, 1, 1, dout] tensor.
bias_shape_ = {1, 1, 1, output_depth_size};
  auto* bias_const = graph_builder_->AddConstNodeWithData(
      bias_shape_.data(), reinterpret_cast<char*>(bias_data.data()),
      sizeof(bias_data[0]) * bias_data.size());
bias_min_ = 0;
bias_max_ = 0;
  auto* bias_min_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&bias_min_),
      sizeof(bias_min_));
  auto* bias_max_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&bias_max_),
      sizeof(bias_max_));
AddInput(TensorID(bias_const->GetID(), 0));
AddInput(TensorID(bias_min_const->GetID(), 0));
AddInput(TensorID(bias_max_const->GetID(), 0));
// Output min/max.
ComputeMinAndMaxQuantValues(context->tensors[outputs->data[0]], &output_min_,
&output_max_, std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max());
  auto* output_min_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&output_min_),
      sizeof(output_min_));
  auto* output_max_const = graph_builder_->AddConstNodeWithData(
      quant_bound_shape.data(), reinterpret_cast<char*>(&output_max_),
      sizeof(output_max_));
AddInput(TensorID(output_min_const->GetID(), 0));
AddInput(TensorID(output_max_const->GetID(), 0));
// Hexagon outputs for this node.
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
AddOutput(sizeof(float), 4, {1, 1, 1, 1});
return kTfLiteOk;
}
TfLiteStatus TransposeConv2dOpBuilder::RegisterOutputs(
const TfLiteIntArray* outputs, TfLiteContext* context) {
// Should be only 1 output.
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
TransposeConv2dOpBuilder::~TransposeConv2dOpBuilder() {}
OpBuilder* CreateTransposeConv2DBuilder(GraphBuilder* graph_builder,
int op_type) {
return new TransposeConv2dOpBuilder(graph_builder, op_type);
}
} // namespace hexagon
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,53 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TRANSPOSE_CONV_2D_BUILDER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TRANSPOSE_CONV_2D_BUILDER_H_
#include <vector>
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
class TransposeConv2dOpBuilder : public OpBuilder {
public:
explicit TransposeConv2dOpBuilder(GraphBuilder* graph_builder, int op_type)
: OpBuilder(graph_builder, op_type) {}
TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) override;
TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) override;
  ~TransposeConv2dOpBuilder() override;
private:
TensorID node_output_;
std::vector<float> transposed_weights_;
std::vector<int> stride_shape_;
std::vector<int> weight_shape_, bias_shape_;
std::vector<int> bias_data_;
float data_min_, data_max_, weights_min_, weights_max_, bias_min_, bias_max_,
output_min_, output_max_;
};
} // namespace hexagon
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_BUILDERS_TRANSPOSE_CONV_2D_BUILDER_H_

View File

@ -0,0 +1,184 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate_kernel.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_implementation.h"
#include "tensorflow/lite/experimental/delegates/hexagon/utils.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace {
// Maximum number of Hexagon graphs the delegate will create.
// Must be > 0; values above 16 have caused problems in NNLib.
constexpr int kMaxHexagonGraphs = 4;
TfLiteRegistration GetHexagonKernelRegistration() {
  // This is the registration for the delegate node that gets added to
  // the TFLite graph in place of the subgraph it replaces.
  // It is treated as an OP node, but in our case:
  // Init initializes the delegate,
  // Invoke runs the delegate graph,
  // Prepare prepares the delegate, and
  // Free does any cleanup needed by the delegate.
TfLiteRegistration kernel_registration;
kernel_registration.builtin_code = kTfLiteBuiltinDelegate;
kernel_registration.custom_name = "TfLiteHexagonDelegate";
kernel_registration.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<HexagonDelegateKernel*>(buffer);
};
kernel_registration.init = [](TfLiteContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteDelegateParams* params =
reinterpret_cast<const TfLiteDelegateParams*>(buffer);
auto hexagon_kernel = std::make_unique<HexagonDelegateKernel>();
if (hexagon_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return hexagon_kernel.release();
};
kernel_registration.invoke = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
HexagonDelegateKernel* kernel =
reinterpret_cast<HexagonDelegateKernel*>(node->user_data);
if (!kernel) {
context->ReportError(context, "Hexagon Kernel was not initialized");
return kTfLiteError;
}
return kernel->Invoke(context, node);
};
kernel_registration.prepare = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
if (node->user_data == nullptr) {
context->ReportError(context, "Hexagon Kernel was not initialized");
return kTfLiteError;
}
HexagonDelegateKernel* kernel =
reinterpret_cast<HexagonDelegateKernel*>(node->user_data);
return kernel->Prepare(context, node);
};
return kernel_registration;
}
TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
  // Reserve one element: the first entry must hold the number of nodes and is
  // filled in at the end.
std::vector<int> supported_nodes(1);
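  // e.g. if nodes {2, 3, 5} end up supported, the vector becomes
  // {3, 2, 3, 5}, which reinterprets directly as a TfLiteIntArray of size 3.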
TfLiteIntArray* plan;
TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan));
TfLiteNode* node;
TfLiteRegistration* registration;
// Rudimentary mechanism to check how many Hexagon graphs we initialize.
int num_components = 1;
int last_index = -1;
for (int node_index : TfLiteIntArrayView(plan)) {
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_index, &node, &registration));
if (IsNodeSupportedByHexagon(registration, node, context)) {
// If there is a 'break' in node indices, a new subgraph (and therefore, a
// new Hexagon graph) will be created.
if (last_index != -1 && node_index != last_index + 1) {
if (num_components == kMaxHexagonGraphs) {
break;
}
++num_components;
}
supported_nodes.push_back(node_index);
last_index = node_index;
}
}
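  // e.g. a plan whose supported nodes are {1, 2, 3, 7, 8} yields two
  // components, split at the gap between 3 and 7.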
// Set first element to the number of nodes to replace.
supported_nodes[0] = supported_nodes.size() - 1;
TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
"Hexagon delegate: %d nodes delegated out of %d nodes.\n",
supported_nodes[0], plan->size);
TfLiteRegistration hexagon_kernel_registration =
GetHexagonKernelRegistration();
return context->ReplaceNodeSubsetsWithDelegateKernels(
context, hexagon_kernel_registration,
reinterpret_cast<TfLiteIntArray*>(supported_nodes.data()), delegate);
}
class HexagonDelegate : public TfLiteDelegate {
public:
explicit HexagonDelegate(const TfLiteHexagonDelegateOptions* params)
: params_(params != nullptr ? *params : TfLiteHexagonDelegateOptions()) {}
TfLiteHexagonDelegateOptions* params() { return &params_; }
bool VerifyDelegate() {
auto* hexagon_nn = HexagonNNImplementation();
if (hexagon_nn == nullptr) {
return false;
}
return hexagon_nn->hexagon_nn_is_device_supported &&
hexagon_nn->hexagon_nn_is_device_supported();
}
private:
TfLiteHexagonDelegateOptions params_;
};
TfLiteDelegate* CreateDelegate(const TfLiteHexagonDelegateOptions* params) {
TfLiteDelegate* delegate = new HexagonDelegate(params);
if (!static_cast<HexagonDelegate*>(delegate)->VerifyDelegate()) {
delete delegate;
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"Hexagon Delegate is not supported.\n");
return nullptr;
}
delegate->data_ = static_cast<HexagonDelegate*>(delegate)->params();
delegate->flags = kTfLiteDelegateFlagsNone;
delegate->Prepare = &DelegatePrepare;
delegate->CopyFromBufferHandle = nullptr;
delegate->CopyToBufferHandle = nullptr;
delegate->FreeBufferHandle = nullptr;
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"Created TensorFlow Lite delegate for Hexagon.");
return delegate;
}
} // namespace
} // namespace tflite
TfLiteDelegate* TfLiteHexagonDelegateCreate(
const TfLiteHexagonDelegateOptions* options) {
return tflite::CreateDelegate(options);
}
void TfLiteHexagonDelegateDelete(TfLiteDelegate* delegate) { delete delegate; }
void TfLiteHexagonInit() { tflite::HexagonDelegateKernel::InitState(); }
void TfLiteHexagonInitWithPath(const char* lib_directory_path) {
if (lib_directory_path != nullptr) {
std::string env_var_value = lib_directory_path;
env_var_value += ";/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp";
setenv("ADSP_LIBRARY_PATH", env_var_value.c_str(), 1 /* overwrite */);
}
tflite::HexagonDelegateKernel::InitState();
}
void TfLiteHexagonTearDown() { tflite::HexagonDelegateKernel::Teardown(); }

View File

@ -0,0 +1,81 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_DELEGATE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_DELEGATE_H_
#include "tensorflow/lite/c/common.h"
#ifdef SWIG
#define TFL_CAPI_EXPORT
#else
#if defined(_WIN32)
#ifdef TFL_COMPILE_LIBRARY
#define TFL_CAPI_EXPORT __declspec(dllexport)
#else
#define TFL_CAPI_EXPORT __declspec(dllimport)
#endif // TFL_COMPILE_LIBRARY
#else
#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#endif // SWIG
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
struct TFL_CAPI_EXPORT TfLiteHexagonDelegateOptions {
// This corresponds to the debug level in the hexagon SDK. 0 (default)
// means no debug.
int debug_level;
// This corresponds to powersave_level in the hexagon SDK.
// where 0 (default) means high performance which means more power
// consumption.
int powersave_level;
// If set to true, performance information about the graph will be dumped
// to Standard output, this includes cpu cycles.
// WARNING: Experimental and subject to change anytime.
bool print_graph_profile;
// If set to true, graph structure will be dumped to Standard output.
// This is usually beneficial to see what actual nodes executed on
// the DSP. Combining with 'debug_level' more information will be printed.
// WARNING: Experimental and subject to change anytime.
bool print_graph_debug;
};
// Return a delegate that uses Hexagon SDK for ops execution.
// Must outlive the interpreter.
TfLiteDelegate* TFL_CAPI_EXPORT
TfLiteHexagonDelegateCreate(const TfLiteHexagonDelegateOptions* options);
// Do any needed cleanup and delete 'delegate'.
void TFL_CAPI_EXPORT TfLiteHexagonDelegateDelete(TfLiteDelegate* delegate);
// Initializes the DSP connection.
// This should be called before doing any usage of the delegate.
// "lib_directory_path": Path to the directory which holds the
// shared libraries for the Hexagon NN libraries on the device.
void TFL_CAPI_EXPORT TfLiteHexagonInitWithPath(const char* lib_directory_path);
// Same as the above method, but doesn't accept a path parameter.
// Assumes the environment is already set up. Only initializes Hexagon.
void TFL_CAPI_EXPORT TfLiteHexagonInit();
// Clean up and switch off the DSP connection.
// This should be called after all processing is done and delegate is deleted.
void TFL_CAPI_EXPORT TfLiteHexagonTearDown();
#ifdef __cplusplus
}
#endif // __cplusplus
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_DELEGATE_H_
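A minimal usage sketch of this C API (illustrative only: `interpreter` is
assumed to be an already-built `tflite::Interpreter`, and the zero-initialized
options pick the defaults documented above):

```
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"

// Initialize the DSP connection before any delegate usage.
TfLiteHexagonInit();

// Zero-initialized options: debug off, high-performance power level.
TfLiteHexagonDelegateOptions options = {0};
TfLiteDelegate* delegate = TfLiteHexagonDelegateCreate(&options);
if (delegate != nullptr) {
  // The delegate must outlive the interpreter.
  interpreter->ModifyGraphWithDelegate(delegate);
}
// ... interpreter->Invoke() as usual ...

// After all processing is done and the interpreter is destroyed:
TfLiteHexagonDelegateDelete(delegate);
TfLiteHexagonTearDown();
```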

View File

@ -0,0 +1,357 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate_kernel.h"
#include <algorithm>
#include <vector>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_implementation.h"
#include "tensorflow/lite/experimental/delegates/hexagon/utils.h"
namespace tflite {
namespace {
inline const char* StateToString(
HexagonDelegateKernel::HexagonKernelState state) {
switch (state) {
case HexagonDelegateKernel::HexagonKernelState::HEALTHY:
return "HEALTHY";
case HexagonDelegateKernel::HexagonKernelState::FAST_RPC_SETUP_FAILED:
return "FAST_RPC_SETUP_FAILED";
case HexagonDelegateKernel::HexagonKernelState::FAILED_TO_INIT_GRAPH:
return "FAILED_TO_INIT_GRAPH";
case HexagonDelegateKernel::HexagonKernelState::FAILED_TO_PREPARE_GRAPH:
return "FAILED_TO_PREPARE_GRAPH";
case HexagonDelegateKernel::HexagonKernelState::MULTIPLE_INPUTS:
return "MULTIPLE_INPUTS";
case HexagonDelegateKernel::HexagonKernelState::INPUT_RANK_NOT_SUPPORTED:
return "INPUT_RANK_NOT_SUPPORTED";
case HexagonDelegateKernel::HexagonKernelState::MULTIPLE_OUTPUTS:
return "MULTIPLE_OUTPUTS";
case HexagonDelegateKernel::HexagonKernelState::FAILED_TO_EXECUTE_GRAPH:
return "FAILED_TO_EXECUTE_GRAPH";
  }
  return "<unknown state>";
}
// Returns uint64 representing total cycles in 'perf_info' by
// combining lo and hi counters.
inline uint64_t GetCycles(const hexagon_nn_perfinfo& perf_info) {
uint64_t res = perf_info.counter_hi;
res <<= 32;
res |= perf_info.counter_lo;
return res;
}
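// e.g. counter_hi == 1 and counter_lo == 2 combine to
// (1ULL << 32) | 2 == 4294967298 cycles.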
// Comparator for hexagon_nn_perfinfo in descending order based on
// total cycles consumed.
struct PerfInfoCmp {
bool operator()(const hexagon_nn_perfinfo& a,
const hexagon_nn_perfinfo& b) const {
return GetCycles(a) > GetCycles(b);
}
};
} // namespace
void HexagonDelegateKernel::ReportError(TfLiteContext* context,
HexagonKernelState state,
const std::string& msg) {
PrintLog();
context->ReportError(context, "Failed: %s. STATE: %s", msg.c_str(),
StateToString(state));
}
TfLiteStatus HexagonDelegateKernel::Init(TfLiteContext* context,
const TfLiteDelegateParams* params) {
hexagon_nn_ = HexagonNNImplementation();
if (hexagon_nn_ == nullptr) {
context->ReportError(context, "Hexagon interface not available.");
return kTfLiteError;
}
if (params != nullptr && params->delegate != nullptr) {
const ::TfLiteHexagonDelegateOptions* options_ptr =
reinterpret_cast<const ::TfLiteHexagonDelegateOptions*>(
params->delegate->data_);
params_ = (options_ptr == nullptr ? ::TfLiteHexagonDelegateOptions()
: *options_ptr);
}
// Ensure Hexagon NNLib is ready to start working.
int error = hexagon_nn_->hexagon_nn_config();
if (error != 0) {
context->ReportError(context, "hexagon_nn_config failed. Error: %d", error);
return kTfLiteError;
}
// Initialize an empty graph.
error = hexagon_nn_->hexagon_nn_init(&graph_id_);
if (error != 0) {
state_ = HexagonKernelState::FAILED_TO_INIT_GRAPH;
ReportError(context, state_, "failed to init");
return kTfLiteError;
}
error =
hexagon_nn_->hexagon_nn_set_debug_level(graph_id_, params_.debug_level);
if (error != 0) {
context->ReportError(context, "Failed to set debug level, error: %d",
error);
return kTfLiteError;
}
error = hexagon_nn_->hexagon_nn_set_powersave_level(params_.powersave_level);
if (error != 0) {
context->ReportError(context, "Failed to set powersave level, error %d",
error);
return kTfLiteError;
}
for (auto node_index : TfLiteIntArrayView(params->nodes_to_replace)) {
nodes_.push_back(node_index);
}
TF_LITE_ENSURE_STATUS(
BuildGraph(context, params->input_tensors, params->output_tensors));
return kTfLiteOk;
}
TfLiteStatus HexagonDelegateKernel::Invoke(TfLiteContext* context,
TfLiteNode* node) {
if (hexagon_nn_ == nullptr) {
context->ReportError(context, "Hexagon interface not available.");
return kTfLiteError;
}
// Allocate inputs.
std::vector<hexagon_nn_tensordef> input_tensors;
for (auto tensor_index : TfLiteIntArrayView(node->inputs)) {
if (tensor_index == kTfLiteOptionalTensor) {
continue;
}
TfLiteTensor* tensor = &context->tensors[tensor_index];
// Const tensors should be added as const nodes during graph construction.
if (tensor->allocation_type != kTfLiteMmapRo) {
if (tensor->dims->size > 4) {
ReportError(context, HexagonKernelState::INPUT_RANK_NOT_SUPPORTED,
"Only up to 4d tensor are supported.");
return kTfLiteError;
}
input_tensors.emplace_back();
auto& input_tensor = input_tensors.back();
input_tensor.data = reinterpret_cast<unsigned char*>(tensor->data.raw);
input_tensor.dataLen = tensor->bytes;
input_tensor.data_valid_len = tensor->bytes;
TF_LITE_ENSURE_STATUS(
Get4DShape(&input_tensor.batches, &input_tensor.height,
&input_tensor.width, &input_tensor.depth, tensor->dims));
}
}
// Allocate outputs.
std::vector<hexagon_nn_tensordef> output_tensors;
for (auto tensor_index : TfLiteIntArrayView(node->outputs)) {
if (tensor_index == kTfLiteOptionalTensor) {
continue;
}
TfLiteTensor* tensor = &context->tensors[tensor_index];
if (tensor->allocation_type != kTfLiteMmapRo) {
if (tensor->dims->size > 4) {
ReportError(context, HexagonKernelState::INPUT_RANK_NOT_SUPPORTED,
"Only up to 4d tensor are supported.");
return kTfLiteError;
}
output_tensors.emplace_back();
auto& output_tensor = output_tensors.back();
output_tensor.data = reinterpret_cast<unsigned char*>(tensor->data.raw);
output_tensor.dataLen = tensor->bytes;
}
}
if (params_.print_graph_profile) {
hexagon_nn_->hexagon_nn_reset_perfinfo(graph_id_, 0);
}
// Execute.
int error = hexagon_nn_->hexagon_nn_execute_new(
graph_id_, input_tensors.data(), input_tensors.size(),
output_tensors.data(), output_tensors.size());
if (error != 0) {
ReportError(context, HexagonKernelState::FAILED_TO_EXECUTE_GRAPH,
"Failed to execute graph.");
return kTfLiteError;
}
if (params_.print_graph_profile) {
PrintPerformanceData();
}
return kTfLiteOk;
}
TfLiteStatus HexagonDelegateKernel::Prepare(TfLiteContext* context,
TfLiteNode* node) {
if (hexagon_nn_ == nullptr) {
context->ReportError(context, "Hexagon interface not available. prepare");
return kTfLiteError;
}
int status = hexagon_nn_->hexagon_nn_prepare(graph_id_);
if (status != 0) {
state_ = HexagonKernelState::FAILED_TO_PREPARE_GRAPH;
ReportError(context, state_, "Failed to prepare graph.\n");
return kTfLiteError;
}
// Check input/output tensors.
std::vector<int> tensors;
for (auto tensor_index : TfLiteIntArrayView(node->inputs)) {
tensors.push_back(tensor_index);
}
for (auto tensor_index : TfLiteIntArrayView(node->outputs)) {
tensors.push_back(tensor_index);
}
for (auto tensor_index : tensors) {
if (tensor_index == kTfLiteOptionalTensor) {
continue;
}
TfLiteTensor* tensor = &context->tensors[tensor_index];
// Const tensors should be added as const nodes during graph construction.
if (tensor->allocation_type != kTfLiteMmapRo && tensor->dims->size > 4) {
ReportError(context, HexagonKernelState::INPUT_RANK_NOT_SUPPORTED,
"Only up to 4d tensor are supported.");
return kTfLiteError;
}
}
if (params_.print_graph_debug) {
PrintDebuggingGraph();
}
return kTfLiteOk;
}
TfLiteStatus HexagonDelegateKernel::BuildGraph(
TfLiteContext* context, const TfLiteIntArray* input_tensors,
const TfLiteIntArray* output_tensors) {
builder_.reset(
new delegates::hexagon::GraphBuilder(hexagon_nn_, context, graph_id_));
// Add inputs to the graph.
builder_->AddInputTensors(input_tensors, context);
// Add all ops.
TfLiteNode* node;
TfLiteRegistration* reg;
for (int node_index : nodes_) {
TF_LITE_ENSURE_STATUS(
context->GetNodeAndRegistration(context, node_index, &node, &reg));
auto* op_builder = builder_->AddNodeFromTfLiteOp(reg->builtin_code, node);
TF_LITE_ENSURE_STATUS(
op_builder->PopulateSubGraph(node->inputs, node->outputs, context));
TF_LITE_ENSURE_STATUS(op_builder->RegisterOutputs(node->outputs, context));
}
// Add Outputs.
builder_->AddOutputTensors(output_tensors, context);
builder_->Build();
return kTfLiteOk;
}
HexagonDelegateKernel::~HexagonDelegateKernel() {
if (graph_id_ != -1) {
hexagon_nn_->hexagon_nn_teardown(graph_id_);
}
}
void HexagonDelegateKernel::PrintLog() {
std::vector<unsigned char> buf(3000000);
time_t my_time = time(nullptr);
hexagon_nn_->hexagon_nn_getlog(graph_id_, buf.data(), buf.size());
printf("----------------\n");
printf("Timestamp: %s\n\n", ctime(&my_time));
printf("Log\n%s\n", buf.data());
printf("----------------\n");
fflush(stdout);
}
void HexagonDelegateKernel::PrintPerformanceData() {
const int kMaxNodes = 2048;
const int kMaxNameLen = 100;
std::vector<hexagon_nn_perfinfo> perf_data(kMaxNodes);
std::vector<char> op_name(kMaxNameLen);
uint64_t total_cycles = 0;
uint64_t cum_cycles = 0;
uint64_t counter = 0;
unsigned int num_nodes;
printf("------- Performance Debug Data Start -------\n");
if (hexagon_nn_->hexagon_nn_get_perfinfo(graph_id_, perf_data.data(),
kMaxNodes, &num_nodes) != 0) {
printf("Failed fetching perf data.\n");
return;
}
printf("Total %d nodes.\n", num_nodes);
std::sort(perf_data.begin(), perf_data.begin() + num_nodes, PerfInfoCmp());
for (int i = 0; i < num_nodes; i++) {
total_cycles += GetCycles(perf_data[i]);
}
printf("Total %lu cycles\n", static_cast<unsigned long>(total_cycles));
  printf(
      "Node ID,\tOP Name,\tExecutions,\tCycles,\t%% of total,\tCumulative "
      "cycles,\tCumulative %%\n");
for (int i = 0; i < num_nodes; i++) {
counter = GetCycles(perf_data[i]);
cum_cycles += counter;
int op_type_id = builder_->GetOpTypeId(perf_data[i].node_id);
if (op_type_id >= 0 && hexagon_nn_->hexagon_nn_op_id_to_name(
op_type_id, op_name.data(), kMaxNameLen) != 0) {
printf("Failed to fetch name for %u with type %d\n", perf_data[i].node_id,
op_type_id);
continue;
}
printf("0x%x,\t%s,\t%d,\t%lu,\t%f %%,\t%lu,\t%f %%\n", perf_data[i].node_id,
(op_type_id < 0 ? "" : op_name.data()), perf_data[i].executions,
static_cast<unsigned long>(counter),
100.0 * (1.0 * counter / total_cycles),
static_cast<unsigned long>(cum_cycles),
100.0 * (1.0 * cum_cycles / total_cycles));
}
printf("------- Performance Debug Data End -------\n");
}
void HexagonDelegateKernel::PrintDebuggingGraph() {
const int kMaxBufLen = 100000;
std::vector<unsigned char> buf(kMaxBufLen);
if (hexagon_nn_->hexagon_nn_snpprint(graph_id_, buf.data(), kMaxBufLen) !=
0) {
printf("Error fetching graph debug details.\n");
return;
}
printf("------- Graph Debugging Start -------\n");
printf("%s\n", buf.data());
printf("------- Graph Debugging End -------\n");
}
void HexagonDelegateKernel::Teardown() {
auto* hexagon_nn = HexagonNNImplementation();
if (hexagon_nn != nullptr) {
hexagon_nn->hexagon_nn_global_teardown();
}
}
void HexagonDelegateKernel::InitState() {
auto* hexagon_nn = HexagonNNImplementation();
if (hexagon_nn != nullptr) {
hexagon_nn->hexagon_nn_global_init();
}
}
} // namespace tflite

View File

@ -0,0 +1,100 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_DELEGATE_KERNEL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_DELEGATE_KERNEL_H_
#include <time.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "hexagon/hexagon_nn_ops.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/delegates/hexagon/builders/op_builder.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_implementation.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Represents an abstraction of a Hexagon NNLib graph with functionality to
// initialize, prepare and invoke it based on the TFLite subgraph to be
// delegated.
class HexagonDelegateKernel {
public:
enum class HexagonKernelState {
HEALTHY = 0,
FAST_RPC_SETUP_FAILED = 1,
FAILED_TO_INIT_GRAPH = 2,
FAILED_TO_PREPARE_GRAPH = 3,
MULTIPLE_INPUTS = 4,
INPUT_RANK_NOT_SUPPORTED = 5,
MULTIPLE_OUTPUTS = 6,
FAILED_TO_EXECUTE_GRAPH = 7,
};
// Initialize the Hexagon graph and add required nodes.
TfLiteStatus Init(TfLiteContext* context, const TfLiteDelegateParams* params);
// Prepare the Hexagon graph with hexagon_nn_prepare.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
// Allocate Hexagon tensordefs for graph I/O & execute it.
TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);
~HexagonDelegateKernel();
// Sets the environment required for Hexagon execution: DSP attributes,
// rpcmem, etc.
static void InitState();
// Teardown the environment initialized in InitState.
static void Teardown();
private:
// Builds the Hexagon graph based on delegated TFLite subgraph.
TfLiteStatus BuildGraph(TfLiteContext* context,
const TfLiteIntArray* input_tensors,
const TfLiteIntArray* output_tensors);
void ReportError(TfLiteContext* context, HexagonKernelState state,
const std::string& msg);
void PrintLog();
// Prints performance information about the graph including cycles per node.
void PrintPerformanceData();
// Prints debugging information about the constructed graph.
// The amount of information can be increased with the debug level.
void PrintDebuggingGraph();
HexagonKernelState state_ = HexagonKernelState::HEALTHY;
const HexagonNN* hexagon_nn_ = nullptr; // Not owned.
std::unique_ptr<delegates::hexagon::GraphBuilder> builder_;
hexagon_nn_nn_id graph_id_ = -1;
// Indices of nodes in the delegated TfLite subgraph.
std::vector<int> nodes_;
::TfLiteHexagonDelegateOptions params_;
};
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_DELEGATE_KERNEL_H_


@ -0,0 +1,89 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_implementation.h"
#include <dlfcn.h>
#include <fcntl.h>
#include <cstdio>
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn_interface.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace {
void* LoadFunction(void* dl_handle, const char* name) {
TFLITE_DCHECK(dl_handle != nullptr);
auto* func_pt = dlsym(dl_handle, name);
if (func_pt == nullptr) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Function %s is NULL", name);
}
return func_pt;
}
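// Loads `method_name` from the library into the corresponding field of
// `hexagon_obj`; on a missing symbol, logs an error and returns the
// partially-initialized interface (leaving interface_loaded false).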
#define LOAD_FUNCTION(dl_handle, method_name, hexagon_obj) \
hexagon_obj.method_name = reinterpret_cast<method_name##_fn*>( \
LoadFunction(dl_handle, #method_name)); \
if ((hexagon_obj.method_name) == nullptr) { \
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "%s is NULL", (#method_name)); \
return hexagon_obj; \
}
HexagonNN CreateNewHexagonInterface() {
HexagonNN hexagon_nn;
void* libhexagon_interface =
dlopen("libhexagon_interface.so", RTLD_LAZY | RTLD_LOCAL);
if (libhexagon_interface == nullptr) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Failed to load libhexagon_interface.so");
return hexagon_nn;
}
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_config, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_init, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_prepare, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_set_powersave_level,
hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_set_debug_level, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_append_node, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_append_const_node, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_execute, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_execute_new, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_teardown, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_snpprint, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_getlog, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_get_perfinfo, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_reset_perfinfo, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_op_id_to_name, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_global_teardown, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_global_init, hexagon_nn);
LOAD_FUNCTION(libhexagon_interface, hexagon_nn_is_device_supported,
hexagon_nn);
hexagon_nn.interface_loaded = true;
return hexagon_nn;
}
} // namespace
const HexagonNN* HexagonNNImplementation() {
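// Function-local static: the interface is loaded exactly once, on first use;
// initialization is thread-safe in C++11 and later.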
static HexagonNN hexagon_nn = CreateNewHexagonInterface();
if (!hexagon_nn.interface_loaded) {
return nullptr;
}
return &hexagon_nn;
}
} // namespace tflite


@ -0,0 +1,137 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_IMPLEMENTATION_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_IMPLEMENTATION_H_
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn_interface.h"
namespace tflite {
// Holds the methods to use to Construct/Execute NN graph using Hexagon NNLib.
struct HexagonNN {
// Call this function before creating a graph. It allows the environment on
// the DSP to configure some settings.
hexagon_nn_config_fn* hexagon_nn_config;
// Creates a new graph and returns an identifier to refer to the new graph.
// After a graph is initialized, nodes can be added to it.
// The returned graph is empty and cannot be executed until all nodes have
// been added and the graph is finalized with hexagon_nn_prepare(). Multiple
// graphs can be created and can be kept alive in the DSP environment
// simultaneously.
hexagon_nn_init_fn* hexagon_nn_init;
// Provides a simple parameter between 0 and 255 to control the power saving
// mode.
// A level of 255 indicates that preference should be given to minimizing
// power consumption. A level of 0 indicates that preference should be given
// to executing as fast as possible.
//
// Returns 0 on success, otherwise failure.
hexagon_nn_set_powersave_level_fn* hexagon_nn_set_powersave_level;
// Changes the debug verbosity level for messages.
hexagon_nn_set_debug_level_fn* hexagon_nn_set_debug_level;
// Prepares a network for execution.
// This function is required after all the nodes have been appended and before
// execution.
// This call provides a hook where memory can be allocated, data
// can be rearranged, inputs and outputs can be linked up, and things in the
// graph can be optimized.
// Once a network has been prepared, it can no longer
// be appended to, but it can be executed.
//
// Returns 0 on success, otherwise failure.
hexagon_nn_prepare_fn* hexagon_nn_prepare;
// Adds an ordinary (non-constant) node to the graph.
// Non-constant nodes can have zero or more inputs and zero or more outputs.
// An input is described as a source node ID as well as an output index to
// refer to which one of several outputs a node may have.
// An output is described with a maximum size. The true size of an output can
// be computed dynamically, but the caller must define the maximum amount of
// data storage required by the output during node creation.
//
// Returns 0 on success, otherwise failure.
hexagon_nn_append_node_fn* hexagon_nn_append_node;
// Adds constant nodes to a graph.
// Constant nodes produce a single output that can be connected to one graph
// node input. Unique node_ids are required for referencing nodes when
// connecting the graph (for example, specifying which outputs of earlier
// nodes will be used as inputs to particular subsequent nodes). Node_ids are
// selected by the caller, but node_id=0 and node_id>0xF0000000 are reserved.
// Node_ids must be unique.
// *** NOTE: On SDM835 and older targets,
// hexagon_nn_append_const_node() will not work properly for arrays larger
// than 32 MB. Instead, use hexagon_nn_append_empty_const_node_large_array(),
// which expects the same arguments.
//
// Returns 0 on success, otherwise failure.
hexagon_nn_append_const_node_fn* hexagon_nn_append_const_node;
// Executes a network, with provided input data and returning output data.
// Execution will fail if the network has not been prepared.
// Input is provided to the INPUT node, and output is returned from the OUTPUT
// node.
//
// Returns 0 on success, otherwise failure.
hexagon_nn_execute_fn* hexagon_nn_execute;
// Newer version of hexagon_nn_execute that utilizes hexagon_nn_tensordefs to
// represent inputs & outputs. Executes a network with provided input tensors
// and returns output tensors. Execution will fail if the network has not
// been prepared.
//
// Returns 0 on success, otherwise failure.
hexagon_nn_execute_new_fn* hexagon_nn_execute_new;
// Tears down and frees an NN graph. This can be done at any time after
// hexagon_nn_init(). After this function has been invoked, the nn_id id is
// invalid.
//
// Returns 0 on success, otherwise failure.
hexagon_nn_teardown_fn* hexagon_nn_teardown;
// Writes a human-readable representation of the graph into the given
// buffer, for debugging.
hexagon_nn_snpprint_fn* hexagon_nn_snpprint;
// Copies recent log messages from the DSP into the given buffer.
hexagon_nn_getlog_fn* hexagon_nn_getlog;
// Fetches per-node performance counters from the graph.
hexagon_nn_get_perfinfo_fn* hexagon_nn_get_perfinfo;
// Resets the counters reported by hexagon_nn_get_perfinfo.
hexagon_nn_reset_perfinfo_fn* hexagon_nn_reset_perfinfo;
// Maps an op id to its name.
hexagon_nn_op_id_to_name_fn* hexagon_nn_op_id_to_name;
// Should be called once to shut down the DSP and clean up.
hexagon_nn_global_teardown_fn* hexagon_nn_global_teardown;
// Should be called once to initialize the DSP.
hexagon_nn_global_init_fn* hexagon_nn_global_init;
// Returns true if the device SoC is supported by the Hexagon library, false
// otherwise.
hexagon_nn_is_device_supported_fn* hexagon_nn_is_device_supported;
bool interface_loaded = false;
};
// Returns an instance of HexagonNN.
const HexagonNN* HexagonNNImplementation();
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_IMPLEMENTATION_H_
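For orientation, here is a minimal sketch of how this function-pointer table is typically driven end to end, based on the contracts documented above. The graph-construction calls are elided, and the exact signature of hexagon_nn_init (returning the graph id via an out-parameter) is an assumption for illustration:

```
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_implementation.h"

void RunGraphSketch() {
  const tflite::HexagonNN* nn = tflite::HexagonNNImplementation();
  if (nn == nullptr) return;     // libhexagon_interface.so failed to load.
  nn->hexagon_nn_global_init();  // One-time DSP environment setup.
  if (nn->hexagon_nn_is_device_supported()) {
    nn->hexagon_nn_config();  // Configure the DSP before creating a graph.
    hexagon_nn_nn_id graph_id;
    nn->hexagon_nn_init(&graph_id);  // Assumed out-parameter style.
    // ... hexagon_nn_append_const_node / hexagon_nn_append_node calls ...
    nn->hexagon_nn_prepare(graph_id);  // Finalize; no more appends allowed.
    // ... hexagon_nn_execute / hexagon_nn_execute_new per inference ...
    nn->hexagon_nn_teardown(graph_id);
  }
  nn->hexagon_nn_global_teardown();
}
```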


@ -0,0 +1,31 @@
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
package(default_visibility = [
"//visibility:public",
])
licenses(["notice"]) # Apache 2.0
cc_library(
name = "hexagon_nn_header",
hdrs = [
"hexagon_nn.h",
"hexagon_nn_init.h",
],
deps = [
"@hexagon_nn//:hexagon_nn_header",
],
)


@ -0,0 +1,21 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_HEXAGON_NN_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_HEXAGON_NN_H_
#include "hexagon/hexagon_nn.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn_init.h"
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_HEXAGON_NN_H_

View File

@ -0,0 +1,27 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_HEXAGON_NN_INIT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_HEXAGON_NN_INIT_H_
#ifdef __cplusplus
extern "C" {
#endif
void hexagon_nn_global_teardown(void);
void hexagon_nn_global_init(void);
bool hexagon_nn_is_device_supported();
#ifdef __cplusplus
}
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_HEXAGON_NN_INIT_H_


@ -0,0 +1,25 @@
VERS_1.0 {
global:
hexagon_nn_config;
hexagon_nn_init;
hexagon_nn_prepare;
hexagon_nn_set_powersave_level;
hexagon_nn_set_debug_level;
hexagon_nn_append_node;
hexagon_nn_append_const_node;
hexagon_nn_execute;
hexagon_nn_execute_new;
hexagon_nn_teardown;
hexagon_nn_snpprint;
hexagon_nn_getlog;
hexagon_nn_get_perfinfo;
hexagon_nn_reset_perfinfo;
hexagon_nn_op_id_to_name;
hexagon_nn_global_teardown;
hexagon_nn_global_init;
hexagon_nn_is_device_supported;
# Hide everything else.
local:
*;
};


@ -0,0 +1,57 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_INTERFACE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_INTERFACE_H_
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/hexagon_nn.h"
using hexagon_nn_config_fn = decltype(hexagon_nn_config);
using hexagon_nn_init_fn = decltype(hexagon_nn_init);
using hexagon_nn_set_powersave_level_fn =
decltype(hexagon_nn_set_powersave_level);
using hexagon_nn_set_debug_level_fn = decltype(hexagon_nn_set_debug_level);
using hexagon_nn_prepare_fn = decltype(hexagon_nn_prepare);
using hexagon_nn_append_node_fn = decltype(hexagon_nn_append_node);
using hexagon_nn_append_const_node_fn = decltype(hexagon_nn_append_const_node);
using hexagon_nn_execute_fn = decltype(hexagon_nn_execute);
using hexagon_nn_execute_new_fn = decltype(hexagon_nn_execute_new);
using hexagon_nn_teardown_fn = decltype(hexagon_nn_teardown);
using hexagon_nn_snpprint_fn = decltype(hexagon_nn_snpprint);
using hexagon_nn_getlog_fn = decltype(hexagon_nn_getlog);
using hexagon_nn_get_perfinfo_fn = decltype(hexagon_nn_get_perfinfo);
using hexagon_nn_reset_perfinfo_fn = decltype(hexagon_nn_reset_perfinfo);
using hexagon_nn_op_id_to_name_fn = decltype(hexagon_nn_op_id_to_name);
using hexagon_nn_global_teardown_fn = decltype(hexagon_nn_global_teardown);
using hexagon_nn_global_init_fn = decltype(hexagon_nn_global_init);
using hexagon_nn_is_device_supported_fn =
decltype(hexagon_nn_is_device_supported);
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_HEXAGON_NN_INTERFACE_H_


@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.tensorflow.lite">
<uses-sdk
android:targetSdkVersion="19" />
<application />
</manifest>


@ -0,0 +1,62 @@
load("@build_bazel_rules_android//android:rules.bzl", "android_library")
load("//tensorflow/lite:build_def.bzl", "tflite_jni_binary")
load("//tensorflow/lite/java:aar_with_jni.bzl", "aar_with_jni")
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"], # Apache 2.0
)
# EXPERIMENTAL: Native target that runs inference on the Hexagon backend.
# The Hexagon backend-related targets are intentionally not included in BUILD.bazel.
tflite_jni_binary(
name = "libtensorflowlite_hexagon_jni.so",
linkscript = "//tensorflow/lite/experimental/delegates/hexagon:version_script.lds",
tags = [
"manual",
"nobuilder",
"notap",
],
deps = [
"//tensorflow/lite/experimental/delegates/hexagon/java/src/main/native",
],
)
cc_library(
name = "tensorflowlite_hexagon",
srcs = [
"libtensorflowlite_hexagon_jni.so",
] + select({
"//tensorflow:android_arm64": ["@hexagon_nn//:hexagon/arm64-v8a/libhexagon_interface.so"],
"//tensorflow:android_arm": ["@hexagon_nn//:hexagon/armeabi-v7a/libhexagon_interface.so"],
"//conditions:default": [],
}),
tags = [
"manual",
"nobuilder",
"notap",
],
)
android_library(
name = "tensorflowlite_java_hexagon",
srcs = ["//tensorflow/lite/experimental/delegates/hexagon/java/src/main/java/org/tensorflow/lite/experimental:hexagon_delegate"],
manifest = "AndroidManifest.xml",
proguard_specs = ["proguard.flags"],
tags = [
"manual",
"nobuilder",
"notap",
],
deps = [
":tensorflowlite_hexagon",
"//tensorflow/lite/java:tensorflowlite_java",
"@org_checkerframework_qual",
],
)
aar_with_jni(
name = "tensorflow-lite-hexagon",
android_library = ":tensorflowlite_java_hexagon",
headers = ["//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate.h"],
)


@ -0,0 +1,3 @@
-keepclassmembers class org.tensorflow.lite.NativeInterpreterWrapper {
private long inferenceDurationNanoseconds;
}


@ -0,0 +1,7 @@
licenses(["notice"]) # Apache 2.0
filegroup(
name = "hexagon_delegate",
srcs = ["HexagonDelegate.java"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,69 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite.experimental;
import android.content.Context;
import java.io.Closeable;
import org.tensorflow.lite.Delegate;
/** {@link Delegate} for Hexagon inference. */
public class HexagonDelegate implements Delegate, Closeable {
private static final long INVALID_DELEGATE_HANDLE = 0;
private static final String TFLITE_HEXAGON_LIB = "tensorflowlite_hexagon_jni";
private long delegateHandle;
/*
* Creates a new HexagonDelegate object given the current 'context'.
* Throws UnsupportedOperationException if Hexagon DSP delegation is not available
* on this device.
*/
public HexagonDelegate(Context context) throws UnsupportedOperationException {
setAdspLibraryPath(context.getApplicationInfo().nativeLibraryDir);
delegateHandle = createDelegate();
if (delegateHandle == INVALID_DELEGATE_HANDLE) {
throw new UnsupportedOperationException("This Device doesn't support Hexagon DSP execution.");
}
}
@Override
public long getNativeHandle() {
return delegateHandle;
}
/**
* Frees TFLite resources in C runtime.
*
* <p>User is expected to call this method explicitly.
*/
@Override
public void close() {
if (delegateHandle != INVALID_DELEGATE_HANDLE) {
deleteDelegate(delegateHandle);
delegateHandle = INVALID_DELEGATE_HANDLE;
}
}
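// Loads the native JNI library when this class is first loaded.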
static {
System.loadLibrary(TFLITE_HEXAGON_LIB);
}
private static native long createDelegate();
private static native void deleteDelegate(long delegateHandle);
private static native boolean setAdspLibraryPath(String libraryPath);
}


@ -0,0 +1,25 @@
# Description:
# Java Native Interface (JNI) library intended for implementing the
# TensorFlow Lite Hexagon delegate Java API using the TensorFlow Lite CC library.
package(default_visibility = ["//tensorflow/lite/experimental/delegates/hexagon/java:__subpackages__"])
load("//tensorflow/lite:build_def.bzl", "tflite_copts")
licenses(["notice"]) # Apache 2.0
cc_library(
name = "native",
srcs = ["hexagon_delegate_jni.cc"],
copts = tflite_copts(),
tags = [
"manual",
"nobuilder",
"notap",
],
deps = [
"//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate",
"//tensorflow/lite/java/jni",
],
alwayslink = 1,
)


@ -0,0 +1,55 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <jni.h>
#include <sstream>
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
#ifdef __cplusplus
extern "C" {
#endif
JNIEXPORT jlong JNICALL
Java_org_tensorflow_lite_experimental_HexagonDelegate_createDelegate(
JNIEnv* env, jclass clazz) {
// Auto-choosing the best performing config for closed release.
TfLiteHexagonDelegateOptions options = {0};
TfLiteHexagonInit();
return reinterpret_cast<jlong>(TfLiteHexagonDelegateCreate(&options));
}
JNIEXPORT void JNICALL
Java_org_tensorflow_lite_experimental_HexagonDelegate_deleteDelegate(
JNIEnv* env, jclass clazz, jlong delegate) {
TfLiteHexagonDelegateDelete(reinterpret_cast<TfLiteDelegate*>(delegate));
TfLiteHexagonTearDown();
}
JNIEXPORT jboolean JNICALL
Java_org_tensorflow_lite_experimental_HexagonDelegate_setAdspLibraryPath(
JNIEnv* env, jclass clazz, jstring native_lib_path) {
const char* lib_dir_path = env->GetStringUTFChars(native_lib_path, nullptr);
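// The fastRPC runtime consults ADSP_LIBRARY_PATH when loading the Hexagon
// skel libraries (e.g. libhexagon_nn_skel.so) onto the DSP.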
std::stringstream path;
path << lib_dir_path
<< ";/system/lib/rfsa/adsp;/system/vendor/lib/rfsa/adsp;/dsp";
return setenv("ADSP_LIBRARY_PATH", path.str().c_str(), 1 /*override*/) == 0
? JNI_TRUE
: JNI_FALSE;
}
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus


@ -0,0 +1,270 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/utils.h"
#include <vector>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace {
bool IsActivationReluOrNone(TfLiteFusedActivation activation) {
return (activation == kTfLiteActRelu || activation == kTfLiteActRelu6 ||
activation == kTfLiteActRelu1 || activation == kTfLiteActNone);
}
bool TensorTypeMatch(int tensor_id, TfLiteContext* context,
TfLiteType tensor_type) {
const auto& tensor = context->tensors[tensor_id];
return tensor.type == tensor_type;
}
bool InputsWithCorrectTypes(const TfLiteNode* node, TfLiteContext* context,
const std::vector<TfLiteType>& input_types) {
if (node->inputs->size != input_types.size()) return false;
for (int i = 0; i < input_types.size(); ++i) {
if (!TensorTypeMatch(node->inputs->data[i], context, input_types[i]))
return false;
}
return true;
}
} // namespace
TfLiteStatus Get4DShape(unsigned int* batch_size, unsigned int* height_size,
unsigned int* width_size, unsigned int* depth_size,
TfLiteIntArray* dims) {
if (dims->size > 4) return kTfLiteError;
unsigned int* dim[] = {batch_size, height_size, width_size, depth_size};
for (int i = 0; i < 4; ++i) *(dim[i]) = 1;
for (int i = 4 - dims->size; i < 4; ++i) {
*dim[i] = dims->data[i - (4 - dims->size)];
}
return kTfLiteOk;
}
bool IsNodeSupportedByHexagon(const TfLiteRegistration* registration,
const TfLiteNode* node, TfLiteContext* context) {
// Ensure all inputs & outputs have dim <= 4.
int tensor_id;
for (int i = 0; i < node->inputs->size; ++i) {
tensor_id = node->inputs->data[i];
const auto& tensor = context->tensors[tensor_id];
if (tensor.dims->size > 4) return false;
}
for (int i = 0; i < node->outputs->size; ++i) {
tensor_id = node->outputs->data[i];
const auto& tensor = context->tensors[tensor_id];
if (tensor.dims->size > 4) return false;
}
// Most hexagon kernels are not compatible with op versions > 1.
// We maintain a 'whitelist' here to ensure we don't accept unintended nodes.
if (registration->version > 1) {
if (registration->builtin_code == kTfLiteBuiltinDepthwiseConv2d &&
registration->version == 2) {
return true;
}
return false;
}
switch (registration->builtin_code) {
case kTfLiteBuiltinAdd: {
if (!InputsWithCorrectTypes(node, context, {kTfLiteUInt8, kTfLiteUInt8}))
return false;
const TfLiteAddParams* add_params =
reinterpret_cast<const TfLiteAddParams*>(node->builtin_data);
return IsActivationReluOrNone(add_params->activation);
}
case kTfLiteBuiltinMul: {
if (!InputsWithCorrectTypes(node, context, {kTfLiteUInt8, kTfLiteUInt8}))
return false;
const TfLiteMulParams* mul_params =
reinterpret_cast<const TfLiteMulParams*>(node->builtin_data);
// TODO(b/129276536): Add support for activation on Mul node.
return mul_params->activation == kTfLiteActNone;
}
case kTfLiteBuiltinSub: {
if (!InputsWithCorrectTypes(node, context, {kTfLiteUInt8, kTfLiteUInt8}))
return false;
const TfLiteSubParams* sub_params =
reinterpret_cast<const TfLiteSubParams*>(node->builtin_data);
return IsActivationReluOrNone(sub_params->activation);
}
case kTfLiteBuiltinSum:
case kTfLiteBuiltinMean: {
// TODO(b/139277813): Enable these when they pass unit tests. These seem
// to recompute the output min/max instead of taking them as inputs, which
// causes an unexpected shift in dequantized values.
return false;
}
case kTfLiteBuiltinPad: {
// TODO(b/139277813): Currently we only support padding with the default
// of 0. Add support for user-defined constant if required.
return (
node->inputs->size == 2 &&
InputsWithCorrectTypes(node, context, {kTfLiteUInt8, kTfLiteInt32}) &&
IsConstantTensor(&context->tensors[node->inputs->data[1]]));
}
case kTfLiteBuiltinFullyConnected: {
if (!InputsWithCorrectTypes(node, context,
{kTfLiteUInt8, kTfLiteUInt8, kTfLiteInt32}))
return false;
const TfLiteFullyConnectedParams* matmul_params =
reinterpret_cast<const TfLiteFullyConnectedParams*>(
node->builtin_data);
return (IsActivationReluOrNone(matmul_params->activation) &&
matmul_params->keep_num_dims == false &&
matmul_params->weights_format ==
kTfLiteFullyConnectedWeightsFormatDefault);
}
case kTfLiteBuiltinConcatenation: {
// All concatenated tensors must be Uint8 type.
for (int i = 0; i < node->inputs->size; ++i) {
if (!TensorTypeMatch(node->inputs->data[i], context, kTfLiteUInt8))
return false;
}
// Hexagon only supports concatenation at axis 3.
const TfLiteConcatenationParams* concat_params =
reinterpret_cast<const TfLiteConcatenationParams*>(
node->builtin_data);
return (concat_params->axis == 3);
}
case kTfLiteBuiltinMaxPool2d: {
if (!InputsWithCorrectTypes(node, context, {kTfLiteUInt8})) return false;
// TODO(b/129276536): Add support for activation here.
const TfLitePoolParams* pool_params =
reinterpret_cast<const TfLitePoolParams*>(node->builtin_data);
return pool_params->activation == kTfLiteActNone;
}
case kTfLiteBuiltinAveragePool2d: {
if (!InputsWithCorrectTypes(node, context, {kTfLiteUInt8})) return false;
// AvgPool works fine for filter dim <=7.
const TfLitePoolParams* pool_params =
reinterpret_cast<const TfLitePoolParams*>(node->builtin_data);
return (node->inputs->size == 1 &&
pool_params->activation == kTfLiteActNone);
}
case kTfLiteBuiltinTransposeConv: {
if (!InputsWithCorrectTypes(node, context,
{kTfLiteInt32, kTfLiteUInt8, kTfLiteUInt8}))
return false;
const TfLiteTransposeConvParams* params =
reinterpret_cast<const TfLiteTransposeConvParams*>(
node->builtin_data);
return (params->stride_height <= 3 && params->stride_width <= 3 &&
(params->padding == kTfLitePaddingSame ||
params->padding == kTfLitePaddingValid));
}
case kTfLiteBuiltinConv2d: {
if (!InputsWithCorrectTypes(node, context,
{kTfLiteUInt8, kTfLiteUInt8, kTfLiteInt32}))
return false;
const TfLiteConvParams* conv_params =
reinterpret_cast<const TfLiteConvParams*>(node->builtin_data);
return (IsActivationReluOrNone(conv_params->activation) &&
conv_params->stride_height <= 3 &&
conv_params->stride_width <= 3 &&
conv_params->dilation_height_factor == 1 &&
conv_params->dilation_width_factor == 1);
}
case kTfLiteBuiltinDepthwiseConv2d: {
if (!InputsWithCorrectTypes(node, context,
{kTfLiteUInt8, kTfLiteUInt8, kTfLiteInt32}))
return false;
// Hexagon only supports width of 3 for Depthwise Conv.
const auto& tensor = context->tensors[node->inputs->data[1]];
if (tensor.dims->data[2] != 3) return false;
const TfLiteDepthwiseConvParams* conv_params =
reinterpret_cast<const TfLiteDepthwiseConvParams*>(
node->builtin_data);
const bool dilation = conv_params->dilation_height_factor != 1 ||
conv_params->dilation_width_factor != 1;
if (dilation) {
// We only support dilations when stride == 1.
if (conv_params->stride_height != 1 || conv_params->stride_width != 1)
return false;
}
return (IsActivationReluOrNone(conv_params->activation) &&
conv_params->stride_height <= 3 &&
conv_params->stride_width <= 3 &&
conv_params->depth_multiplier == 1);
}
case kTfLiteBuiltinReshape: {
if (node->inputs->size > 2 ||
!TensorTypeMatch(node->inputs->data[0], context, kTfLiteUInt8))
return false;
return true;
}
case kTfLiteBuiltinSoftmax:
case kTfLiteBuiltinRelu:
case kTfLiteBuiltinRelu6:
case kTfLiteBuiltinTanh:
case kTfLiteBuiltinLogistic: {
return InputsWithCorrectTypes(node, context, {kTfLiteUInt8});
}
case kTfLiteBuiltinResizeNearestNeighbor: {
return InputsWithCorrectTypes(node, context,
{kTfLiteUInt8, kTfLiteInt32});
}
case kTfLiteBuiltinL2Normalization: {
if (!InputsWithCorrectTypes(node, context, {kTfLiteUInt8})) return false;
const TfLiteL2NormParams* norm_params =
reinterpret_cast<const TfLiteL2NormParams*>(node->builtin_data);
return (norm_params->activation == kTfLiteActNone);
}
case kTfLiteBuiltinArgMax:
case kTfLiteBuiltinArgMin:
return InputsWithCorrectTypes(node, context,
{kTfLiteUInt8, kTfLiteInt32});
case kTfLiteBuiltinSplit: {
if (!InputsWithCorrectTypes(node, context, {kTfLiteInt32, kTfLiteUInt8}))
return false;
const auto& input_tensor = context->tensors[node->inputs->data[1]];
const bool is_four_dim_or_less = input_tensor.dims->size < 5;
// We need splitting axis to be constant, so Hexagon knows output shapes.
return is_four_dim_or_less &&
IsConstantTensor(&context->tensors[node->inputs->data[0]]);
}
case kTfLiteBuiltinResizeBilinear: {
if (!InputsWithCorrectTypes(node, context,
{kTfLiteUInt8, kTfLiteInt32}) ||
!IsConstantTensor(&context->tensors[node->inputs->data[1]])) {
return false;
}
const auto& size_tensor = context->tensors[node->inputs->data[1]];
// TODO(b/143105433): Latency increases significantly with large size
// values. Limiting to 65 for now.
return NumElements(&size_tensor) == 2 && size_tensor.data.i32[0] < 66 &&
size_tensor.data.i32[1] < 66;
}
case kTfLiteBuiltinNeg: {
return InputsWithCorrectTypes(node, context, {kTfLiteUInt8});
}
case kTfLiteBuiltinTranspose: {
return InputsWithCorrectTypes(node, context,
{kTfLiteUInt8, kTfLiteInt32});
}
default:
return false;
}
return false;
}
} // namespace tflite


@ -0,0 +1,38 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_UTILS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_UTILS_H_
#include "tensorflow/lite/c/common.h"
namespace tflite {
// Interprets data from 'dims' as a 4D shape {batch, height, width, depth} and
// populates the corresponding values. If dims->size < 4, the shape is prefixed
// with 1s.
// For example, dims {2, 3} is interpreted as: {1, 1, 2, 3}.
// Returns kTfLiteError if dims->size > 4, kTfLiteOk otherwise.
TfLiteStatus Get4DShape(unsigned int* batch_size, unsigned int* height_size,
unsigned int* width_size, unsigned int* depth_size,
TfLiteIntArray* dims);
// Returns true if the provided node is supported by Hexagon NNLib in the
// current context.
bool IsNodeSupportedByHexagon(const TfLiteRegistration* registration,
const TfLiteNode* node, TfLiteContext* context);
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_HEXAGON_UTILS_H_


@ -0,0 +1,71 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/delegates/hexagon/utils.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace {
TEST(UtilsTest, Get4DShapeTest_4DInput) {
unsigned int batch_dim, height_dim, width_dim, depth_dim;
TfLiteIntArray* shape_4d = TfLiteIntArrayCreate(4);
shape_4d->data[0] = 4;
shape_4d->data[1] = 3;
shape_4d->data[2] = 2;
shape_4d->data[3] = 1;
EXPECT_EQ(
Get4DShape(&batch_dim, &height_dim, &width_dim, &depth_dim, shape_4d),
kTfLiteOk);
EXPECT_EQ(batch_dim, shape_4d->data[0]);
EXPECT_EQ(height_dim, shape_4d->data[1]);
EXPECT_EQ(width_dim, shape_4d->data[2]);
EXPECT_EQ(depth_dim, shape_4d->data[3]);
TfLiteIntArrayFree(shape_4d);
}
TEST(UtilsTest, Get4DShapeTest_2DInput) {
unsigned int batch_dim, height_dim, width_dim, depth_dim;
TfLiteIntArray* shape_2d = TfLiteIntArrayCreate(2);
shape_2d->data[0] = 4;
shape_2d->data[1] = 3;
EXPECT_EQ(
Get4DShape(&batch_dim, &height_dim, &width_dim, &depth_dim, shape_2d),
kTfLiteOk);
EXPECT_EQ(batch_dim, 1);
EXPECT_EQ(height_dim, 1);
EXPECT_EQ(width_dim, shape_2d->data[0]);
EXPECT_EQ(depth_dim, shape_2d->data[1]);
TfLiteIntArrayFree(shape_2d);
}
TEST(UtilsTest, Get4DShapeTest_5DInput) {
unsigned int batch_dim, height_dim, width_dim, depth_dim;
TfLiteIntArray* shape_5d = TfLiteIntArrayCreate(5);
EXPECT_EQ(
Get4DShape(&batch_dim, &height_dim, &width_dim, &depth_dim, shape_5d),
kTfLiteError);
TfLiteIntArrayFree(shape_5d);
}
} // namespace
} // namespace tflite


@ -0,0 +1,13 @@
VERS_1.0 {
# Export JNI symbols.
global:
Java_*;
JNI_OnLoad;
JNI_OnUnload;
# TODO(b/138605512): Remove this and build separate .so for c++ api ?
TfLiteHexagon*;
# Hide everything else.
local:
*;
};


@ -0,0 +1,265 @@
## TensorFlow Lite Hexagon Delegate Quick Guide
[TOC]
This document explains how to use the TensorFlow Lite Hexagon Delegate in your
application using the Java and/or C API. The delegate leverages the Qualcomm
Hexagon library to execute quantized kernels on the DSP. Note that the delegate
is intended to *complement* NNAPI functionality, particularly for devices where
NNAPI DSP acceleration is unavailable (e.g., on older devices, or devices that
don't yet have a DSP NNAPI driver).
Note: This delegate is in an experimental (beta) phase.
**Supported devices:**
Currently most
[Qualcomm SoCs](https://en.wikipedia.org/wiki/List_of_Qualcomm_Snapdragon_systems-on-chip)
are supported, including:
* Snapdragon 835 (682 DSP)
* Snapdragon 660/820/821 (680 DSP)
* Snapdragon 710/845 (685 DSP)
* Snapdragon 8150/855 (690 DSP)
**Supported models:**
The Hexagon delegate currently supports quantized models generated using
[quantization-aware training](https://github.com/tensorflow/tensorflow/tree/r1.13/tensorflow/contrib/quantize),
e.g.,
[these quantized models](https://www.tensorflow.org/lite/guide/hosted_models#quantized_models)
hosted on the TensorFlow Lite repo. It does not (yet) support models with
[8-bit symmetric quantization spec](https://www.tensorflow.org/lite/performance/quantization_spec).
Sample models include
[MobileNet V1](https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz),
[SSD Mobilenet](https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip).
## Hexagon Delegate Java API {#hexagon-delegate-java-api}
```
public class HexagonDelegate implements Delegate, Closeable {
/*
* Creates a new HexagonDelegate object given the current 'context'.
* Throws UnsupportedOperationException if Hexagon DSP delegation is not
* available on this device.
*/
public HexagonDelegate(Context context) throws UnsupportedOperationException
/**
* Frees TFLite resources in C runtime.
*
* User is expected to call this method explicitly.
*/
@Override
public void close();
}
```
## Example Usage from Java {#example-usage-from-java}
1. Add the tensorflow-lite-hexagon.aar to your app - this is in addition to
the standard tensorflow-lite AAR (nightly or release).
[Relevant instructions](https://stackoverflow.com/questions/16682847/how-to-manually-include-external-aar-package-using-new-gradle-android-build-syst).
1. Run “hexagon_nn_skel.run” - Note: you will need to accept the license
agreement. It should provide 3 different shared libraries
“libhexagon_nn_skel.so”, “libhexagon_nn_skel_v65.so”,
“libhexagon_nn_skel_v66.so” \
Include all 3 in your app with other shared libraries. See
[How to add shared library to your app](#how-to-add-shared-library-to-your-app)
\
The delegate will automatically pick the one with the best performance
depending on the device. \
Note: If your app will be built for both 32 and 64-bit ARM devices, then you
will need to add the hexagon shared libs to both 32 and 64-bit lib folders.
1. Create a delegate, example:
```
import org.tensorflow.lite.experimental.HexagonDelegate;
// Create the Delegate instance.
try {
hexagonDelegate = new HexagonDelegate(activity);
tfliteOptions.addDelegate(hexagonDelegate);
} catch (UnsupportedOperationException e) {
// Hexagon delegate is not supported on this device.
}
tfliteInterpreter = new Interpreter(tfliteModel, tfliteOptions);
// Dispose after finished with inference.
tfliteInterpreter.close();
if (hexagonDelegate != null) {
hexagonDelegate.close();
}
```
## Hexagon Delegate C API {#hexagon-delegate-c-api}
```
struct TfLiteHexagonDelegateOptions {
// This corresponds to the debug level in the hexagon SDK. 0 (default)
// means no debug.
int debug_level;
// This corresponds to powersave_level in the hexagon SDK, where 0 (default)
// means high performance, which also means more power consumption.
int powersave_level;
// If set to true, performance information about the graph will be dumped
// to standard output; this includes cpu cycles.
// WARNING: Experimental and subject to change anytime.
bool print_graph_profile;
// If set to true, the graph structure will be dumped to standard output.
// This is useful for seeing which nodes actually executed on the DSP.
// Combined with 'debug_level', more information will be printed.
// WARNING: Experimental and subject to change anytime.
bool print_graph_debug;
};
// Returns a delegate that uses the Hexagon SDK for op execution.
// The returned delegate must outlive the interpreter.
TfLiteDelegate*
TfLiteHexagonDelegateCreate(const TfLiteHexagonDelegateOptions* options);
// Do any needed cleanup and delete 'delegate'.
void TfLiteHexagonDelegateDelete(TfLiteDelegate* delegate);
// Initializes the DSP connection.
// This should be called before doing any usage of the delegate.
// "lib_directory_path": Path to the directory which holds the
// shared libraries for the Hexagon NN libraries on the device.
void TfLiteHexagonInitWithPath(const char* lib_directory_path);
// Same as the above method, but doesn't accept the path parameter.
// Assumes the environment setup is already done. Only initializes Hexagon.
void TfLiteHexagonInit();
// Clean up and switch off the DSP connection.
// This should be called after all processing is done and the delegate is
// deleted.
void TfLiteHexagonTearDown();
```
## Example Usage from C {#example-usage-from-c}
1. Add the tensorflow-lite-hexagon.aar to your app - this is in addition to
the standard tensorflow-lite AAR (nightly or release).
[Relevant instructions](https://stackoverflow.com/questions/16682847/how-to-manually-include-external-aar-package-using-new-gradle-android-build-syst).
1. Include the provided hexagon_delegate.h
1. Run “hexagon_nn_skel.run” - Note: you will need to accept the license
agreement. It should provide 3 different shared libraries \
“libhexagon_nn_skel.so”, “libhexagon_nn_skel_v65.so”,
“libhexagon_nn_skel_v66.so” \
Include all 3 in your app with other shared libraries. See How to add shared
library to your app. \
The delegate will automatically pick the one with the best performance
depending on the device. \
Note: If your app will be built for both 32 and 64-bit ARM devices, then you
will need to add the hexagon shared libs to both 32 and 64-bit lib folders.
1. In your code, ensure the native Hexagon library is loaded. This can be done
by calling `System.loadLibrary("tensorflowlite_hexagon_jni");` \
in your Activity or Java entry-point.
1. Create a delegate, example:
```
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
// Assuming shared libraries are under "/data/local/tmp/"
// If files are packaged with native lib in android App then it
// will typically be equivalent to the path provided by
// "getContext().getApplicationInfo().nativeLibraryDir"
const char library_directory_path[] = "/data/local/tmp/";
TfLiteHexagonInitWithPath(library_directory_path); // Needed once at startup.
::tflite::TfLiteHexagonDelegateOptions params = {0};
// 'delegate_ptr' needs to outlive the interpreter. For example, if the use
// case needs to resize inputs or do anything else that can trigger delegates
// to be re-applied, then 'delegate_ptr' must outlive the interpreter.
auto* delegate_ptr = ::tflite::TfLiteHexagonDelegateCreate(&params);
Interpreter::TfLiteDelegatePtr delegate(delegate_ptr,
[](TfLiteDelegate* delegate) {
::tflite::TfLiteHexagonDelegateDelete(delegate);
});
interpreter->ModifyGraphWithDelegate(delegate.get());
// After usage of delegate.
TfLiteHexagonTearDown(); // Needed once at end of app/DSP usage.
```
## How to add shared library to your app {#how-to-add-shared-library-to-your-app}
Create the folder “app/src/main/jniLibs”, then create a directory for each
target architecture.
For example,
ARM 64-bit: “app/src/main/jniLibs/arm64-v8a”
ARM 32-bit: “app/src/main/jniLibs/armeabi-v7a”
Put each .so in the directory that matches its architecture.
## Feedback {#feedback}
For issues, please create a
[github](https://github.com/tensorflow/tensorflow/issues/new?template=50-other-issues.md)
issue with all the necessary repro details, including the phone model and board
used (`adb shell getprop ro.product.device` and `adb shell getprop
ro.board.platform`).
## FAQ {#faq}
* Will the delegate support models created using
[post-training quantization](https://www.tensorflow.org/lite/performance/post_training_quantization)?
* This is tentatively planned for a future release, though there is no
concrete timeline.
* Which ops are supported by the delegate?
* Initial Dogfood list of supported ops:
* Add
* ArgMax
* ArgMin
* AveragePool2D:
* Constraints:
* No Activation
* Concat
* Conv2D:
* Constraints:
* stride width/height <= 3
* DepthwiseConv2D:
* Constraints:
* Filter width == 3
* depth_multiplier == 1
* dilation only supported when stride == 1
* Otherwise, stride height/width <= 3
* FullyConnected (without any activation)
* L2Normalization (without any activation)
* Logistic (aka Sigmoid)
* MaxPool2D (without any activation)
* Mul (without any activation)
* Neg
* Pad: Only supports 0 padding
* Relu
* Relu6
* Reshape
* Resize Bilinear:
* Constraints:
* Requested size <= 65
* Resize Nearest Neighbor
* SoftMax
* Split
* Sub
* Tanh
* Transpose
* TransposeConv2D:
* Constraints:
* stride height/width <= 3
* dilation height/width == 1
* How can I tell that the model is using the DSP when I enable the delegate?
* A log message will be printed indicating whether or not the delegate was
created, and another one reporting how many nodes run on the delegate
(see the options sketch after this list for making these dumps more
verbose). \
"Created TensorFlow Lite delegate for Hexagon." \
"Hexagon delegate: X nodes delegated out of Y nodes."
* Do I need all ops in the model to be supported to run the delegate?
* No, the model will be partitioned into subgraphs based on the supported
ops. Any unsupported ops will run on the CPU.
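For the logging question above, here is a minimal sketch (C API; it uses only
the option fields documented earlier, and error handling is elided) of making
the delegate dump what actually ran on the DSP. As in the C example above,
these names may need `::tflite::` qualification depending on how the header is
included:

```
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"

TfLiteHexagonInit();  // Or TfLiteHexagonInitWithPath(...) once at startup.
TfLiteHexagonDelegateOptions options = {0};
options.debug_level = 1;             // More verbose DSP-side logging.
options.print_graph_profile = true;  // Dump per-node cycle counts.
options.print_graph_debug = true;    // Dump the constructed Hexagon graph.
TfLiteDelegate* delegate = TfLiteHexagonDelegateCreate(&options);
// ... interpreter->ModifyGraphWithDelegate(delegate) + inference ...
TfLiteHexagonDelegateDelete(delegate);
TfLiteHexagonTearDown();
```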


@ -27,6 +27,7 @@ load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
@ -46,6 +47,7 @@ def initialize_third_party():
clog()
cpuinfo()
flatbuffers()
hexagon_nn()
highwayhash()
hwloc()
icu()

third_party/hexagon/BUILD

@ -0,0 +1,48 @@
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
package(default_visibility = [
"//visibility:public",
])
licenses([
"notice", # BSD-3-Clause-Clear
])
exports_files(glob(["hexagon/**/*.so"]))
# Just the header file, needed for data types in the interface.
cc_library(
name = "hexagon_nn_header",
hdrs = [
"hexagon/hexagon_nn.h",
],
tags = [
"manual",
"nobuilder",
],
)
cc_library(
name = "hexagon_nn_ops",
hdrs = [
"hexagon/hexagon_nn_ops.h",
"hexagon/ops.def",
],
tags = [
"manual",
"nobuilder",
],
)

third_party/hexagon/LICENSE

@ -0,0 +1,35 @@
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

third_party/hexagon/workspace.bzl

@ -0,0 +1,13 @@
"""Loads the Hexagon NN Header files library, used by TF Lite."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
third_party_http_archive(
name = "hexagon_nn",
sha256 = "e972f86eb8bcfb1ee93ff3dc7aa4518948e3941b5ea0945f5c9307b2d3334225",
urls = [
"http://mirror.tensorflow.org/storage.cloud.google.com/download.tensorflow.org/tflite/hexagon_nn_headers_v1.10.3.1.0.tgz",
],
build_file = "//third_party/hexagon:BUILD",
)