Move function definitions into .cc file
- Clarified some documentation and method names.
- Changed char* buffer usage to std::string where possible.

PiperOrigin-RevId: 344083133
Change-Id: I29f3acc4967d71e540d33de14f96d770ad1a8c95
parent 603d88d72c
commit 6a30edd2de
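In short, this change replaces the fixed-size char buffer (str_debug_name_, formatted with absl::SNPrintF) by a lazily built std::string (debug_name_), and moves the inline OpBuilder definitions into op_builder.cc. Below is a minimal, self-contained sketch of the new naming pattern; the class name OpBuilderSketch is illustrative only and not part of the commit, while SetDebugName() mirrors the definition added to op_builder.cc in the diff.

#include <string>

// Sketch of the lazily initialized debug-name pattern introduced here.
class OpBuilderSketch {
 public:
  // Builds "ClassName_<node id>" on first use and caches it.
  const std::string& DebugName() {
    if (debug_name_.empty()) SetDebugName("OpBuilderSketch", node_id_);
    return debug_name_;
  }

 protected:
  // Mirrors OpBuilder::SetDebugName() from op_builder.cc below.
  void SetDebugName(const char* name, int id) {
    debug_name_ = std::string(name) + "_" + std::to_string(id);
  }

  int node_id_ = 0;
  std::string debug_name_;
};

Compared with the old GetDebugName()/absl::SNPrintF approach, this drops the 100-byte buffer per builder and removes the absl string dependencies from op_builder.h.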
@@ -24,10 +24,9 @@ namespace tflite {
 namespace delegates {
 namespace coreml {

-const char* ActivationLayerBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("ActivationLayerBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& ActivationLayerBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("ActivationLayerBuilder", node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* ActivationLayerBuilder::Build() {
@@ -31,7 +31,7 @@ class ActivationLayerBuilder : public OpBuilder {
                                   TfLiteFusedActivation activation)
       : OpBuilder(graph_builder), activation_(activation) {}

-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -24,10 +24,9 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
 namespace coreml {
-const char* AddOpBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("AddOpBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& AddOpBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("AddOpBuilder", node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* AddOpBuilder::Build() {
@@ -25,7 +25,7 @@ class AddOpBuilder : public OpBuilder {
  public:
   explicit AddOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}
-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -26,10 +26,9 @@ class ConcatenationOpBuilder : public OpBuilder {
   explicit ConcatenationOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}

-  const char* DebugName() override {
-    if (!str_debug_name_[0])
-      GetDebugName("ConcatOpBuilder", node_id_, str_debug_name_);
-    return str_debug_name_;
+  const std::string& DebugName() override {
+    if (debug_name_.empty()) SetDebugName("ConcatOpBuilder", node_id_);
+    return debug_name_;
   }

   CoreML::Specification::NeuralNetworkLayer* Build() override;
@@ -25,10 +25,9 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
 namespace coreml {
-const char* ConvolutionOpBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("ConvolutionOpBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& ConvolutionOpBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("ConvolutionOpBuilder", node_id_);
+  return debug_name_;
 }

 void ConvolutionOpBuilder::SetWeights(TfLiteTensor* weights) {
@@ -33,7 +33,7 @@ class ConvolutionOpBuilder : public OpBuilder {
                                 ConvolutionType conv_type)
       : OpBuilder(graph_builder), conv_type_(conv_type) {}

-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -24,7 +24,10 @@ CoreML::Specification::NeuralNetworkLayer* DummyOpBuilder::Build() {
   return nullptr;
 }

-const char* DummyOpBuilder::DebugName() { return "Dummy OpBuilder"; }
+const std::string& DummyOpBuilder::DebugName() {
+  SetDebugName("DummyOpBuilder", node_id_);
+  return debug_name_;
+}

 TfLiteStatus DummyOpBuilder::PopulateSubgraph(TfLiteContext* context) {
   return kTfLiteOk;
@@ -34,6 +37,16 @@ OpBuilder* CreateDummyOpBuilder(GraphBuilder* graph_builder) {
   return new DummyOpBuilder(graph_builder);
 }

+TfLiteStatus DummyOpBuilder::RegisterInputs(const TfLiteIntArray* inputs,
+                                            TfLiteContext* context) {
+  return kTfLiteOk;
+}
+
+TfLiteStatus DummyOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
+                                             TfLiteContext* context) {
+  return kTfLiteOk;
+}
+
 }  // namespace coreml
 }  // namespace delegates
 }  // namespace tflite
@@ -31,7 +31,13 @@ class DummyOpBuilder : public OpBuilder {
       : OpBuilder(graph_builder) {}
   CoreML::Specification::NeuralNetworkLayer* Build() override;
   TfLiteStatus PopulateSubgraph(TfLiteContext* context) override;
-  const char* DebugName() override;
+  const std::string& DebugName() override;
+
+  TfLiteStatus RegisterInputs(const TfLiteIntArray* inputs,
+                              TfLiteContext* context) override;
+
+  TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
+                               TfLiteContext* context) override;
 };

 }  // namespace coreml
@@ -24,10 +24,9 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
 namespace coreml {
-const char* FullyConnectedOpBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("FullyConnectedOpBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& FullyConnectedOpBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("FullyConnectedOpBuilder", node_id_);
+  return debug_name_;
 }

 void FullyConnectedOpBuilder::SetWeights(TfLiteTensor* weights) {
@@ -25,7 +25,7 @@ class FullyConnectedOpBuilder : public OpBuilder {
  public:
   explicit FullyConnectedOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}
-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -23,10 +23,9 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
 namespace coreml {
-const char* HardSwishOpBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("HardSwishOpBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& HardSwishOpBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("HardSwishOpBuilder", node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* HardSwishOpBuilder::Build() {
@@ -25,7 +25,7 @@ class HardSwishOpBuilder : public OpBuilder {
  public:
   explicit HardSwishOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}
-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -27,10 +27,9 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
 namespace coreml {
-const char* MulOpBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("MulOpBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& MulOpBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("MulOpBuilder", node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* MulOpBuilder::Build() {
@@ -25,7 +25,7 @@ class MulOpBuilder : public OpBuilder {
  public:
   explicit MulOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}
-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -14,6 +14,8 @@ limitations under the License.
 ==============================================================================*/
 #include "tensorflow/lite/experimental/delegates/coreml/builders/op_builder.h"

+#include <string>
+
 #include "tensorflow/lite/builtin_ops.h"
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/experimental/delegates/coreml/builders/op_factory.h"
@@ -22,6 +24,15 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
 namespace coreml {
+
+std::string TensorID::ToString() const {
+  return std::to_string(node_) + "_" + std::to_string(output_id_);
+}
+
+int TensorID::NodeID() const { return node_; }
+
+int TensorID::OutputID() const { return output_id_; }
+
 OpBuilder* GraphBuilder::AddBuilder(int builtin_code, const TfLiteNode* node) {
   switch (builtin_code) {
     case kTfLiteBuiltinAdd:
@@ -113,7 +124,7 @@ CoreML::Specification::Model* GraphBuilder::BuildModel() {
     CoreML::Specification::NeuralNetworkLayer* layer = builder->Build();
     if (layer == nullptr) {
       fprintf(stderr, "Null layer returned from builder: %s\n",
-              builder->DebugName());
+              builder->DebugName().c_str());
       continue;
     }
     neural_network->mutable_layers()->AddAllocated(layer);
@@ -159,6 +170,35 @@ bool GraphBuilder::IsTensorUsed(int tflite_tensor_index) {
   return used_tensor_[tflite_tensor_index];
 }

+CoreML::Specification::NeuralNetworkLayer* OpBuilder::Build() {
+  layer_->set_name(DebugName());
+  return layer_.release();
+}
+
+TfLiteStatus OpBuilder::PopulateSubgraph(TfLiteContext* context) {
+  builder_output_ = AddOutput();
+  return kTfLiteOk;
+}
+
+void OpBuilder::SetBuiltinData(void* builtin_data) {
+  builtin_data_ = builtin_data;
+}
+
+void OpBuilder::SetNodeID(int id) { node_id_ = id; }
+
+void OpBuilder::SetTfLiteNode(const TfLiteNode* node) { tflite_node_ = node; }
+
+int OpBuilder::GetID() const { return node_id_; }
+
+TensorID OpBuilder::GetOutput(TfLiteContext* context) {
+  if (builder_output_.NodeID() != -1) {
+    return builder_output_;
+  }
+  // builder_output_ is not set when PopulateSubgraph is not called.
+  builder_output_ = AddOutput();
+  return builder_output_;
+}
+
 void OpBuilder::AddInput(const std::string& input_name) {
   if (layer_ == nullptr) {
     layer_.reset(new CoreML::Specification::NeuralNetworkLayer);
@@ -180,6 +220,10 @@ TensorID OpBuilder::AddOutput() {
   return tensor_id;
 }

+void OpBuilder::SetDebugName(const char* name, int id) {
+  debug_name_ = std::string(name) + "_" + std::to_string(id);
+}
+
 }  // namespace coreml
 }  // namespace delegates
 }  // namespace tflite
@@ -15,8 +15,8 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_COREML_BUILDERS_OP_BUILDER_H_
 #define TENSORFLOW_LITE_EXPERIMENTAL_DELEGATES_COREML_BUILDERS_OP_BUILDER_H_

-#include "absl/strings/str_cat.h"
-#include "absl/strings/str_format.h"
+#include <string>
+
 #include "mlmodel/format/Model.pb.h"
 #include "mlmodel/format/NeuralNetwork.pb.h"
 #include "tensorflow/lite/c/common.h"
@@ -34,11 +34,11 @@ class TensorID {
   TensorID() {}
   TensorID(int node, int output_id) : node_(node), output_id_(output_id) {}

-  std::string ToString() const { return absl::StrCat(node_, "__", output_id_); }
+  std::string ToString() const;

-  int NodeID() const { return node_; }
+  int NodeID() const;

-  int OutputID() const { return output_id_; }
+  int OutputID() const;

  private:
   int node_ = -1;
@@ -101,20 +101,18 @@ class OpBuilder {

   // Returns the Layer this builder responsible for.
   // Ownership is transferred to caller.
-  virtual CoreML::Specification::NeuralNetworkLayer* Build() {
-    layer_->set_name(DebugName());
-    return layer_.release();
-  }
+  virtual CoreML::Specification::NeuralNetworkLayer* Build();

+  // Associates TfLite input tensors to Core ML layer's inputs and properties.
+  // Verification for input constraints should happen here.
   virtual TfLiteStatus RegisterInputs(const TfLiteIntArray* inputs,
-                                      TfLiteContext* context) {
-    return kTfLiteOk;
-  }
+                                      TfLiteContext* context) = 0;

+  // Associates TFLite output tensor with the node's output. If the OpBuilder
+  // has subgraphs, The final output of that subgraph should be associated with
+  // the output tensor.
   virtual TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
-                                       TfLiteContext* context) {
-    return kTfLiteOk;
-  }
+                                       TfLiteContext* context) = 0;

   // Adds additional required OpBuilders, and populate builder_output_ with
   // Actual output that corresponds to output tensor of TFL Node.
@@ -122,32 +120,17 @@ class OpBuilder {
   // composing other ops. For example, Relu6 in TfLite can be converted to
   // Relu -> Threshold -> Neg.
   // TODO(b/147211734): have this called automatically when necessary.
-  virtual TfLiteStatus PopulateSubgraph(TfLiteContext* context) {
-    builder_output_ = AddOutput();
-    return kTfLiteOk;
-  }
+  virtual TfLiteStatus PopulateSubgraph(TfLiteContext* context);

-  virtual const char* DebugName() = 0;
+  virtual const std::string& DebugName() = 0;

-  void SetBuiltinData(void* builtin_data) { builtin_data_ = builtin_data; }
+  void SetBuiltinData(void* builtin_data);

-  void SetNodeID(int id) { node_id_ = id; }
+  void SetNodeID(int id);

-  void SetTfLiteNode(const TfLiteNode* node) { tflite_node_ = node; }
+  void SetTfLiteNode(const TfLiteNode* node);

-  int GetID() const { return node_id_; }
-
-  TensorID AddOutput();
-
-  // To be used by clients that needs the output of the node.
-  virtual TensorID GetOutput(TfLiteContext* context) {
-    if (builder_output_.NodeID() != -1) {
-      return builder_output_;
-    }
-    // builder_output_ is not set when PopulateSubgraph is not called.
-    builder_output_ = AddOutput();
-    return builder_output_;
-  }
+  int GetID() const;

   // Adds input with tensor name.
   void AddInput(const std::string& input_name);
@@ -159,13 +142,16 @@ class OpBuilder {
   // TODO(taeheej): cleanup AddInput use cases and used tensor tracking.
   void AddInput(int tf_input_id);

+  // Simply adds new output to the underlying layer.
+  TensorID AddOutput();
+
+  // Should set builder_output_ (if unset) and return it as the output of
+  // this node. To be used by clients that needs the output of the node.
+  virtual TensorID GetOutput(TfLiteContext* context);
+
  protected:
-  // Helper to print op instance name.
-  void GetDebugName(const char* name, int id, char* debug_name) {
-    // TODO(karimnosseir): Move away from absl, probably adding overhead
-    // on binary size ?.
-    absl::SNPrintF(debug_name, 100 * sizeof(char), "%s_%d", name, id);
-  }
+  // Sets layer's name.
+  void SetDebugName(const char* layer_name, int id);

   GraphBuilder* graph_builder_ = nullptr;
   // Data needed by this node.
@@ -174,7 +160,7 @@ class OpBuilder {
   int num_outputs_ = 0;
   const TfLiteNode* tflite_node_ = nullptr;
   TensorID builder_output_;
-  char str_debug_name_[100] = {0};
+  std::string debug_name_;
   std::unique_ptr<CoreML::Specification::NeuralNetworkLayer> layer_;
 };

@@ -25,12 +25,12 @@ namespace tflite {
 namespace delegates {
 namespace coreml {

-const char* PadOpBuilder::DebugName() {
-  if (str_debug_name_[0]) return str_debug_name_;
-  GetDebugName(padding_type_ == PadType::kPad ? "PadOpBuilder (PAD)"
+const std::string& PadOpBuilder::DebugName() {
+  if (!debug_name_.empty()) return debug_name_;
+  SetDebugName(padding_type_ == PadType::kPad ? "PadOpBuilder (PAD)"
                                               : "PadOpBuilder (MIRROR_PAD)",
-               node_id_, str_debug_name_);
-  return str_debug_name_;
+               node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* PadOpBuilder::Build() {
@@ -30,7 +30,7 @@ class PadOpBuilder : public OpBuilder {
   explicit PadOpBuilder(GraphBuilder* graph_builder, PadType padding_type)
       : OpBuilder(graph_builder), padding_type_(padding_type) {}

-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -25,26 +25,25 @@ namespace tflite {
 namespace delegates {
 namespace coreml {

-const char* PoolingLayerBuilder::DebugName() {
-  if (str_debug_name_[0]) return str_debug_name_;
+const std::string& PoolingLayerBuilder::DebugName() {
+  if (!debug_name_.empty()) return debug_name_;
   switch (pooling_type_) {
     case kTfLiteBuiltinAveragePool2d:
-      GetDebugName("PoolingLayerBuilder (AVERAGE)", node_id_, str_debug_name_);
+      SetDebugName("PoolingLayerBuilder (AVERAGE)", node_id_);
       break;
     case kTfLiteBuiltinMaxPool2d:
-      GetDebugName("PoolingLayerBuilder (MAX)", node_id_, str_debug_name_);
+      SetDebugName("PoolingLayerBuilder (MAX)", node_id_);
       break;
     case kTfLiteBuiltinL2Pool2d:
-      GetDebugName("PoolingLayerBuilder (L2, unsupported)", node_id_,
-                   str_debug_name_);
+      SetDebugName("PoolingLayerBuilder (L2, unsupported)", node_id_);
       break;
     case kTfLiteBuiltinMean:
-      GetDebugName("PoolingLayerBuilder (MEAN)", node_id_, str_debug_name_);
+      SetDebugName("PoolingLayerBuilder (MEAN)", node_id_);
       break;
     default:
-      GetDebugName("PoolingLayerBuilder (ERROR)", node_id_, str_debug_name_);
+      SetDebugName("PoolingLayerBuilder (ERROR)", node_id_);
   }
-  return str_debug_name_;
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* PoolingLayerBuilder::Build() {
@@ -28,7 +28,7 @@ class PoolingLayerBuilder : public OpBuilder {
                              TfLiteBuiltinOperator pooling_type)
       : OpBuilder(graph_builder), pooling_type_(pooling_type) {}

-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -26,11 +26,11 @@ namespace tflite {
 namespace delegates {
 namespace coreml {

-const char* ReshapeOpBuilder::DebugName() {
-  if (!str_debug_name_[0]) {
-    GetDebugName("ReshapeOpBuilder", node_id_, str_debug_name_);
+const std::string& ReshapeOpBuilder::DebugName() {
+  if (debug_name_.empty()) {
+    SetDebugName("ReshapeOpBuilder", node_id_);
   }
-  return str_debug_name_;
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* ReshapeOpBuilder::Build() {
@@ -26,7 +26,7 @@ class ReshapeOpBuilder : public OpBuilder {
  public:
   explicit ReshapeOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}
-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -28,10 +28,10 @@ namespace tflite {
 namespace delegates {
 namespace coreml {

-const char* ResizeBilinearOpBuilder::DebugName() {
-  if (str_debug_name_[0]) return str_debug_name_;
-  GetDebugName("ResizeBilinearOpBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& ResizeBilinearOpBuilder::DebugName() {
+  if (!debug_name_.empty()) return debug_name_;
+  SetDebugName("ResizeBilinearOpBuilder", node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* ResizeBilinearOpBuilder::Build() {
@@ -27,7 +27,7 @@ class ResizeBilinearOpBuilder : public OpBuilder {
   explicit ResizeBilinearOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}

-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -19,10 +19,9 @@ limitations under the License.
 namespace tflite {
 namespace delegates {
 namespace coreml {
-const char* SoftmaxOpBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("SoftmaxOpBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& SoftmaxOpBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("SoftmaxOpBuilder", node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* SoftmaxOpBuilder::Build() {
@@ -25,7 +25,7 @@ class SoftmaxOpBuilder : public OpBuilder {
  public:
   explicit SoftmaxOpBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}
-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;

@@ -20,10 +20,9 @@ namespace tflite {
 namespace delegates {
 namespace coreml {

-const char* ThresholdLayerBuilder::DebugName() {
-  if (!str_debug_name_[0])
-    GetDebugName("ThresholdLayerBuilder", node_id_, str_debug_name_);
-  return str_debug_name_;
+const std::string& ThresholdLayerBuilder::DebugName() {
+  if (debug_name_.empty()) SetDebugName("ThresholdLayerBuilder", node_id_);
+  return debug_name_;
 }

 CoreML::Specification::NeuralNetworkLayer* ThresholdLayerBuilder::Build() {
@@ -30,7 +30,7 @@ class ThresholdLayerBuilder : public OpBuilder {
   explicit ThresholdLayerBuilder(GraphBuilder* graph_builder)
       : OpBuilder(graph_builder) {}

-  const char* DebugName() override;
+  const std::string& DebugName() override;

   CoreML::Specification::NeuralNetworkLayer* Build() override;
