Update tf_tfl_translate (and associated TensorFlow Lite FlatBuffer importer and exporter) to not use ".input" nodes.
PiperOrigin-RevId: 278749208
Change-Id: Ic1d1091d6365fa9f20fefd029ebd0dd6b5a40c1d
This commit is contained in:
parent 21e4e8b991
commit 399e1c2718
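What this means for the generated IR: previously every argument of the main function was first routed through a "tfl.pseudo_input" op so that it could carry attributes, and exported input tensors were named after those pseudo ops ("Input", "tfl.pseudo_input", ...). With this change the converter consumes block arguments directly and names input tensors after the arguments ("arg0", "arg1", ...). A minimal before/after sketch in MLIR, adapted from the test updates in this diff:

Before:
  func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
    %cst = "tfl.pseudo_const"() {value = dense<[1, 2]> : tensor<2xi32>} : () -> tensor<?xi32>
    %0 = "tfl.pseudo_input"(%arg0) : (tensor<2xi32>) -> tensor<2xi32>
    %1 = "tfl.add"(%0, %cst) {fused_activation_function = "NONE"} : (tensor<2xi32>, tensor<?xi32>) -> tensor<2xi32>
    return %1 : tensor<2xi32>
  }

After:
  func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
    %cst = "tfl.pseudo_const"() {value = dense<[1, 2]> : tensor<2xi32>} : () -> tensor<?xi32>
    %0 = "tfl.add"(%arg0, %cst) {fused_activation_function = "NONE"} : (tensor<2xi32>, tensor<?xi32>) -> tensor<2xi32>
    return %0 : tensor<2xi32>
  }

The exporter API keeps an add_pseudo_input_nodes flag (defaulting to true in the public header) so existing callers keep the old behavior; the translators touched in this commit pass /*add_pseudo_input_nodes=*/false.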
@@ -862,7 +862,7 @@ static OwningModuleRef FlatBufferFileToMlirTrans(
 
   return tflite::FlatBufferToMlir(
       absl::string_view(input->getBufferStart(), input->getBufferSize()),
-      context, loc, output_arrays_order);
+      context, loc, output_arrays_order, /*add_pseudo_input_nodes=*/false);
 }
 
 static mlir::TranslateToMLIRRegistration FlatBufferFileToMlirTransReg(
@@ -52,6 +52,7 @@ limitations under the License.
 #include "mlir/IR/StandardTypes.h" // TF:local_config_mlir
 #include "mlir/IR/Types.h" // TF:local_config_mlir
 #include "mlir/IR/Value.h" // TF:local_config_mlir
+#include "mlir/Support/LogicalResult.h" // TF:local_config_mlir
 #include "mlir/Translation.h" // TF:local_config_mlir
 #include "tensorflow/compiler/mlir/lite/flatbuffer_operator.h"
 #include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
@@ -233,7 +234,8 @@ static bool IsConst(Operation* op) {
 static bool IsConstOrInput(Operation* op) { return IsConst(op) || IsInput(op); }
 
 template <typename T>
-static bool HasValidTFLiteType(Value* value, T& error_handler) {
+static bool HasValidTFLiteType(Value* value, T& error_handler,
+                               bool add_pseudo_input_nodes) {
   // None type is allowed to represent unspecified operands.
   if (value->getType().isa<NoneType>()) return true;
 
@@ -248,11 +250,13 @@ static bool HasValidTFLiteType(Value* value, T& error_handler) {
     error_handler.emitError("expected tensor type, got ") << value->getType();
     return false;
   }
-  if (auto* inst = value->getDefiningOp()) {
-    if (IsInput(inst) && !type.hasStaticShape()) {
-      return error_handler.emitError("should have static shape, got ")
-                 << type.getShape(),
-             false;
+  if (add_pseudo_input_nodes) {
+    if (auto* inst = value->getDefiningOp()) {
+      if (IsInput(inst) && !type.hasStaticShape()) {
+        return error_handler.emitError("should have static shape, got ")
+                   << type.getShape(),
+               false;
+      }
     }
   }
 
@@ -272,7 +276,8 @@ static bool HasValidTFLiteType(Value* value, T& error_handler) {
 // TODO(hinsu): Now that translation is done by making a single pass over the
 // MLIR module, consider inlining these validation checks at the place where
 // these invariants are assumed instead of checking upfront.
-static bool IsValidTFLiteMlirModule(ModuleOp module) {
+static bool IsValidTFLiteMlirModule(ModuleOp module,
+                                    bool add_pseudo_input_nodes) {
   MLIRContext* context = module.getContext();
 
   // Verify that module has a function named main.
@@ -290,7 +295,7 @@ static bool IsValidTFLiteMlirModule(ModuleOp module) {
   auto& bb = fn.getBlocks().front();
 
   for (auto* arg : bb.getArguments()) {
-    if (!HasValidTFLiteType(arg, fn))
+    if (!HasValidTFLiteType(arg, fn, add_pseudo_input_nodes))
       return fn.emitError("invalid TFLite type: ") << arg->getType(), false;
   }
 
@@ -300,13 +305,15 @@ static bool IsValidTFLiteMlirModule(ModuleOp module) {
       if (inst.isKnownTerminator()) break;
 
       for (auto* result : inst.getResults()) {
-        if (!HasValidTFLiteType(result, inst))
+        if (!HasValidTFLiteType(result, inst, add_pseudo_input_nodes))
          return fn.emitError("invalid TFLite type: ") << result->getType(),
                 false;
       }
     }
   }
 
+  if (!add_pseudo_input_nodes) return true;
+
   // Verify that main function's arguments have input op as the only user.
   // Arguments are first passed to a pseudo input operation so that they can
   // have attributes.
@@ -354,16 +361,19 @@ class Translator {
   // internal error.
   static Optional<std::string> Translate(
       ModuleOp module, bool emit_builtin_tflite_ops, bool emit_select_tf_ops,
-      bool emit_custom_ops, OpOrArgNameMapper* op_or_arg_name_mapper);
+      bool emit_custom_ops, OpOrArgNameMapper* op_or_arg_name_mapper,
+      bool add_pseudo_input_nodes);
 
  private:
   enum class OpType : char { kTfliteBuiltin, kSelectTf, kCustomOp };
   explicit Translator(ModuleOp module, bool emit_builtin_tflite_ops,
                       bool emit_select_tf_ops, bool emit_custom_ops,
-                      OpOrArgNameMapper* op_or_arg_name_mapper)
+                      OpOrArgNameMapper* op_or_arg_name_mapper,
+                      bool add_pseudo_input_nodes)
       : module_(module),
         name_mapper_(*op_or_arg_name_mapper),
-        builder_(kInitialBufferSize) {
+        builder_(kInitialBufferSize),
+        add_pseudo_input_nodes_(add_pseudo_input_nodes) {
     // The first buffer must be empty according to the schema definition.
     empty_buffer_ = tflite::CreateBuffer(builder_);
     buffers_.push_back(empty_buffer_);
@@ -436,7 +446,7 @@ class Translator {
 
   // Uses the tf.entry_function attribute (if set) to initialize the op to name
   // mapping.
-  void InitializeNamesFromAttribute(FuncOp fn);
+  void InitializeNamesFromAttribute(FuncOp fn, bool* has_input_attr);
 
   // Determines if the specified operation op's operand at operand_index
   // is marked as a stateful operand.
@@ -471,6 +481,8 @@ class Translator {
   // The failed ops during legalization.
   std::vector<std::string> failed_flex_ops_;
   std::vector<std::string> failed_custom_ops_;
+
+  bool add_pseudo_input_nodes_ = false;
 };
 
 std::string Translator::UniqueName(mlir::Operation* op) {
@@ -531,27 +543,36 @@ Optional<BufferOffset<tflite::Tensor>> Translator::BuildTensor(
 
   // TFLite requires tensor shape only for the inputs and constants.
   // However, we output all known shapes for better round-tripping
-  std::vector<int32_t> shape;
-  if (auto* inst = value->getDefiningOp()) {
-    if (type.hasStaticShape() || IsConst(inst)) {
-      // Const op can have a result of dynamic shaped type (e.g. due to constant
-      // folding), but we can still derive the shape of a constant tensor
-      // for its attribute type.
-      llvm::ArrayRef<int64_t> shape_ref;
-      if (type.hasStaticShape()) {
-        shape_ref = type.getShape();
-      } else {
-        mlir::Attribute tensor_attr = inst->getAttr("value");
-        shape_ref = tensor_attr.getType().cast<TensorType>().getShape();
-      }
+  auto check_shape =
+      [&](llvm::ArrayRef<int64_t> shape_ref) -> mlir::LogicalResult {
+    auto is_out_of_range = [](int64_t dim) {
+      return dim > std::numeric_limits<int32_t>::max();
+    };
+
+    if (std::any_of(shape_ref.begin(), shape_ref.end(), is_out_of_range))
+      return mlir::emitError(
+          value->getLoc(),
+          "result shape dimensions out of 32 bit int type range");
+
+    return mlir::success();
+  };
+
+  std::vector<int32_t> shape;
+  if (type.hasStaticShape()) {
+    llvm::ArrayRef<int64_t> shape_ref = type.getShape();
+    if (mlir::failed(check_shape(shape_ref))) return llvm::None;
+
+    shape = std::vector<int32_t>(shape_ref.begin(), shape_ref.end());
+  } else if (auto* inst = value->getDefiningOp()) {
+    if (IsConst(inst)) {
+      // Const op can have a result of dynamic shaped type (e.g. due to constant
+      // folding), but we can still derive the shape of a constant tensor for
+      // its attribute type.
+      mlir::Attribute tensor_attr = inst->getAttr("value");
+      llvm::ArrayRef<int64_t> shape_ref =
+          tensor_attr.getType().cast<TensorType>().getShape();
+      if (mlir::failed(check_shape(shape_ref))) return llvm::None;
 
-      auto is_out_of_range = [](int64_t dim) {
-        return dim > std::numeric_limits<int32_t>::max();
-      };
-      if (std::any_of(shape_ref.begin(), shape_ref.end(), is_out_of_range)) {
-        inst->emitError("result shape dimensions out of 32 bit int type range");
-        return llvm::None;
-      }
       shape = std::vector<int32_t>(shape_ref.begin(), shape_ref.end());
     }
   }
@@ -852,7 +873,7 @@ Optional<BufferOffset<tflite::Operator>> Translator::BuildOperator(
         llvm::None;
 }
 
-void Translator::InitializeNamesFromAttribute(FuncOp fn) {
+void Translator::InitializeNamesFromAttribute(FuncOp fn, bool* has_input_attr) {
   auto dict_attr = fn.getAttrOfType<mlir::DictionaryAttr>("tf.entry_function");
   if (!dict_attr) return;
 
@@ -866,9 +887,14 @@ void Translator::InitializeNamesFromAttribute(FuncOp fn) {
       return;
     }
     for (auto it : llvm::enumerate(fn.getArguments())) {
-      name_mapper_.InitOpName(*it.value()->user_begin(),
-                              input_names[it.index()].trim());
+      if (add_pseudo_input_nodes_) {
+        name_mapper_.InitOpName(*it.value()->user_begin(),
+                                input_names[it.index()].trim());
+      } else {
+        name_mapper_.InitOpName(it.value(), input_names[it.index()].trim());
+      }
     }
+    *has_input_attr = true;
   }
 
   if (auto str = dict_attr.get("outputs").dyn_cast<mlir::StringAttr>()) {
@@ -909,7 +935,8 @@ bool Translator::IsStatefulOperand(mlir::Operation* op, int operand_index) {
 }
 
 Optional<BufferOffset<tflite::SubGraph>> Translator::BuildSubGraph(FuncOp fn) {
-  InitializeNamesFromAttribute(fn);
+  bool has_input_attr = false;
+  InitializeNamesFromAttribute(fn, &has_input_attr);
   std::vector<BufferOffset<tflite::Tensor>> tensors;
   llvm::DenseMap<Value*, int> tensor_index_map;
   bool is_main_fn = fn.getName() == "main";
@@ -947,9 +974,12 @@ Optional<BufferOffset<tflite::SubGraph>> Translator::BuildSubGraph(FuncOp fn) {
   // Main function's arguments are first passed to `input` op so they don't
   // have associated tensor and buffer. Build FlatBuffer tensor and buffer for
   // other functions.
-  if (!is_main_fn) {
+  if (!is_main_fn || !add_pseudo_input_nodes_) {
     for (unsigned i = 0, e = bb.getNumArguments(); i < e; ++i) {
-      std::string name = absl::StrCat("arg", i);
+      mlir::BlockArgument* arg = bb.getArgument(i);
+      std::string name;
+      if (has_input_attr) name = name_mapper_.GetUniqueName(arg);
+      if (name.empty()) name = absl::StrCat("arg", i);
       if (!build_tensor_and_buffer(bb.getArgument(i), name)) return llvm::None;
     }
   }
@@ -1002,7 +1032,7 @@ Optional<BufferOffset<tflite::SubGraph>> Translator::BuildSubGraph(FuncOp fn) {
     // Arguments of the main function are first passed to a pseudo input
     // operation unlike arguments of other functions that are directly used by
    // the actual ops.
-    if (is_main_fn) {
+    if (is_main_fn && add_pseudo_input_nodes_) {
      inputs.push_back(tensor_index_map[arg->user_begin()->getResult(0)]);
    } else {
      inputs.push_back(tensor_index_map[arg]);
@@ -1050,10 +1080,13 @@ Translator::CreateMetadataVector() {
 
 Optional<std::string> Translator::Translate(
     ModuleOp module, bool emit_builtin_tflite_ops, bool emit_select_tf_ops,
-    bool emit_custom_ops, OpOrArgNameMapper* op_or_arg_name_mapper) {
-  if (!IsValidTFLiteMlirModule(module)) return llvm::None;
+    bool emit_custom_ops, OpOrArgNameMapper* op_or_arg_name_mapper,
+    bool add_pseudo_input_nodes) {
+  if (!IsValidTFLiteMlirModule(module, add_pseudo_input_nodes))
+    return llvm::None;
   Translator translator(module, emit_builtin_tflite_ops, emit_select_tf_ops,
-                        emit_custom_ops, op_or_arg_name_mapper);
+                        emit_custom_ops, op_or_arg_name_mapper,
+                        add_pseudo_input_nodes);
   return translator.TranslateInternal();
 }
 
@@ -1152,10 +1185,10 @@ Optional<std::string> Translator::TranslateInternal() {
 bool tflite::MlirToFlatBufferTranslateFunction(
     ModuleOp module, std::string* serialized_flatbuffer,
     bool emit_builtin_tflite_ops, bool emit_select_tf_ops, bool emit_custom_ops,
-    OpOrArgNameMapper* op_or_arg_name_mapper) {
-  auto maybe_translated =
-      Translator::Translate(module, emit_builtin_tflite_ops, emit_select_tf_ops,
-                            emit_custom_ops, op_or_arg_name_mapper);
+    OpOrArgNameMapper* op_or_arg_name_mapper, bool add_pseudo_input_nodes) {
+  auto maybe_translated = Translator::Translate(
+      module, emit_builtin_tflite_ops, emit_select_tf_ops, emit_custom_ops,
+      op_or_arg_name_mapper, add_pseudo_input_nodes);
   if (!maybe_translated) return true;
   *serialized_flatbuffer = std::move(*maybe_translated);
   return false;
@@ -1163,12 +1196,13 @@ bool tflite::MlirToFlatBufferTranslateFunction(
 
 bool tflite::MlirToFlatBufferTranslateFunction(
     ModuleOp module, std::string* serialized_flatbuffer,
-    bool emit_builtin_tflite_ops, bool emit_select_tf_ops,
-    bool emit_custom_ops) {
+    bool emit_builtin_tflite_ops, bool emit_select_tf_ops, bool emit_custom_ops,
+    bool add_pseudo_input_nodes) {
   OpOrArgLocNameMapper op_or_arg_name_mapper;
   return MlirToFlatBufferTranslateFunction(
       module, serialized_flatbuffer, emit_builtin_tflite_ops,
-      emit_select_tf_ops, emit_custom_ops, &op_or_arg_name_mapper);
+      emit_select_tf_ops, emit_custom_ops, &op_or_arg_name_mapper,
+      add_pseudo_input_nodes);
 }
 
 static mlir::LogicalResult MlirToFlatBufferFileTranslateFunction(
@@ -1183,7 +1217,8 @@ static mlir::LogicalResult MlirToFlatBufferFileTranslateFunction(
   }
   if (tflite::MlirToFlatBufferTranslateFunction(
          module, &serialized_flatbuffer, emit_builtin_tflite_ops,
-         emit_select_tf_ops, emit_custom_ops, op_or_arg_name_mapper.get()))
+         emit_select_tf_ops, emit_custom_ops, op_or_arg_name_mapper.get(),
+         /*add_pseudo_input_nodes=*/false))
     return mlir::failure();
 
   output << serialized_flatbuffer;
@@ -30,13 +30,15 @@ bool MlirToFlatBufferTranslateFunction(mlir::ModuleOp module,
                                        std::string* serialized_flatbuffer,
                                        bool emit_builtin_tflite_ops,
                                        bool emit_select_tf_ops,
-                                       bool emit_custom_ops);
+                                       bool emit_custom_ops,
+                                       bool add_pseudo_input_nodes = true);
 
 // Same as the above but with a custom op name mapper.
 bool MlirToFlatBufferTranslateFunction(
     mlir::ModuleOp module, std::string* serialized_flatbuffer,
     bool emit_builtin_tflite_ops, bool emit_select_tf_ops, bool emit_custom_ops,
-    tensorflow::OpOrArgNameMapper* op_or_arg_name_mapper);
+    tensorflow::OpOrArgNameMapper* op_or_arg_name_mapper,
+    bool add_pseudo_input_nodes);
 }  // namespace tflite
 
 #endif // TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TRANSLATE_H_
@@ -38,8 +38,6 @@ versions {
 
 # CHECK: func @main(%arg0: tensor<4xi32>, %arg1: tensor<4xi32>) -> tensor<*xi32>
 # CHECK-NEXT: attributes {tf.entry_function = {inputs = "input0,input1", outputs = "output"}} {
-# CHECK-NEXT: %0 = "tfl.pseudo_input"(%arg0) : (tensor<4xi32>) -> tensor<4xi32>
-# CHECK-NEXT: %1 = "tfl.pseudo_input"(%arg1) : (tensor<4xi32>) -> tensor<4xi32>
-# CHECK-NEXT: %2 = "tf.BannaPotatoSaladWithColeslaw"(%0, %1) {T = "tfdtype$DT_INT32", device = "", name = "output"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<*xi32>
-# CHECK-NEXT: return %2 : tensor<*xi32>
+# CHECK-NEXT: %0 = "tf.BannaPotatoSaladWithColeslaw"(%arg0, %arg1) {T = "tfdtype$DT_INT32", device = "", name = "output"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<*xi32>
+# CHECK-NEXT: return %0 : tensor<*xi32>
 # CHECK-NEXT: }
@@ -443,19 +443,16 @@ node {
   }
 }
 
-
 # MLIR-LABEL: func @main(%arg0: tensor<1x1x1x256x!quant.uniform<i8:f32, 0.21632751372549019:27>>) -> tensor<1x6x31x!quant.uniform<i8:f32, 0.09363494573854933:22>>
 # MLIR: attributes {tf.entry_function = {inputs = "input", outputs = "output"}
-# MLIR: %[[input:.*]] = "tfl.pseudo_input"(%arg0) : (tensor<1x1x1x256x!quant.uniform<i8:f32, 0.21632751372549019:27>>)
 # MLIR: %[[shape:.*]] = constant dense<[1, -1, 31]> : tensor<3xi32>
 # MLIR: %[[bias:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<186x!quant.uniform<i32:f32:0
 # MLIR: %[[weight:.*]] = "tfl.pseudo_qconst"() {qtype = tensor<186x1x1x256x!quant.uniform<i8<-127:127>:f32:0, {0.12581039038230116,
-# MLIR: %[[conv:.*]] = "tfl.conv_2d"(%[[input]], %[[weight]], %[[bias]]) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}
+# MLIR: %[[conv:.*]] = "tfl.conv_2d"(%arg0, %[[weight]], %[[bias]]) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32}
 # MLIR: %[[reshape:.*]] = "tfl.reshape"(%[[conv]], %[[shape]]) : (tensor<1x1x1x186x!quant.uniform<i8:f32, 0.09363494573854933:22>>, tensor<3xi32>)
 # MLIR: return %[[reshape]] : tensor<1x6x31x!quant.uniform<i8:f32, 0.09363494573854933:22>>
 # MLIR: }
 
-
 # CHECK-LABEL: {
 # CHECK: version: 3,
 # CHECK: operator_codes: [ {
@@ -49,6 +49,5 @@ versions {
 
 # CHECK: func @main(%arg0: tensor<4xi32>) -> tensor<4xi32>
 # CHECK-NEXT: attributes {tf.entry_function = {inputs = "input", outputs = "output"}} {
-# CHECK-NEXT: %0 = "tfl.pseudo_input"(%arg0) : (tensor<4xi32>) -> tensor<4xi32>
-# CHECK-NEXT: return %0 : tensor<4xi32>
+# CHECK-NEXT: return %arg0 : tensor<4xi32>
 # CHECK-NEXT: }
@@ -141,6 +141,5 @@ versions {
 # CHECK: attributes {tf.entry_function = {inputs = "unranked", outputs = "unranked,static,static_10"}} {
 # CHECK: [[VAL_1:%.*]] = constant dense<0> : tensor<10xi32>
 # CHECK: [[VAL_2:%.*]] = constant dense<0> : tensor<i32>
-# CHECK: [[VAL_3:%.*]] = "tfl.pseudo_input"([[VAL_0]]) : (tensor<1x8x8x2xi32>) -> tensor<1x8x8x2xi32>
-# CHECK: return [[VAL_3]], [[VAL_2]], [[VAL_1]] : tensor<1x8x8x2xi32>, tensor<i32>, tensor<10xi32>
+# CHECK: return [[VAL_0]], [[VAL_2]], [[VAL_1]] : tensor<1x8x8x2xi32>, tensor<i32>, tensor<10xi32>
 # CHECK: }
@@ -7808,14 +7808,12 @@ library {
 # CHECK: [[VAL_21:%.*]] = constant dense<0.000000e+00> : tensor<3xf32>
 # CHECK: [[VAL_22:%.*]] = constant dense<0.000000e+00> : tensor<1x3xf32>
 # CHECK: [[VAL_23:%.*]] = constant unit
-# CHECK: [[VAL_24:%.*]] = "tfl.pseudo_input"(%arg0) : (tensor<1x3x3xf32>) -> tensor<1x3x3xf32>
-# CHECK: [[VAL_25:%.*]]:3 = "tfl.unpack"([[VAL_24]]) {axis = 1 : i32, num = 3 : i32} : (tensor<1x3x3xf32>) -> (tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>)
-# CHECK: [[VAL_26:%.*]] = "tfl.pack"([[VAL_25]]#0, [[VAL_25]]#1, [[VAL_25]]#2) {axis = 0 : i32, values_count = 3 : i32} : (tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>) -> tensor<3x1x3xf32>
-# CHECK: [[VAL_27:%.*]] = constant dense<0.000000e+00> : tensor<1x3xf32>
-# CHECK: [[VAL_28:%.*]] = "tfl.unidirectional_sequence_lstm"([[VAL_26]], [[VAL_7]], [[VAL_6]], [[VAL_5]], [[VAL_8]], [[VAL_3]], [[VAL_2]], [[VAL_1]], [[VAL_4]], [[VAL_10]], [[VAL_9]], [[VAL_11]], [[VAL_21]], [[VAL_16]], [[VAL_21]], [[VAL_21]], [[VAL_23]], [[VAL_23]], [[VAL_22]], [[VAL_27]], [[VAL_23]], [[VAL_23]], [[VAL_23]], [[VAL_23]]) {fused_activation_function = "TANH", time_major = true} : (tensor<3x1x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, none, none, tensor<1x3xf32>, tensor<1x3xf32>, none, none, none, none) -> tensor<3x1x3xf32>
+# CHECK: [[VAL_24:%.*]]:3 = "tfl.unpack"(%arg0) {axis = 1 : i32, num = 3 : i32} : (tensor<1x3x3xf32>) -> (tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>)
+# CHECK: [[VAL_25:%.*]] = "tfl.pack"([[VAL_24]]#0, [[VAL_24]]#1, [[VAL_24]]#2) {axis = 0 : i32, values_count = 3 : i32} : (tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>) -> tensor<3x1x3xf32>
+# CHECK: [[VAL_26:%.*]] = constant dense<0.000000e+00> : tensor<1x3xf32>
+# CHECK: [[VAL_27:%.*]] = "tfl.unidirectional_sequence_lstm"([[VAL_25]], [[VAL_7]], [[VAL_6]], [[VAL_5]], [[VAL_8]], [[VAL_3]], [[VAL_2]], [[VAL_1]], [[VAL_4]], [[VAL_10]], [[VAL_9]], [[VAL_11]], [[VAL_21]], [[VAL_16]], [[VAL_21]], [[VAL_21]], [[VAL_23]], [[VAL_23]], [[VAL_22]], [[VAL_26]], [[VAL_23]], [[VAL_23]], [[VAL_23]], [[VAL_23]]) {fused_activation_function = "TANH", time_major = true} : (tensor<3x1x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, none, none, tensor<1x3xf32>, tensor<1x3xf32>, none, none, none, none) -> tensor<3x1x3xf32>
+# CHECK: [[VAL_28:%.*]] = constant dense<0.000000e+00> : tensor<1x3xf32>
 # CHECK: [[VAL_29:%.*]] = constant dense<0.000000e+00> : tensor<1x3xf32>
-# CHECK: [[VAL_30:%.*]] = constant dense<0.000000e+00> : tensor<1x3xf32>
-# CHECK: [[VAL_31:%.*]] = "tfl.unidirectional_sequence_lstm"([[VAL_28]], [[VAL_19]], [[VAL_18]], [[VAL_17]], [[VAL_20]], [[VAL_14]], [[VAL_13]], [[VAL_12]], [[VAL_15]], [[VAL_23]], [[VAL_23]], [[VAL_23]], [[VAL_21]], [[VAL_16]], [[VAL_21]], [[VAL_21]], [[VAL_23]], [[VAL_23]], [[VAL_29]], [[VAL_30]], [[VAL_23]], [[VAL_23]], [[VAL_23]], [[VAL_23]]) {fused_activation_function = "TANH", time_major = true} : (tensor<3x1x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, none, none, none, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, none, none, tensor<1x3xf32>, tensor<1x3xf32>, none, none, none, none) -> tensor<3x1x3xf32>
-# CHECK: [[VAL_32:%.*]]:3 = "tfl.unpack"([[VAL_31]]) {axis = 0 : i32, num = 3 : i32} : (tensor<3x1x3xf32>) -> (tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>)
-# CHECK: return [[VAL_32]]#2 : tensor<1x3xf32>
-
+# CHECK: [[VAL_30:%.*]] = "tfl.unidirectional_sequence_lstm"([[VAL_27]], [[VAL_19]], [[VAL_18]], [[VAL_17]], [[VAL_20]], [[VAL_14]], [[VAL_13]], [[VAL_12]], [[VAL_15]], [[VAL_23]], [[VAL_23]], [[VAL_23]], [[VAL_21]], [[VAL_16]], [[VAL_21]], [[VAL_21]], [[VAL_23]], [[VAL_23]], [[VAL_28]], [[VAL_29]], [[VAL_23]], [[VAL_23]], [[VAL_23]], [[VAL_23]]) {fused_activation_function = "TANH", time_major = true} : (tensor<3x1x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, tensor<3x3xf32>, none, none, none, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, tensor<3xf32>, none, none, tensor<1x3xf32>, tensor<1x3xf32>, none, none, none, none) -> tensor<3x1x3xf32>
+# CHECK: [[VAL_31:%.*]]:3 = "tfl.unpack"([[VAL_30]]) {axis = 0 : i32, num = 3 : i32} : (tensor<3x1x3xf32>) -> (tensor<1x3xf32>, tensor<1x3xf32>, tensor<1x3xf32>)
+# CHECK: return [[VAL_31]]#2 : tensor<1x3xf32>
@@ -5,11 +5,6 @@ func @main(%arg0: tensor<1x384xf32>, %arg1: tensor<1x96xf32>, %arg2: tensor<384x
 // CHECK-LABEL: @main
 // CHECK: "tfl.basic_lstm"({{.*}}) {cell_clip = 1.000000e+00 : f32, fused_activation_function = "RELU", kernel_type = "BASIC", proj_clip = 2.000000e+00 : f32} : (tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> (tensor<1x96xf32>, tensor<1x96xf32>, tensor<1x480xf32>, tensor<1x384xf32>)
 
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<1x384xf32>) -> tensor<1x384xf32>
-  %1 = "tfl.pseudo_input" (%arg1) : (tensor<1x96xf32>) -> tensor<1x96xf32>
-  %2 = "tfl.pseudo_input" (%arg2) : (tensor<384x480xf32>) -> tensor<384x480xf32>
-  %3 = "tfl.pseudo_input" (%arg3) : (tensor<384xf32>) -> tensor<384xf32>
-  %4 = "tfl.pseudo_input" (%arg4) : (tensor<1x96xf32>) -> tensor<1x96xf32>
-  %5:4 = "tfl.basic_lstm"(%0, %1, %2, %3, %4) {fused_activation_function = "RELU", cell_clip = 1.0 : f32, proj_clip = 2.0 : f32} : (tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> (tensor<1x96xf32>, tensor<1x96xf32>, tensor<1x480xf32>, tensor<1x384xf32>)
-  return %5#0 : tensor<1x96xf32>
+  %0:4 = "tfl.basic_lstm"(%arg0, %arg1, %arg2, %arg3, %arg4) {fused_activation_function = "RELU", cell_clip = 1.0 : f32, proj_clip = 2.0 : f32} : (tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> (tensor<1x96xf32>, tensor<1x96xf32>, tensor<1x480xf32>, tensor<1x384xf32>)
+  return %0#0 : tensor<1x96xf32>
 }
@@ -84,6 +84,5 @@ func @qu8() -> tensor<3x!quant.uniform<u8<1:255>:f32, 1.0>> {
 
 // Identity function to make the exporter happy
 func @main(%arg0: tensor<4xi8>) -> tensor<4xi8> {
-  %0 = "tfl.pseudo_input"(%arg0) : (tensor<4xi8>) -> tensor<4xi8>
-  return %0 : tensor<4xi8>
+  return %arg0 : tensor<4xi8>
 }
@@ -2,11 +2,9 @@
 // Confirm function references in if ops are preserved
 func @main(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
   // CHECK: %{{.*}} = "tf.If"(%{{.*}}, %{{.*}}, %{{.*}}) {else_branch = @cond_false, is_stateless = false, then_branch = @cond_true} : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
-  %0 = "tfl.pseudo_input"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
-  %1 = "tfl.pseudo_input"(%arg1) : (tensor<1xf32>) -> tensor<1xf32>
-  %2 = "tfl.less"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1>
-  %3 = "tf.If"(%2, %0, %1) {else_branch = @cond_false, then_branch = @cond_true, is_stateless = false} : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
-  return %3 : tensor<1xf32>
+  %0 = "tfl.less"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1>
+  %1 = "tf.If"(%0, %arg0, %arg1) {else_branch = @cond_false, then_branch = @cond_true, is_stateless = false} : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
+  return %1 : tensor<1xf32>
 }
 
 func @cond_true(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
@@ -57,13 +57,13 @@ Optional<std::unique_ptr<tflite::ModelT>> InjectStatsToFullyConnected(
   }
   std::unique_ptr<tflite::ModelT> model(model_ptr->GetModel()->UnPack());
 
-  // FB-LABEL: name: "Input",
+  // FB-LABEL: name: "arg0",
   // FB-NEXT: quantization: {
   // FB-NEXT: min: [ -1.0 ],
   // FB-NEXT: max: [ 1.0 ]
   // FB-NEXT: }
 
-  // FB-LABEL: name: "Input1",
+  // FB-LABEL: name: "arg1",
   // FB-NEXT: quantization: {
   // FB-EMPTY:
   // FB-NEXT: }
@@ -88,7 +88,7 @@ Optional<std::unique_ptr<tflite::ModelT>> InjectStatsToFullyConnected(
   // FB-NEXT: }
 
   // FB-LABEL: operators: [ {
-  // FB-NEXT: inputs: [ 1, 2, 0 ],
+  // FB-NEXT: inputs: [ 0, 1, 2 ],
   // FB-NEXT: outputs: [ 3, 4 ],
   // FB-NEXT: builtin_options_type: FullyConnectedOptions,
   // FB-NEXT: builtin_options: {
@@ -98,16 +98,12 @@ Optional<std::unique_ptr<tflite::ModelT>> InjectStatsToFullyConnected(
 
 // CHECK-LABEL: func @main(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>)
 // CHECK-SAME: -> tensor<40x40xf32> {
-// CHECK-NEXT: %[[in:.*]] = "tfl.pseudo_input"(%arg0) : (tensor<40x37xf32>)
-// CHECK-SAME: -> tensor<40x37xf32>
-// CHECK-NEXT: %[[stat:.*]] = "quant.stats"(%[[in]]) {layerStats = dense<
+// CHECK-NEXT: %[[stat:.*]] = "quant.stats"(%arg0) {layerStats = dense<
 // CHECK-SAME: [-1.000000e+00, 1.000000e+00]> : tensor<2xf32>}
 // CHECK-SAME: : (tensor<40x37xf32>) -> tensor<40x37xf32>
-// CHECK-NEXT: %[[in1:.*]] = "tfl.pseudo_input"(%arg1) :
-// CHECK-SAME: (tensor<40x37xf32>) -> tensor<40x37xf32>
 // CHECK-NEXT: %[[cst:.*]] = "tfl.pseudo_const"() {value = dense<
 // CHECK-SAME: 1.000000e+00> : tensor<40xf32>} : () -> tensor<40xf32>
-// CHECK-NEXT: %[[fc:.*]]:2 = "tfl.fully_connected"(%[[stat]], %[[in1]],
+// CHECK-NEXT: %[[fc:.*]]:2 = "tfl.fully_connected"(%[[stat]], %arg1,
 // CHECK-NEXT: %[[stat1:.*]] = "quant.stats"(%[[fc]]#0) {axis = 1 : i64,
 // CHECK-SAME: axisStats = dense<{{\[}}[-0.000000e+00, 0.000000e+00],
 // CHECK-SAME: [-1.000000e+00, 1.000000e+00],
@@ -4,16 +4,12 @@
 func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
 ^bb0(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>):
   %cst = constant dense<1.0> : tensor<40xf32>
-  %0 = "tfl.pseudo_input"(%arg0) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
-  %1 = "tfl.pseudo_input"(%arg1) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
-  %2:2 = "tfl.fully_connected"(%0, %1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, tensor<40xf32>) -> (tensor<40x40xf32>, tensor<40x40xf32>)
-  return %2 : tensor<40x40xf32>
+  %0:2 = "tfl.fully_connected"(%arg0, %arg1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, tensor<40xf32>) -> (tensor<40x40xf32>, tensor<40x40xf32>)
+  return %0 : tensor<40x40xf32>
 
 // CHECK-LABEL: func @main(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>) -> tensor<40x40xf32> {
-// CHECK-NEXT: %0 = "tfl.pseudo_input"(%arg0) : (tensor<40x37xf32>) -> tensor<40x37xf32>
-// CHECK-NEXT: %1 = "tfl.pseudo_input"(%arg1) : (tensor<40x37xf32>) -> tensor<40x37xf32>
-// CHECK-NEXT: %2 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<40xf32>} : () -> tensor<40xf32>
-// CHECK-NEXT: %3:2 = "tfl.fully_connected"(%0, %1, %2) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, tensor<40xf32>) -> (tensor<40x40xf32>, tensor<40x40xf32>)
-// CHECK-NEXT: return %3#0 : tensor<40x40xf32>
+// CHECK-NEXT: %0 = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<40xf32>} : () -> tensor<40xf32>
+// CHECK-NEXT: %1:2 = "tfl.fully_connected"(%arg0, %arg1, %0) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, tensor<40xf32>) -> (tensor<40x40xf32>, tensor<40x40xf32>)
+// CHECK-NEXT: return %1#0 : tensor<40x40xf32>
 // CHECK-NEXT: }
 }
@@ -4,7 +4,6 @@
 func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> {
 ^bb0(%arg0: tensor<1x6x6x16xf32>):
   // CHECK: "tfl.average_pool_2d"(%{{.*}}) {filter_height = 3 : i32, filter_width = 6 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 3 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32>
-  %0 = "tfl.pseudo_input"(%arg0) : (tensor<1x6x6x16xf32>) -> tensor<1x6x6x16xf32> loc("Input")
-  %1 = "tfl.average_pool_2d"(%0) {filter_height = 3 : i32, filter_width = 6 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 3 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> loc("avgpool")
-  return %1 : tensor<1x1x1x16xf32>
+  %0 = "tfl.average_pool_2d"(%arg0) {filter_height = 3 : i32, filter_width = 6 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 3 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> loc("avgpool")
+  return %0 : tensor<1x1x1x16xf32>
 }
@@ -3,18 +3,16 @@
 
 func @main(tensor<4xf32>) -> tensor<4xf32> {
 ^bb0(%arg0: tensor<4xf32>):
-  // CHECK: [[INPUT:%.*]] = "tfl.pseudo_input"(%arg0) : (tensor<4xf32>) -> tensor<4xf32>
-  // CHECK-NEXT: [[CONST:%.*]] = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<4xf32>} : () -> tensor<4xf32>
-  // CHECK-NEXT: [[SQDIFF:%.*]] = tfl.squared_difference [[INPUT]], [[CONST]] : tensor<4xf32>
-  // CHECK-NEXT: %{{.*}} = tfl.mul [[INPUT]], [[SQDIFF]] {fused_activation_function = "NONE"} : tensor<4xf32>
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("Input")
-  %1 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
+  // CHECK: [[CONST:%.*]] = "tfl.pseudo_const"() {value = dense<1.000000e+00> : tensor<4xf32>} : () -> tensor<4xf32>
+  // CHECK-NEXT: [[SQDIFF:%.*]] = tfl.squared_difference %arg0, [[CONST]] : tensor<4xf32>
+  // CHECK-NEXT: %{{.*}} = tfl.mul %arg0, [[SQDIFF]] {fused_activation_function = "NONE"} : tensor<4xf32>
+  %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
   // Confirm that attributes that cannot be stored in the flatbuffer options
   // for a given operator are dropped silently.
-  %2 = "tfl.squared_difference"(%0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
-  %3 = "tfl.mul"(%0, %2) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
-  %4 = "tfl.div"(%3, %2) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
-  %5 = "tfl.exp"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
-  %6 = "tfl.neg"(%5) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
-  return %6 : tensor<4xf32>
+  %1 = "tfl.squared_difference"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
+  %2 = "tfl.mul"(%arg0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
+  %3 = "tfl.div"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
+  %4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
+  %5 = "tfl.neg"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
+  return %5 : tensor<4xf32>
 }
@@ -3,11 +3,9 @@
 
 func @main(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>) -> tensor<40x40xf32> {
   // CHECK: [[NONE:%.*]] = constant unit
-  // CHECK: "tfl.fully_connected"(%{{.()}}, %{{.*}}, [[NONE]])
+  // CHECK: "tfl.fully_connected"(%arg0, %arg1, [[NONE]])
   // CHECK-SAME: (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
   %cst = constant unit
-  %0 = "tfl.pseudo_input"(%arg0) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
-  %1 = "tfl.pseudo_input"(%arg1) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
-  %2:2 = "tfl.fully_connected"(%0, %1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
-  return %2 : tensor<40x40xf32>
+  %0:2 = "tfl.fully_connected"(%arg0, %arg1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
+  return %0 : tensor<40x40xf32>
 }
@@ -3,16 +3,15 @@
 
 func @main(tensor<4xf32>) -> tensor<4xf32> {
 ^bb0(%arg0: tensor<4xf32>):
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("Input")
-  %1 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
-  %2 = "tfl.squared_difference"(%0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
+  %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
+  %1 = "tfl.squared_difference"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
   // CHECK: %[[MUL:.*]] = tfl.mul
-  %3 = "tfl.mul"(%0, %2) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
+  %2 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
   // CHECK: %[[DIV:.*]] = tfl.div
-  %4 = "tfl.div"(%3, %2) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
+  %3 = "tfl.div"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
   // CHECK: %[[EXP:.*]] = "tfl.exp"
-  %5 = "tfl.exp"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
-  %6 = "tfl.neg"(%5) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
+  %4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
+  %5 = "tfl.neg"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
   // CHECK: return %[[MUL]], %[[EXP]], %[[DIV]]
-  return %6 : tensor<4xf32>
+  return %5 : tensor<4xf32>
 }
@@ -8,13 +8,12 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
   // CHECK: %{{.*}} = "tfl.dequantize"(%{{.*}}) : (tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-03>>) -> tensor<1x1001xf32>
 
   %cst = constant dense<[1, 1001]> : tensor<2xi32>
-  %0 = "tfl.pseudo_input"(%arg0) : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3xf32>
-  %1 = "tfl.quantize"(%0) {qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>
-  %2 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
-  %3 = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>
-  %4 = "tfl.conv_2d"(%1, %2, %3) {dilation_h_factor = 2 : i32, dilation_w_factor = 3 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>, tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
-  %5 = "tfl.reshape"(%4, %cst) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>) -> tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>
-  %6 = "tfl.softmax"(%5) {beta = 1.000000e+00 : f32} : (tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>) -> tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-03>>
-  %7 = "tfl.dequantize"(%6) : (tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-03>>) -> tensor<1x1001xf32>
-  return %7 : tensor<1x1001xf32>
+  %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>
+  %1 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
+  %2 = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>
+  %3 = "tfl.conv_2d"(%0, %1, %2) {dilation_h_factor = 2 : i32, dilation_w_factor = 3 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>, tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
+  %4 = "tfl.reshape"(%3, %cst) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>) -> tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>
+  %5 = "tfl.softmax"(%4) {beta = 1.000000e+00 : f32} : (tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>) -> tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-03>>
+  %6 = "tfl.dequantize"(%5) : (tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-03>>) -> tensor<1x1001xf32>
+  return %6 : tensor<1x1001xf32>
 }
@@ -13,10 +13,9 @@ func @main(tensor<3x2xi32>) -> tensor<3x2xi32> {
   // CHECK-NEXT: [[ADD:%.*]] = "tfl.add"([[SCALAR]], [[SUB]]) {fused_activation_function = "NONE"} : (tensor<i32>, tensor<3x2xi32>) -> tensor<3x2xi32>
   // CHECK-NEXT: return [[ADD]] : tensor<3x2xi32>
 
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<3x2xi32>) -> tensor<3x2xi32> loc("Input")
-  %1 = "tfl.pseudo_const" () {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32> loc("Const")
-  %2 = "tfl.sub" (%0, %1) {fused_activation_function = "RELU6"} : (tensor<3x2xi32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("sub")
-  %3 = "std.constant" () {value = dense<10> : tensor<i32>} : () -> tensor<i32> loc("Const2")
-  %4 = "tfl.add" (%3, %2) {fused_activation_function = "NONE"} : (tensor<i32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("add")
-  return %4 : tensor<3x2xi32>
+  %0 = "tfl.pseudo_const" () {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32> loc("Const")
+  %1 = "tfl.sub" (%arg0, %0) {fused_activation_function = "RELU6"} : (tensor<3x2xi32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("sub")
+  %2 = "std.constant" () {value = dense<10> : tensor<i32>} : () -> tensor<i32> loc("Const2")
+  %3 = "tfl.add" (%2, %1) {fused_activation_function = "NONE"} : (tensor<i32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("add")
+  return %3 : tensor<3x2xi32>
 }
@@ -3,14 +3,12 @@
 func @main(%arg0: tensor<i32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
   // TODO(b/138222071) Expect first output to be a scalar
   // CHECK: %{{.*}}:2 = "tf.While"(%{{.*}}, %{{.*}}) {body = @body, cond = @cond, is_stateless = false} : (tensor<i32>, tensor<1xf32>) -> (tensor<*xi32>, tensor<1xf32>)
-  %0 = "tfl.pseudo_input"(%arg0) : (tensor<i32>) -> tensor<i32>
-  %1 = "tfl.pseudo_input"(%arg1) : (tensor<1xf32>) -> tensor<1xf32>
 
-  // While %0 is greater than zero, element wise add %1 with itself.
-  %2:2 = "tf.While"(%0, %1) {
+  // While %arg0 is greater than zero, element wise add %arg1 with itself.
+  %0:2 = "tf.While"(%arg0, %arg1) {
     cond = @cond, body = @body, is_stateless = false
   } : (tensor<i32>, tensor<1xf32>) -> (tensor<i32>, tensor<1xf32>)
-  return %2#1 : tensor<1xf32>
+  return %0#1 : tensor<1xf32>
 }
 
 func @cond(%arg0: tensor<*xi32>, %arg1: tensor<*xf32>) -> tensor<i1> {
@@ -11,35 +11,35 @@ func @main(tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384x
 // CHECK-NEXT: tensors: [ {
 // CHECK-NEXT: shape: [ 1, 384 ],
 // CHECK-NEXT: buffer: 1,
-// CHECK-NEXT: name: "tfl.pseudo_input",
+// CHECK-NEXT: name: "arg0",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
 // CHECK-NEXT: }, {
 // CHECK-NEXT: shape: [ 1, 96 ],
 // CHECK-NEXT: buffer: 2,
-// CHECK-NEXT: name: "tfl.pseudo_input1",
+// CHECK-NEXT: name: "arg1",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
 // CHECK-NEXT: }, {
 // CHECK-NEXT: shape: [ 384, 480 ],
 // CHECK-NEXT: buffer: 3,
-// CHECK-NEXT: name: "tfl.pseudo_input2",
+// CHECK-NEXT: name: "arg2",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
 // CHECK-NEXT: }, {
 // CHECK-NEXT: shape: [ 384 ],
 // CHECK-NEXT: buffer: 4,
-// CHECK-NEXT: name: "tfl.pseudo_input3",
+// CHECK-NEXT: name: "arg3",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
 // CHECK-NEXT: }, {
 // CHECK-NEXT: shape: [ 1, 96 ],
 // CHECK-NEXT: buffer: 5,
-// CHECK-NEXT: name: "tfl.pseudo_input4",
+// CHECK-NEXT: name: "arg4",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
@@ -112,11 +112,6 @@ func @main(tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384x
 // CHECK-NEXT:}
 
 ^bb0(%arg0: tensor<1x384xf32>, %arg1: tensor<1x96xf32>, %arg2: tensor<384x480xf32>, %arg3: tensor<384xf32>, %arg4: tensor<1x96xf32>):
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<1x384xf32>) -> tensor<1x384xf32>
-  %1 = "tfl.pseudo_input" (%arg1) : (tensor<1x96xf32>) -> tensor<1x96xf32>
-  %2 = "tfl.pseudo_input" (%arg2) : (tensor<384x480xf32>) -> tensor<384x480xf32>
-  %3 = "tfl.pseudo_input" (%arg3) : (tensor<384xf32>) -> tensor<384xf32>
-  %4 = "tfl.pseudo_input" (%arg4) : (tensor<1x96xf32>) -> tensor<1x96xf32>
-  %5:4 = "tfl.basic_lstm"(%0, %1, %2, %3, %4) {fused_activation_function = "RELU", cell_clip = 1.0 : f32, proj_clip = 2.0 : f32} : (tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> (tensor<1x96xf32>, tensor<1x96xf32>, tensor<1x480xf32>, tensor<1x384xf32>)
-  return %5#0 : tensor<1x96xf32>
+  %0:4 = "tfl.basic_lstm"(%arg0, %arg1, %arg2, %arg3, %arg4) {fused_activation_function = "RELU", cell_clip = 1.0 : f32, proj_clip = 2.0 : f32} : (tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> (tensor<1x96xf32>, tensor<1x96xf32>, tensor<1x480xf32>, tensor<1x384xf32>)
+  return %0#0 : tensor<1x96xf32>
 }
@@ -19,7 +19,7 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT: tensors: [ {
 // CHECK-NEXT: shape: [ 4 ],
 // CHECK-NEXT: buffer: 1,
-// CHECK-NEXT: name: "Input",
+// CHECK-NEXT: name: "arg0",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
@@ -93,11 +93,10 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT: } ]
 // CHECK-NEXT:}
 
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("Input")
-  %1 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
-  %2 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
+  %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
+  %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
   // tf.MyCustomOp is the result of conversion to a Custom op
-  %3 = "tf.MyCustomOp"(%2, %1) {fused_activation_function = "RELU", int_attr = 2 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
-  %4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
-  return %4 : tensor<4xf32>
+  %2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
+  %3 = "tfl.exp"(%2) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
+  return %3 : tensor<4xf32>
 }
@@ -15,7 +15,7 @@ func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
 // CHECK-NEXT: tensors: [ {
 // CHECK-NEXT: shape: [ 1, 224, 224, 3 ],
 // CHECK-NEXT: buffer: 1,
-// CHECK-NEXT: name: "Input",
+// CHECK-NEXT: name: "arg0",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
@@ -85,11 +85,10 @@ func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
 // CHECK-NEXT: } ]
 // CHECK-NEXT:}
 
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3xf32> loc("Input")
-  %1 = "tfl.pseudo_const" () {value = dense<-1.23697901> : tensor<32xf32>} : () -> tensor<32xf32> loc("Const")
-  %2 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
-  %3 = "tfl.dequantize"(%2) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>) -> tensor<32x3x3x3xf32>
-  %4 = "tfl.depthwise_conv_2d"(%0, %3, %1) {depth_multiplier = 4 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<1x224x224x3xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32>
+  %0 = "tfl.pseudo_const" () {value = dense<-1.23697901> : tensor<32xf32>} : () -> tensor<32xf32> loc("Const")
+  %1 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
+  %2 = "tfl.dequantize"(%1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>) -> tensor<32x3x3x3xf32>
+  %3 = "tfl.depthwise_conv_2d"(%arg0, %2, %0) {depth_multiplier = 4 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<1x224x224x3xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32>
 
-  return %4 : tensor<1x112x112x32xf32>
+  return %3 : tensor<1x112x112x32xf32>
 }
@@ -15,7 +15,7 @@ func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
 // CHECK-NEXT: tensors: [ {
 // CHECK-NEXT: shape: [ 1, 224, 224, 3 ],
 // CHECK-NEXT: buffer: 1,
-// CHECK-NEXT: name: "Input",
+// CHECK-NEXT: name: "arg0",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
@@ -87,11 +87,10 @@ func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
 // CHECK-NEXT: } ]
 // CHECK-NEXT:}
 
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3xf32> loc("Input")
-  %1 = "tfl.pseudo_const" () {value = dense<-1.23697901> : tensor<32xf32>} : () -> tensor<32xf32> loc("Const")
-  %2 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
-  %3 = "tfl.dequantize"(%2) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>) -> tensor<32x3x3x3xf32>
-  %4 = "tfl.depthwise_conv_2d"(%0, %3, %1) {depth_multiplier = 4 : i32, dilation_h_factor = 2 : i32, dilation_w_factor = 2 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<1x224x224x3xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32>
+  %0 = "tfl.pseudo_const" () {value = dense<-1.23697901> : tensor<32xf32>} : () -> tensor<32xf32> loc("Const")
+  %1 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
+  %2 = "tfl.dequantize"(%1) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>) -> tensor<32x3x3x3xf32>
+  %3 = "tfl.depthwise_conv_2d"(%arg0, %2, %0) {depth_multiplier = 4 : i32, dilation_h_factor = 2 : i32, dilation_w_factor = 2 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<1x224x224x3xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>) -> tensor<1x112x112x32xf32>
 
-  return %4 : tensor<1x112x112x32xf32>
+  return %3 : tensor<1x112x112x32xf32>
 }
@@ -15,7 +15,7 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT: tensors: [ {
 // CHECK-NEXT: shape: [ 4 ],
 // CHECK-NEXT: buffer: 1,
-// CHECK-NEXT: name: "Input",
+// CHECK-NEXT: name: "arg0",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
@@ -91,10 +91,9 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT: } ]
 // CHECK-NEXT:}
 
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("Input")
-  %1 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
-  %2 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul0")
-  %3 = "tfl.mul"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul1")
-  %4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
-  return %4 : tensor<4xf32>
+  %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
+  %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul0")
+  %2 = "tfl.mul"(%1, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul1")
+  %3 = "tfl.exp"(%2) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
+  return %3 : tensor<4xf32>
 }
@@ -2,9 +2,8 @@
 
 func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
   %cst = "tfl.pseudo_const"() {value = dense<[1, 2]> : tensor<2xi32>} : () -> tensor<?xi32>
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<2xi32>) -> tensor<2xi32>
-  %1 = "tfl.add"(%0, %cst) {fused_activation_function = "NONE"} : (tensor<2xi32>, tensor<?xi32>) -> tensor<2xi32>
-  return %1 : tensor<2xi32>
+  %0 = "tfl.add"(%arg0, %cst) {fused_activation_function = "NONE"} : (tensor<2xi32>, tensor<?xi32>) -> tensor<2xi32>
+  return %0 : tensor<2xi32>
 }
 
@@ -22,4 +21,3 @@ func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
 // CHECK-NEXT: }, {
-// CHECK-NEXT: data: [ 1, 0, 0, 0, 2, 0, 0, 0 ]
 // CHECK-NEXT: }, {
 
@@ -27,7 +27,7 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT: tensors: [ {
 // CHECK-NEXT: shape: [ 4 ],
 // CHECK-NEXT: buffer: 1,
-// CHECK-NEXT: name: "Input",
+// CHECK-NEXT: name: "arg0",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
@@ -134,7 +134,6 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
 // CHECK-NEXT: } ]
 // CHECK-NEXT: }
 
-  %0 = "tfl.pseudo_input" (%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("Input")
-  %1 = "tfl.fake_quant"(%0) {num_bits = 6 : i32, narrow_range = false, minmax = [0.3, 1.4]} : (tensor<4 x f32>) -> tensor<4 x f32>
-  return %1 : tensor<4xf32>
+  %0 = "tfl.fake_quant"(%arg0) {num_bits = 6 : i32, narrow_range = false, minmax = [0.3, 1.4]} : (tensor<4 x f32>) -> tensor<4 x f32>
+  return %0 : tensor<4xf32>
 }
@@ -11,7 +11,7 @@ func @main(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> {
 // CHECK-NEXT: tensors: [ {
 // CHECK-NEXT: shape: [ 3, 2 ],
 // CHECK-NEXT: buffer: 1,
-// CHECK-NEXT: name: "tf.Placeholder.input",
+// CHECK-NEXT: name: "arg0",
 // CHECK-NEXT: quantization: {
 // CHECK-EMPTY:
 // CHECK-NEXT: }
@ -42,7 +42,6 @@ func @main(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> {
|
||||
// CHECK-NEXT: } ]
|
||||
// CHECK-NEXT: }
|
||||
|
||||
%0 = "tf.Placeholder.input"(%arg0) {name = "Placeholder"} : (tensor<3x2xf32>) -> tensor<3x2xf32>
|
||||
%1 = "tf.AddV2"(%0, %0) : (tensor<3x2xf32>, tensor<3x2xf32>) -> tensor<3x2xf32>
|
||||
return %1 : tensor<3x2xf32>
|
||||
%0 = "tf.AddV2"(%arg0, %arg0) : (tensor<3x2xf32>, tensor<3x2xf32>) -> tensor<3x2xf32>
|
||||
return %0 : tensor<3x2xf32>
|
||||
}
|
||||
|
||||
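A detail these expectations make visible: the exporter used to take an input tensor's name from the pseudo-input or placeholder op ("Input", "tf.Placeholder.input"), and with that op gone it apparently falls back to the block argument's own name. A minimal hypothetical test of just the naming behavior might look like the following (the RUN line mirrors the ones used elsewhere in these tests; the op choice is illustrative):

// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
// CHECK: name: "arg0"
func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> {
  %0 = "tfl.exp"(%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
  return %0 : tensor<4xf32>
}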
@ -18,7 +18,7 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -92,11 +92,10 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
// CHECK-NEXT: } ]
// CHECK-NEXT:}

%0 = "tfl.pseudo_input" (%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("Input")
%1 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%2 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
%0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
// tf.div is the result of conversion to a Flex TF op
%3 = "tf.Div"(%2, %1) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
%4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
return %4 : tensor<4xf32>
%2 = "tf.Div"(%1, %0) : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
%3 = "tfl.exp"(%2) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
return %3 : tensor<4xf32>
}

@ -12,14 +12,14 @@ func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 40, 37 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 40, 37 ],
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "Input1",
// CHECK-NEXT: name: "arg1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -65,8 +65,6 @@ func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
// CHECK-NEXT:}

%cst = constant unit
%0 = "tfl.pseudo_input"(%arg0) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
%1 = "tfl.pseudo_input"(%arg1) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
%2:2 = "tfl.fully_connected"(%0, %1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
return %2 : tensor<40x40xf32>
%0:2 = "tfl.fully_connected"(%arg0, %arg1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
return %0 : tensor<40x40xf32>
}

@ -12,14 +12,14 @@ func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 40, 37 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 40, 37 ],
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "Input1",
// CHECK-NEXT: name: "arg1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -65,8 +65,6 @@ func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
// CHECK-NEXT:}

%cst = constant unit
%0 = "tfl.pseudo_input"(%arg0) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
%1 = "tfl.pseudo_input"(%arg1) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
%2:2 = "tfl.fully_connected"(%0, %1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "SHUFFLED4x16INT8"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
return %2 : tensor<40x40xf32>
%0:2 = "tfl.fully_connected"(%arg0, %arg1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "SHUFFLED4x16INT8"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
return %0 : tensor<40x40xf32>
}

@ -19,14 +19,14 @@
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 1 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "tfl.pseudo_input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 1 ],
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "tfl.pseudo_input1",
// CHECK-NEXT: name: "arg1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -160,11 +160,9 @@
// CHECK-NEXT: }

func @main(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
%0 = "tfl.pseudo_input"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
%1 = "tfl.pseudo_input"(%arg1) : (tensor<1xf32>) -> tensor<1xf32>
%2 = "tfl.less"(%0, %1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1>
%3 = "tf.If"(%2, %0, %1) {else_branch = @cond_false, then_branch = @cond_true, is_stateless = false} : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
return %3 : tensor<1xf32>
%0 = "tfl.less"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi1>
%1 = "tf.If"(%0, %arg0, %arg1) {else_branch = @cond_false, then_branch = @cond_true, is_stateless = false} : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
return %1 : tensor<1xf32>
}

func @cond_true(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {

@ -16,7 +16,7 @@ func @main(tensor<4xi1>) -> tensor<4xi1> {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: type: BOOL,
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -82,10 +82,9 @@ func @main(tensor<4xi1>) -> tensor<4xi1> {
// CHECK-NEXT: }
// CHECK-EMPTY:

%0 = "tfl.pseudo_input" (%arg0) : (tensor<4xi1>) -> tensor<4xi1> loc("Input")
%1 = "tfl.pseudo_const" () {value = dense<true> : tensor<4xi1>} : () -> tensor<4xi1> loc("Const1")
%2 = "tfl.pseudo_const" () {value = dense<false> : tensor<4xi1>} : () -> tensor<4xi1> loc("Const2")
%3 = "tfl.logical_or"(%0, %2) : (tensor<4xi1>, tensor<4xi1>) -> tensor<4xi1> loc("logical_or")
%4 = "tfl.logical_and"(%3, %1) : (tensor<4xi1>, tensor<4xi1>) -> tensor<4xi1> loc("logical_and")
return %4 : tensor<4xi1>
%0 = "tfl.pseudo_const" () {value = dense<true> : tensor<4xi1>} : () -> tensor<4xi1> loc("Const1")
%1 = "tfl.pseudo_const" () {value = dense<false> : tensor<4xi1>} : () -> tensor<4xi1> loc("Const2")
%2 = "tfl.logical_or"(%arg0, %1) : (tensor<4xi1>, tensor<4xi1>) -> tensor<4xi1> loc("logical_or")
%3 = "tfl.logical_and"(%2, %0) : (tensor<4xi1>, tensor<4xi1>) -> tensor<4xi1> loc("logical_and")
return %3 : tensor<4xi1>
}

@ -11,126 +11,154 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, t
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "tfl.pseudo_input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "tfl.pseudo_input1",
// CHECK-NEXT: name: "arg1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 3,
// CHECK-NEXT: name: "tfl.pseudo_input2",
// CHECK-NEXT: name: "arg2",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 4,
// CHECK-NEXT: name: "tfl.pseudo_input3",
// CHECK-NEXT: name: "arg3",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 5,
// CHECK-NEXT: name: "tfl.pseudo_input4",
// CHECK-NEXT: name: "arg4",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 6,
// CHECK-NEXT: name: "tfl.pseudo_input5",
// CHECK-NEXT: name: "arg5",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 7,
// CHECK-NEXT: name: "tfl.pseudo_input6",
// CHECK-NEXT: name: "arg6",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 8,
// CHECK-NEXT: name: "tfl.pseudo_input7",
// CHECK-NEXT: name: "arg7",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 9,
// CHECK-NEXT: name: "tfl.pseudo_input8",
// CHECK-NEXT: name: "arg8",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 10,
// CHECK-NEXT: name: "tfl.pseudo_input9",
// CHECK-NEXT: name: "arg9",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 11,
// CHECK-NEXT: name: "tfl.pseudo_input10",
// CHECK-NEXT: name: "arg10",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 12,
// CHECK-NEXT: name: "tfl.pseudo_input11",
// CHECK-NEXT: name: "arg11",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 13,
// CHECK-NEXT: name: "tfl.pseudo_input12",
// CHECK-NEXT: name: "arg12",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 14,
// CHECK-NEXT: name: "tfl.pseudo_input13",
// CHECK-NEXT: name: "arg13",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 15,
// CHECK-NEXT: name: "tfl.pseudo_input14",
// CHECK-NEXT: name: "arg14",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 16,
// CHECK-NEXT: name: "tfl.pseudo_input15",
// CHECK-NEXT: name: "arg15",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 17,
// CHECK-NEXT: name: "tfl.pseudo_input16",
// CHECK-NEXT: name: "arg16",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 18,
// CHECK-NEXT: name: "tfl.pseudo_input17",
// CHECK-NEXT: name: "arg17",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 19,
// CHECK-NEXT: name: "arg18",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 20,
// CHECK-NEXT: name: "arg19",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 21,
// CHECK-NEXT: name: "arg20",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 22,
// CHECK-NEXT: name: "arg21",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -150,44 +178,16 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, t
// CHECK-NEXT: is_variable: true
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 21,
// CHECK-NEXT: name: "tfl.pseudo_input18",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 22,
// CHECK-NEXT: name: "tfl.pseudo_input19",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 23,
// CHECK-NEXT: name: "tfl.pseudo_input20",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 24,
// CHECK-NEXT: name: "tfl.pseudo_input21",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 25,
// CHECK-NEXT: name: "tfl.lstm",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: } ],
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 21, 22, 23 ],
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 ],
// CHECK-NEXT: outputs: [ 24 ],
// CHECK-NEXT: operators: [ {
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ],
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 22, 23, 18, 19, 20, 21 ],
// CHECK-NEXT: outputs: [ 24 ],
// CHECK-NEXT: builtin_options_type: LSTMOptions,
// CHECK-NEXT: builtin_options: {
@ -236,49 +236,27 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, t
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-NEXT: data: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
// CHECK-NEXT: }, {
// CHECK-NEXT: data: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: } ]
// CHECK-NEXT: }
// CHECK-EMPTY:

^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>, %arg4: tensor<4 x f32>, %arg5: tensor<4 x f32>, %arg6: tensor<4 x f32>, %arg7: tensor<4 x f32>, %arg8: tensor<4 x f32>, %arg9: tensor<4 x f32>, %arg10: tensor<4 x f32>, %arg11: tensor<4 x f32>, %arg12: tensor<4 x f32>, %arg13: tensor<4 x f32>, %arg14: tensor<4 x f32>, %arg15: tensor<4 x f32>, %arg16: tensor<4 x f32>, %arg17: tensor<4 x f32>, %arg20: tensor<4 x f32>, %arg21: tensor<4 x f32>, %arg22: tensor<4 x f32>, %arg23: tensor<4 x f32>):
%0 = "tfl.pseudo_input" (%arg0) : (tensor<4 x f32>) -> tensor<4 x f32>
%1 = "tfl.pseudo_input" (%arg1) : (tensor<4 x f32>) -> tensor<4 x f32>
%2 = "tfl.pseudo_input" (%arg2) : (tensor<4 x f32>) -> tensor<4 x f32>
%3 = "tfl.pseudo_input" (%arg3) : (tensor<4 x f32>) -> tensor<4 x f32>
%4 = "tfl.pseudo_input" (%arg4) : (tensor<4 x f32>) -> tensor<4 x f32>
%5 = "tfl.pseudo_input" (%arg5) : (tensor<4 x f32>) -> tensor<4 x f32>
%6 = "tfl.pseudo_input" (%arg6) : (tensor<4 x f32>) -> tensor<4 x f32>
%7 = "tfl.pseudo_input" (%arg7) : (tensor<4 x f32>) -> tensor<4 x f32>
%8 = "tfl.pseudo_input" (%arg8) : (tensor<4 x f32>) -> tensor<4 x f32>
%9 = "tfl.pseudo_input" (%arg9) : (tensor<4 x f32>) -> tensor<4 x f32>
%10 = "tfl.pseudo_input" (%arg10) : (tensor<4 x f32>) -> tensor<4 x f32>
%11 = "tfl.pseudo_input" (%arg11) : (tensor<4 x f32>) -> tensor<4 x f32>
%12 = "tfl.pseudo_input" (%arg12) : (tensor<4 x f32>) -> tensor<4 x f32>
%13 = "tfl.pseudo_input" (%arg13) : (tensor<4 x f32>) -> tensor<4 x f32>
%14 = "tfl.pseudo_input" (%arg14) : (tensor<4 x f32>) -> tensor<4 x f32>
%15 = "tfl.pseudo_input" (%arg15) : (tensor<4 x f32>) -> tensor<4 x f32>
%16 = "tfl.pseudo_input" (%arg16) : (tensor<4 x f32>) -> tensor<4 x f32>
%17 = "tfl.pseudo_input" (%arg17) : (tensor<4 x f32>) -> tensor<4 x f32>
%18 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%19 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%20 = "tfl.pseudo_input" (%arg20) : (tensor<4 x f32>) -> tensor<4 x f32>
%21 = "tfl.pseudo_input" (%arg21) : (tensor<4 x f32>) -> tensor<4 x f32>
%22 = "tfl.pseudo_input" (%arg22) : (tensor<4 x f32>) -> tensor<4 x f32>
%23 = "tfl.pseudo_input" (%arg23) : (tensor<4 x f32>) -> tensor<4 x f32>
%24 = "tfl.lstm"(%0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %16, %17, %18, %19, %20, %21, %22, %23) ({}) {fused_activation_function = "NONE", kernel_type = "FULL"} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>, %arg4: tensor<4 x f32>, %arg5: tensor<4 x f32>, %arg6: tensor<4 x f32>, %arg7: tensor<4 x f32>, %arg8: tensor<4 x f32>, %arg9: tensor<4 x f32>, %arg10: tensor<4 x f32>, %arg11: tensor<4 x f32>, %arg12: tensor<4 x f32>, %arg13: tensor<4 x f32>, %arg14: tensor<4 x f32>, %arg15: tensor<4 x f32>, %arg16: tensor<4 x f32>, %arg17: tensor<4 x f32>, %arg18: tensor<4 x f32>, %arg19: tensor<4 x f32>, %arg20: tensor<4 x f32>, %arg21: tensor<4 x f32>):
%cst0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%cst1 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%24 = "tfl.lstm"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8, %arg9, %arg10, %arg11, %arg12, %arg13, %arg14, %arg15, %arg16, %arg17, %cst0, %cst1, %arg18, %arg19, %arg20, %arg21) ({}) {fused_activation_function = "NONE", kernel_type = "FULL"} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %24 : tensor<4xf32>
}
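The reshuffled index lists above follow from how tensors appear to be numbered now: block arguments are emitted first (tensors 0-21 here) and constants defined in the body come after them (tensors 22-23), so the operator keeps its MLIR operand order while the tensor indices it refers to get permuted. A hypothetical two-operator sketch (not part of this change, assuming the same argument-first numbering) makes the mapping easier to see:

// Tensors: %arg0 -> 0, %arg1 -> 1, Const -> 2, mul0 -> 3, mul1 -> 4;
// so mul0 would read inputs [ 0, 2 ] and mul1 would read inputs [ 3, 1 ].
func @main(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
  %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
  %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul0")
  %2 = "tfl.mul"(%1, %arg1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul1")
  return %2 : tensor<4xf32>
}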
@ -24,7 +24,7 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -131,12 +131,11 @@ func @main(tensor<4xf32>) -> tensor<4xf32> {
// CHECK-NEXT: } ]
// CHECK-NEXT: }

%0 = "tfl.pseudo_input" (%arg0) : (tensor<4xf32>) -> tensor<4xf32> loc("Input")
%1 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%2 = "tfl.squared_difference"(%0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
%3 = "tfl.mul"(%0, %2) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
%4 = "tfl.div"(%3, %2) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
%5 = "tfl.exp"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
%6 = "tfl.neg"(%5) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
return %6 : tensor<4xf32>
%0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%1 = "tfl.squared_difference"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
%2 = "tfl.mul"(%arg0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
%3 = "tfl.div"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
%4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
%5 = "tfl.neg"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
return %5 : tensor<4xf32>
}

@ -6,10 +6,9 @@ module attributes {
func @main(tensor<3x2xi32>) -> tensor<3x2xi32>
attributes {tf.entry_function = {inputs = "input", outputs = "SameNameAsOutput"}} {
^bb0(%arg0: tensor<3x2xi32>):
%0 = "tfl.pseudo_input" (%arg0) : (tensor<3x2xi32>) -> tensor<3x2xi32> loc("Input")
%1 = "tfl.pseudo_const" () {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
%2 = "tfl.sub" (%0, %1) {fused_activation_function = "NONE"} : (tensor<3x2xi32>, tensor<3x2xi32>) -> tensor<3x2xi32>
return %2 : tensor<3x2xi32>
%0 = "tfl.pseudo_const" () {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32>
%1 = "tfl.sub" (%arg0, %0) {fused_activation_function = "NONE"} : (tensor<3x2xi32>, tensor<3x2xi32>) -> tensor<3x2xi32>
return %1 : tensor<3x2xi32>
}
}

@ -13,7 +13,7 @@ func @main(tensor<3x!quant.uniform<i8:f32, 0.1>>) -> tensor<3x!quant.uniform<i8:
// CHECK-NEXT: shape: [ 3 ],
// CHECK-NEXT: type: INT8,
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-NEXT: scale: [ 0.1 ],
// CHECK-NEXT: zero_point: [ 0 ]
@ -61,8 +61,7 @@ func @main(tensor<3x!quant.uniform<i8:f32, 0.1>>) -> tensor<3x!quant.uniform<i8:
// CHECK-NEXT: } ]
// CHECK-NEXT:}

%0 = "tfl.pseudo_input" (%arg0) : (tensor<3x!quant.uniform<i8:f32, 0.1>>) ->tensor<3x!quant.uniform<i8:f32, 0.1>> loc("Input")
%1 = "tfl.pseudo_qconst"() { qtype = tensor<3x!quant.uniform<i8:f32, 0.1>>, value = dense<2> : tensor<3xi8>} : () -> tensor<3x!quant.uniform<i8:f32, 0.1>>
%2 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<3x!quant.uniform<i8:f32, 0.1>>, tensor<3x!quant.uniform<i8:f32, 0.1>>) -> tensor<3x!quant.uniform<i8:f32, 0.1>> loc("mul")
return %2 : tensor<3x!quant.uniform<i8:f32, 0.1>>
%0 = "tfl.pseudo_qconst"() { qtype = tensor<3x!quant.uniform<i8:f32, 0.1>>, value = dense<2> : tensor<3xi8>} : () -> tensor<3x!quant.uniform<i8:f32, 0.1>>
%1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<3x!quant.uniform<i8:f32, 0.1>>, tensor<3x!quant.uniform<i8:f32, 0.1>>) -> tensor<3x!quant.uniform<i8:f32, 0.1>> loc("mul")
return %1 : tensor<3x!quant.uniform<i8:f32, 0.1>>
}

@ -13,7 +13,7 @@ func @main(tensor<3x!quant.uniform<i8:f32, 1.0>>) -> tensor<3x!quant.uniform<i8:
// CHECK-NEXT: shape: [ 3 ],
// CHECK-NEXT: type: INT8,
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-NEXT: scale: [ 1.0 ],
// CHECK-NEXT: zero_point: [ 0 ]
@ -61,8 +61,7 @@ func @main(tensor<3x!quant.uniform<i8:f32, 1.0>>) -> tensor<3x!quant.uniform<i8:
// CHECK-NEXT: } ]
// CHECK-NEXT:}

%0 = "tfl.pseudo_input" (%arg0) : (tensor<3x!quant.uniform<i8:f32, 1.0>>) ->tensor<3x!quant.uniform<i8:f32, 1.0>> loc("Input")
%1 = "tfl.pseudo_qconst"() { qtype = tensor<3x!quant.uniform<i8:f32, 1.0>>, value = dense<2> : tensor<3xi8>} : () -> tensor<3x!quant.uniform<i8:f32, 1.0>>
%2 = "tfl.mul"(%0, %1) {fused_activation_function = "NONE"} : (tensor<3x!quant.uniform<i8:f32, 1.0>>, tensor<3x!quant.uniform<i8:f32, 1.0>>) -> tensor<3x!quant.uniform<i8:f32, 1.0>> loc("mul")
return %2 : tensor<3x!quant.uniform<i8:f32, 1.0>>
%0 = "tfl.pseudo_qconst"() { qtype = tensor<3x!quant.uniform<i8:f32, 1.0>>, value = dense<2> : tensor<3xi8>} : () -> tensor<3x!quant.uniform<i8:f32, 1.0>>
%1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<3x!quant.uniform<i8:f32, 1.0>>, tensor<3x!quant.uniform<i8:f32, 1.0>>) -> tensor<3x!quant.uniform<i8:f32, 1.0>> loc("mul")
return %1 : tensor<3x!quant.uniform<i8:f32, 1.0>>
}

@ -12,7 +12,7 @@ func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> {
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 1, 6, 6, 16 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -50,7 +50,6 @@ func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> {
// CHECK-NEXT: } ]
// CHECK-NEXT: }

%0 = "tfl.pseudo_input"(%arg0) : (tensor<1x6x6x16xf32>) -> tensor<1x6x6x16xf32> loc("Input")
%1 = "tfl.average_pool_2d"(%0) {filter_height = 3 : i32, filter_width = 6 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 3 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> loc("avgpool")
return %1 : tensor<1x1x1x16xf32>
%0 = "tfl.average_pool_2d"(%arg0) {filter_height = 3 : i32, filter_width = 6 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 3 : i32, stride_w = 1 : i32} : (tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> loc("avgpool")
return %0 : tensor<1x1x1x16xf32>
}

@ -1,11 +1,9 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s

func @main(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>) -> tensor<40x40xf32> {
%cst = constant unit
%0 = "tfl.pseudo_input"(%arg0) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
%1 = "tfl.pseudo_input"(%arg1) : (tensor<40x37xf32>) -> tensor<40x37xf32> loc("Input")
%2:2 = "tfl.fully_connected"(%0, %1, %cst) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
return %2 : tensor<40x40xf32>
%0 = constant unit
%1:2 = "tfl.fully_connected"(%arg0, %arg1, %0) {fused_activation_function = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : (tensor<40x37xf32>, tensor<40x37xf32>, none) -> (tensor<40x40xf32>, tensor<40x40xf32>)
return %1 : tensor<40x40xf32>
}

// CHECK: operators: [ {

@ -21,17 +21,17 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
// CHECK-NEXT: } ],
// CHECK-NEXT: subgraphs: [ {
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 2 ],
// CHECK-NEXT: type: INT32,
// CHECK-NEXT: shape: [ 1, 224, 224, 3 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "Const",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 1, 224, 224, 3 ],
// CHECK-NEXT: shape: [ 2 ],
// CHECK-NEXT: type: INT32,
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "tfl.pseudo_input",
// CHECK-NEXT: name: "Const",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -97,10 +97,10 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: } ],
// CHECK-NEXT: inputs: [ 1 ],
// CHECK-NEXT: inputs: [ 0 ],
// CHECK-NEXT: outputs: [ 8 ],
// CHECK-NEXT: operators: [ {
// CHECK-NEXT: inputs: [ 1 ],
// CHECK-NEXT: inputs: [ 0 ],
// CHECK-NEXT: outputs: [ 2 ]
// CHECK-NEXT: }, {
// CHECK-NEXT: opcode_index: 1,
@ -115,7 +115,7 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: opcode_index: 2,
// CHECK-NEXT: inputs: [ 5, 0 ],
// CHECK-NEXT: inputs: [ 5, 1 ],
// CHECK-NEXT: outputs: [ 6 ]
// CHECK-NEXT: }, {
// CHECK-NEXT: opcode_index: 3,
@ -136,10 +136,10 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
// CHECK-NEXT: buffers: [ {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-NEXT: data: [ 1, 0, 0, 0, 233, 3, 0, 0 ]
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-NEXT: data: [ 1, 0, 0, 0, 233, 3, 0, 0 ]
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-NEXT: data: [ 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 
180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180 ]
|
||||
@ -156,13 +156,12 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
|
||||
// CHECK-NEXT: } ]
|
||||
// CHECK-NEXT:}
|
||||
|
||||
%cst = "tfl.pseudo_const" () {value = dense<[1, 1001]> : tensor<2xi32>} : () -> tensor<2xi32> loc("Const")
|
||||
%0 = "tfl.pseudo_input"(%arg0) : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3xf32>
|
||||
%1 = "tfl.quantize"(%0) {qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>
|
||||
%0 = "tfl.pseudo_const" () {value = dense<[1, 1001]> : tensor<2xi32>} : () -> tensor<2xi32> loc("Const")
|
||||
%1 = "tfl.quantize"(%arg0) {qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>
|
||||
%2 = "tfl.pseudo_qconst"() {qtype = tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, value = dense<-76> : tensor<32x3x3x3xi8>} : () -> tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>
|
||||
%3 = "tfl.pseudo_qconst"() {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, value = dense<0> : tensor<32xi32>} : () -> tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>
|
||||
%4 = "tfl.conv_2d"(%1, %2, %3) {dilation_h_factor = 2 : i32, dilation_w_factor = 3 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 4 : i32, stride_w = 5 : i32} : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>, tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>, tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>) -> tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>
|
||||
%5 = "tfl.reshape"(%4, %cst) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>) -> tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>
|
||||
%5 = "tfl.reshape"(%4, %0) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>, tensor<2xi32>) -> tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>
|
||||
%6 = "tfl.softmax"(%5) {beta = 1.000000e+00 : f32} : (tensor<1x1001x!quant.uniform<u8:f32, 0.023528476789885875>>) -> tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-03>>
|
||||
%7 = "tfl.dequantize"(%6) : (tensor<1x1001x!quant.uniform<u8:f32, 3.906250e-03>>) -> tensor<1x1001xf32>
|
||||
return %7 : tensor<1x1001xf32>
|
||||
|
||||
@ -10,18 +10,18 @@ func @main(tensor<3x2xi32>) -> tensor<6xi32> {
|
||||
// CHECK-NEXT: } ],
|
||||
// CHECK-NEXT: subgraphs: [ {
|
||||
// CHECK-NEXT: tensors: [ {
|
||||
// CHECK-NEXT: shape: [ 1 ],
|
||||
// CHECK-NEXT: shape: [ 3, 2 ],
|
||||
// CHECK-NEXT: type: INT32,
|
||||
// CHECK-NEXT: buffer: 1,
|
||||
// CHECK-NEXT: name: "Const",
|
||||
// CHECK-NEXT: name: "arg0",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 3, 2 ],
|
||||
// CHECK-NEXT: shape: [ 1 ],
|
||||
// CHECK-NEXT: type: INT32,
|
||||
// CHECK-NEXT: buffer: 2,
|
||||
// CHECK-NEXT: name: "Input",
|
||||
// CHECK-NEXT: name: "Const",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
@ -34,10 +34,10 @@ func @main(tensor<3x2xi32>) -> tensor<6xi32> {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: } ],
|
||||
// CHECK-NEXT: inputs: [ 1 ],
|
||||
// CHECK-NEXT: inputs: [ 0 ],
|
||||
// CHECK-NEXT: outputs: [ 2 ],
|
||||
// CHECK-NEXT: operators: [ {
|
||||
// CHECK-NEXT: inputs: [ 1, 0 ],
|
||||
// CHECK-NEXT: inputs: [ 0, 1 ],
|
||||
// CHECK-NEXT: outputs: [ 2 ]
|
||||
// CHECK-NEXT: } ]
|
||||
// CHECK-NEXT: name: "main"
|
||||
@ -46,16 +46,15 @@ func @main(tensor<3x2xi32>) -> tensor<6xi32> {
|
||||
// CHECK-NEXT: buffers: [ {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: data: [ 6, 0, 0, 0 ]
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: data: [ 6, 0, 0, 0 ]
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: } ]
|
||||
// CHECK-NEXT: }
|
||||
|
||||
%cst = "tfl.pseudo_const" () {value = dense<[6]> : tensor<1xi32>} : () -> tensor<1xi32> loc("Const")
|
||||
%0 = "tfl.pseudo_input" (%arg0) : (tensor<3x2xi32>) -> tensor<3x2xi32> loc("Input")
|
||||
%2 = "tfl.reshape" (%0, %cst) : (tensor<3x2xi32>, tensor<1xi32>) -> tensor<6xi32>
|
||||
return %2 : tensor<6xi32>
|
||||
%0 = "tfl.pseudo_const" () {value = dense<[6]> : tensor<1xi32>} : () -> tensor<1xi32> loc("Const")
|
||||
%1 = "tfl.reshape" (%arg0, %0) : (tensor<3x2xi32>, tensor<1xi32>) -> tensor<6xi32>
|
||||
return %1 : tensor<6xi32>
|
||||
}
|
||||
|
||||
@ -100,10 +100,9 @@ func @main(tensor<3x2xi32>) -> tensor<3x2xi32>
|
||||
// CHECK-NEXT: } ]
|
||||
// CHECK-NEXT: }
|
||||
|
||||
%0 = "tfl.pseudo_input" (%arg0) : (tensor<3x2xi32>) -> tensor<3x2xi32> loc("Input")
|
||||
%1 = "tfl.pseudo_const" () {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32> loc("Const")
|
||||
%2 = "tfl.sub" (%0, %1) {fused_activation_function = "RELU6"} : (tensor<3x2xi32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("sub")
|
||||
%3 = "std.constant" () {value = dense<10> : tensor<i32>} : () -> tensor<i32> loc("SameNameAsOutput")
|
||||
%4 = "tfl.add" (%3, %2) {fused_activation_function = "NONE"} : (tensor<i32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("add")
|
||||
return %4 : tensor<3x2xi32>
|
||||
%0 = "tfl.pseudo_const" () {value = dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>} : () -> tensor<3x2xi32> loc("Const")
|
||||
%1 = "tfl.sub" (%arg0, %0) {fused_activation_function = "RELU6"} : (tensor<3x2xi32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("sub")
|
||||
%2 = "std.constant" () {value = dense<10> : tensor<i32>} : () -> tensor<i32> loc("SameNameAsOutput")
|
||||
%3 = "tfl.add" (%2, %1) {fused_activation_function = "NONE"} : (tensor<i32>, tensor<3x2xi32>) -> tensor<3x2xi32> loc("add")
|
||||
return %3 : tensor<3x2xi32>
|
||||
}
|
||||
|
||||
@ -11,28 +11,28 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -
|
||||
// CHECK-NEXT: tensors: [ {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 1,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input",
|
||||
// CHECK-NEXT: name: "arg0",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 2,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input1",
|
||||
// CHECK-NEXT: name: "arg1",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 3,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input2",
|
||||
// CHECK-NEXT: name: "arg2",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 4,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input3",
|
||||
// CHECK-NEXT: name: "arg3",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
@ -84,11 +84,7 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -
|
||||
// CHECK-EMPTY:
|
||||
|
||||
^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>):
|
||||
%0 = "tfl.pseudo_input" (%arg0) : (tensor<4 x f32>) -> tensor<4 x f32>
|
||||
%1 = "tfl.pseudo_input" (%arg1) : (tensor<4 x f32>) -> tensor<4 x f32>
|
||||
%2 = "tfl.pseudo_input" (%arg2) : (tensor<4 x f32>) -> tensor<4 x f32>
|
||||
%3 = "tfl.pseudo_input" (%arg3) : (tensor<4 x f32>) -> tensor<4 x f32>
|
||||
%4 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
|
||||
%5 = "tfl.svdf"(%0, %1, %2, %3, %4) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
|
||||
return %5 : tensor<4xf32>
|
||||
%0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
|
||||
%1 = "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %0) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
|
||||
return %1 : tensor<4xf32>
|
||||
}
|
||||
|
||||
@ -11,7 +11,7 @@ func @main(tensor<4 x f32>, tensor<4 x i8>, tensor<4 x f32>, tensor<4 x f32>) ->
|
||||
// CHECK-NEXT: tensors: [ {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 1,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input",
|
||||
// CHECK-NEXT: name: "arg0",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
@ -19,21 +19,21 @@ func @main(tensor<4 x f32>, tensor<4 x i8>, tensor<4 x f32>, tensor<4 x f32>) ->
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: type: INT8,
|
||||
// CHECK-NEXT: buffer: 2,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input1",
|
||||
// CHECK-NEXT: name: "arg1",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 3,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input2",
|
||||
// CHECK-NEXT: name: "arg2",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 4,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input3",
|
||||
// CHECK-NEXT: name: "arg3",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
@ -85,11 +85,7 @@ func @main(tensor<4 x f32>, tensor<4 x i8>, tensor<4 x f32>, tensor<4 x f32>) ->
|
||||
// CHECK-EMPTY:
|
||||
|
||||
^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x i8>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>):
|
||||
%0 = "tfl.pseudo_input" (%arg0) : (tensor<4 x f32>) -> tensor<4 x f32>
|
||||
%1 = "tfl.pseudo_input" (%arg1) : (tensor<4 x i8>) -> tensor<4 x i8>
|
||||
%2 = "tfl.pseudo_input" (%arg2) : (tensor<4 x f32>) -> tensor<4 x f32>
|
||||
%3 = "tfl.pseudo_input" (%arg3) : (tensor<4 x f32>) -> tensor<4 x f32>
|
||||
%4 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
|
||||
%5 = "tfl.svdf"(%0, %1, %2, %3, %4) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<4xf32>, tensor<4xi8>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
|
||||
return %5 : tensor<4xf32>
|
||||
%0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
|
||||
%1 = "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %0) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<4xf32>, tensor<4xi8>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
|
||||
return %1 : tensor<4xf32>
|
||||
}
|
||||
|
||||
@ -11,126 +11,154 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, t
|
||||
// CHECK-NEXT: tensors: [ {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 1,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input",
|
||||
// CHECK-NEXT: name: "arg0",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 2,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input1",
|
||||
// CHECK-NEXT: name: "arg1",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 3,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input2",
|
||||
// CHECK-NEXT: name: "arg2",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 4,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input3",
|
||||
// CHECK-NEXT: name: "arg3",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 5,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input4",
|
||||
// CHECK-NEXT: name: "arg4",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 6,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input5",
|
||||
// CHECK-NEXT: name: "arg5",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 7,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input6",
|
||||
// CHECK-NEXT: name: "arg6",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 8,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input7",
|
||||
// CHECK-NEXT: name: "arg7",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 9,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input8",
|
||||
// CHECK-NEXT: name: "arg8",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 10,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input9",
|
||||
// CHECK-NEXT: name: "arg9",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 11,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input10",
|
||||
// CHECK-NEXT: name: "arg10",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 12,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input11",
|
||||
// CHECK-NEXT: name: "arg11",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 13,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input12",
|
||||
// CHECK-NEXT: name: "arg12",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 14,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input13",
|
||||
// CHECK-NEXT: name: "arg13",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 15,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input14",
|
||||
// CHECK-NEXT: name: "arg14",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 16,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input15",
|
||||
// CHECK-NEXT: name: "arg15",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 17,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input16",
|
||||
// CHECK-NEXT: name: "arg16",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 18,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input17",
|
||||
// CHECK-NEXT: name: "arg17",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 19,
|
||||
// CHECK-NEXT: name: "arg18",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 20,
|
||||
// CHECK-NEXT: name: "arg19",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 21,
|
||||
// CHECK-NEXT: name: "arg20",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 22,
|
||||
// CHECK-NEXT: name: "arg21",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
@ -150,44 +178,16 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, t
|
||||
// CHECK-NEXT: is_variable: true
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 21,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input18",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 22,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input19",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 23,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input20",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 24,
|
||||
// CHECK-NEXT: name: "tfl.pseudo_input21",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: shape: [ 4 ],
|
||||
// CHECK-NEXT: buffer: 25,
|
||||
// CHECK-NEXT: name: "tfl.unidirectional_sequence_lstm",
|
||||
// CHECK-NEXT: quantization: {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: } ],
|
||||
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 21, 22, 23 ],
|
||||
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 ],
|
||||
// CHECK-NEXT: outputs: [ 24 ],
|
||||
// CHECK-NEXT: operators: [ {
|
||||
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 ],
|
||||
// CHECK-NEXT: inputs: [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 22, 23, 18, 19, 20, 21 ],
|
||||
// CHECK-NEXT: outputs: [ 24 ],
|
||||
// CHECK-NEXT: builtin_options_type: UnidirectionalSequenceLSTMOptions,
|
||||
// CHECK-NEXT: builtin_options: {
|
||||
@ -236,48 +236,26 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, t
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: data: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-NEXT: data: [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: }, {
|
||||
// CHECK-EMPTY:
|
||||
// CHECK-NEXT: } ]
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-EMPTY:
|
||||
|
||||
^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>, %arg4: tensor<4 x f32>, %arg5: tensor<4 x f32>, %arg6: tensor<4 x f32>, %arg7: tensor<4 x f32>, %arg8: tensor<4 x f32>, %arg9: tensor<4 x f32>, %arg10: tensor<4 x f32>, %arg11: tensor<4 x f32>, %arg12: tensor<4 x f32>, %arg13: tensor<4 x f32>, %arg14: tensor<4 x f32>, %arg15: tensor<4 x f32>, %arg16: tensor<4 x f32>, %arg17: tensor<4 x f32>, %arg20: tensor<4 x f32>, %arg21: tensor<4 x f32>, %arg22: tensor<4 x f32>, %arg23: tensor<4 x f32>):
%0 = "tfl.pseudo_input" (%arg0) : (tensor<4 x f32>) -> tensor<4 x f32>
%1 = "tfl.pseudo_input" (%arg1) : (tensor<4 x f32>) -> tensor<4 x f32>
%2 = "tfl.pseudo_input" (%arg2) : (tensor<4 x f32>) -> tensor<4 x f32>
%3 = "tfl.pseudo_input" (%arg3) : (tensor<4 x f32>) -> tensor<4 x f32>
%4 = "tfl.pseudo_input" (%arg4) : (tensor<4 x f32>) -> tensor<4 x f32>
%5 = "tfl.pseudo_input" (%arg5) : (tensor<4 x f32>) -> tensor<4 x f32>
%6 = "tfl.pseudo_input" (%arg6) : (tensor<4 x f32>) -> tensor<4 x f32>
%7 = "tfl.pseudo_input" (%arg7) : (tensor<4 x f32>) -> tensor<4 x f32>
%8 = "tfl.pseudo_input" (%arg8) : (tensor<4 x f32>) -> tensor<4 x f32>
%9 = "tfl.pseudo_input" (%arg9) : (tensor<4 x f32>) -> tensor<4 x f32>
%10 = "tfl.pseudo_input" (%arg10) : (tensor<4 x f32>) -> tensor<4 x f32>
%11 = "tfl.pseudo_input" (%arg11) : (tensor<4 x f32>) -> tensor<4 x f32>
%12 = "tfl.pseudo_input" (%arg12) : (tensor<4 x f32>) -> tensor<4 x f32>
%13 = "tfl.pseudo_input" (%arg13) : (tensor<4 x f32>) -> tensor<4 x f32>
%14 = "tfl.pseudo_input" (%arg14) : (tensor<4 x f32>) -> tensor<4 x f32>
%15 = "tfl.pseudo_input" (%arg15) : (tensor<4 x f32>) -> tensor<4 x f32>
%16 = "tfl.pseudo_input" (%arg16) : (tensor<4 x f32>) -> tensor<4 x f32>
%17 = "tfl.pseudo_input" (%arg17) : (tensor<4 x f32>) -> tensor<4 x f32>
%18 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%19 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%20 = "tfl.pseudo_input" (%arg20) : (tensor<4 x f32>) -> tensor<4 x f32>
%21 = "tfl.pseudo_input" (%arg21) : (tensor<4 x f32>) -> tensor<4 x f32>
%22 = "tfl.pseudo_input" (%arg22) : (tensor<4 x f32>) -> tensor<4 x f32>
%23 = "tfl.pseudo_input" (%arg23) : (tensor<4 x f32>) -> tensor<4 x f32>
%24 = "tfl.unidirectional_sequence_lstm"(%0, %1, %2, %3, %4, %5, %6, %7, %8, %9, %10, %11, %12, %13, %14, %15, %16, %17, %18, %19, %20, %21, %22, %23) {fused_activation_function = "NONE", time_major = true} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %24 : tensor<4xf32>
^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>, %arg4: tensor<4 x f32>, %arg5: tensor<4 x f32>, %arg6: tensor<4 x f32>, %arg7: tensor<4 x f32>, %arg8: tensor<4 x f32>, %arg9: tensor<4 x f32>, %arg10: tensor<4 x f32>, %arg11: tensor<4 x f32>, %arg12: tensor<4 x f32>, %arg13: tensor<4 x f32>, %arg14: tensor<4 x f32>, %arg15: tensor<4 x f32>, %arg16: tensor<4 x f32>, %arg17: tensor<4 x f32>, %arg18: tensor<4 x f32>, %arg19: tensor<4 x f32>, %arg20: tensor<4 x f32>, %arg21: tensor<4 x f32>):
%0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%1 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%2 = "tfl.unidirectional_sequence_lstm"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6, %arg7, %arg8, %arg9, %arg10, %arg11, %arg12, %arg13, %arg14, %arg15, %arg16, %arg17, %0, %1, %arg18, %arg19, %arg20, %arg21) {fused_activation_function = "NONE", time_major = true} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %2 : tensor<4xf32>
}

@ -11,28 +11,28 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "tfl.pseudo_input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "tfl.pseudo_input1",
// CHECK-NEXT: name: "arg1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 3,
// CHECK-NEXT: name: "tfl.pseudo_input2",
// CHECK-NEXT: name: "arg2",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: buffer: 4,
// CHECK-NEXT: name: "tfl.pseudo_input3",
// CHECK-NEXT: name: "arg3",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -84,11 +84,7 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -
// CHECK-EMPTY:

^bb0(%arg0: tensor<4 x f32>, %arg1: tensor<4 x f32>, %arg2: tensor<4 x f32>, %arg3: tensor<4 x f32>):
%0 = "tfl.pseudo_input" (%arg0) : (tensor<4 x f32>) -> tensor<4 x f32>
%1 = "tfl.pseudo_input" (%arg1) : (tensor<4 x f32>) -> tensor<4 x f32>
%2 = "tfl.pseudo_input" (%arg2) : (tensor<4 x f32>) -> tensor<4 x f32>
%3 = "tfl.pseudo_input" (%arg3) : (tensor<4 x f32>) -> tensor<4 x f32>
%4 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%5 = "tfl.unidirectional_sequence_rnn"(%0, %1, %2, %3, %4) {fused_activation_function = "TANH", time_major = true} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %5 : tensor<4xf32>
%0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
%1 = "tfl.unidirectional_sequence_rnn"(%arg0, %arg1, %arg2, %arg3, %0) {fused_activation_function = "TANH", time_major = true} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
return %1 : tensor<4xf32>
}

@ -19,14 +19,14 @@
// CHECK-NEXT: shape: [ ],
// CHECK-NEXT: type: INT32,
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "tfl.pseudo_input",
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 1 ],
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "tfl.pseudo_input1",
// CHECK-NEXT: name: "arg1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
@ -193,14 +193,11 @@
// CHECK-NEXT: }

func @main(%arg0: tensor<i32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
%0 = "tfl.pseudo_input"(%arg0) : (tensor<i32>) -> tensor<i32>
%1 = "tfl.pseudo_input"(%arg1) : (tensor<1xf32>) -> tensor<1xf32>

// While %0 is greater than zero, element wise add %1 with itself.
%2:2 = "tf.While"(%0, %1) {
// While %arg0 is greater than zero, element wise add %arg1 with itself.
%0:2 = "tf.While"(%arg0, %arg1) {
cond = @cond, body = @body, is_stateless = false
} : (tensor<i32>, tensor<1xf32>) -> (tensor<i32>, tensor<1xf32>)
return %2#1 : tensor<1xf32>
return %0#1 : tensor<1xf32>
}

func @cond(%arg0: tensor<*xi32>, %arg1: tensor<*xf32>) -> tensor<i1> {

@ -135,7 +135,8 @@ int main(int argc, char **argv) {
input_file_name, input_mlir, use_splatted_constant, custom_opdefs,
debug_info_file, input_arrays, input_dtypes, input_shapes,
output_arrays,
/*prune_unused_nodes=*/true, &source_mgr, &context);
/*prune_unused_nodes=*/true, &source_mgr, &context,
/*add_pseudo_input_nodes=*/false);

// If errors occur, the library call in the above already logged the error
// message. So we can just return here.
@ -186,7 +187,8 @@ int main(int argc, char **argv) {
auto status = tensorflow::ConvertTFExecutorToTFLOrFlatbuffer(
module.ValueOrDie().get(), output_mlir, emit_builtin_tflite_ops,
emit_select_tf_ops, emit_custom_ops, emit_quant_adaptor_ops,
lower_tensor_list_ops, quant_specs, &result, &pm);
lower_tensor_list_ops, quant_specs, &result, &pm,
/*add_pseudo_input_nodes=*/false);
if (!status.ok()) return kTrFailure;

std::string error_msg;

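Taken together, the two updated call sites above mean the standalone translator now disables pseudo-input nodes at both import and export time. A minimal sketch of the resulting flow in main() follows; it is not part of the commit, and it assumes the flag variables, source_mgr, context, pm, and result that tf_tfl_translate.cc sets up earlier:

// Sketch only: input_file_name, input_mlir, use_splatted_constant,
// custom_opdefs, the input/output array and dtype/shape flags, quant_specs,
// source_mgr, context, pm, and result are assumed from the surrounding file.
auto module = tensorflow::LoadFromGraphdefOrMlirSource(
    input_file_name, input_mlir, use_splatted_constant, custom_opdefs,
    debug_info_file, input_arrays, input_dtypes, input_shapes, output_arrays,
    /*prune_unused_nodes=*/true, &source_mgr, &context,
    /*add_pseudo_input_nodes=*/false);  // function arguments feed ops directly
if (!module.ok()) return kTrFailure;
auto status = tensorflow::ConvertTFExecutorToTFLOrFlatbuffer(
    module.ValueOrDie().get(), output_mlir, emit_builtin_tflite_ops,
    emit_select_tf_ops, emit_custom_ops, emit_quant_adaptor_ops,
    lower_tensor_list_ops, quant_specs, &result, &pm,
    /*add_pseudo_input_nodes=*/false);  // exporter skips the wrapper ops too
if (!status.ok()) return kTrFailure;
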
@ -53,7 +53,8 @@ StatusOr<OwningModuleRef> LoadFromGraphdefOrMlirSource(
absl::string_view debug_info_file, absl::string_view input_arrays,
absl::string_view input_dtypes, absl::string_view input_shapes,
absl::string_view output_arrays, bool prune_unused_nodes,
llvm::SourceMgr* source_mgr, MLIRContext* context) {
llvm::SourceMgr* source_mgr, MLIRContext* context,
bool add_pseudo_input_nodes) {
// Set up the input file.
std::string error_message;
auto file = mlir::openInputFile(input_filename, &error_message);
@ -89,20 +90,21 @@ StatusOr<OwningModuleRef> LoadFromGraphdefOrMlirSource(
input_shapes, output_arrays, prune_unused_nodes,
/*convert_legacy_fed_inputs=*/true,
/*graph_as_function=*/false, /*upgrade_legacy=*/true,
/*add_pseudo_input_nodes=*/true, context);
add_pseudo_input_nodes, context);
}
return tensorflow::GraphdefToMlirTranslateFunction(
std::move(file), debug_info_file, input_arrays, input_dtypes,
input_shapes, output_arrays, prune_unused_nodes,
/*convert_legacy_fed_inputs=*/true, /*graph_as_function=*/false,
/*upgrade_legacy=*/true, add_pseudo_input_nodes, context);
}

Status ConvertTFExecutorToTFLOrFlatbuffer(
mlir::ModuleOp module, bool export_to_mlir, bool emit_builtin_tflite_ops,
bool emit_select_tf_ops, bool emit_custom_ops, bool emit_quant_adaptor_ops,
bool lower_tensor_list_ops, const mlir::TFL::QuantizationSpecs& quant_specs,
std::string* result, mlir::PassManager* pass_manager) {
std::string* result, mlir::PassManager* pass_manager,
bool add_pseudo_input_nodes) {
mlir::StatusScopedDiagnosticHandler statusHandler(module.getContext(),
/*propagate=*/true);
if (failed(pass_manager->run(module))) {
@ -119,7 +121,7 @@ Status ConvertTFExecutorToTFLOrFlatbuffer(
if (!quant_specs.RunWeightQuantization()) {
if (tflite::MlirToFlatBufferTranslateFunction(
module, result, emit_builtin_tflite_ops, emit_select_tf_ops,
emit_custom_ops)) {
emit_custom_ops, add_pseudo_input_nodes)) {
return statusHandler.ConsumeStatus();
}
} else {
@ -128,7 +130,7 @@ Status ConvertTFExecutorToTFLOrFlatbuffer(
std::string pre_quantized_result;
if (tflite::MlirToFlatBufferTranslateFunction(
module, &pre_quantized_result, emit_builtin_tflite_ops,
emit_select_tf_ops, emit_custom_ops)) {
emit_select_tf_ops, emit_custom_ops, add_pseudo_input_nodes)) {
return statusHandler.ConsumeStatus();
}
flatbuffers::FlatBufferBuilder q_builder(/*initial_size=*/10240);

@ -38,7 +38,8 @@ LoadFromGraphdefOrMlirSource(
absl::string_view debug_info_file, absl::string_view input_arrays,
absl::string_view input_dtypes, absl::string_view input_shapes,
absl::string_view output_arrays, bool prune_unused_nodes,
llvm::SourceMgr* source_mgr, mlir::MLIRContext* context);
llvm::SourceMgr* source_mgr, mlir::MLIRContext* context,
bool add_pseudo_input_nodes = true);

// Taking a MLIR module in TF executor dialect and a set of parameters,
// applies a set of passes to convert the module to TF Lite dialect and
@ -52,7 +53,8 @@ Status ConvertTFExecutorToTFLOrFlatbuffer(
mlir::ModuleOp module, bool export_to_mlir, bool emit_builtin_tflite_ops,
bool emit_select_tf_ops, bool emit_custom_ops, bool emit_quant_adaptor_ops,
bool lower_tensor_list_ops, const mlir::TFL::QuantizationSpecs& quant_specs,
std::string* result, mlir::PassManager* pass_manager);
std::string* result, mlir::PassManager* pass_manager,
bool add_pseudo_input_nodes = true);
} // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_TF_TO_TFL_FLATBUFFER_H_
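Both declarations default add_pseudo_input_nodes to true, so existing callers compile unchanged and keep the legacy pseudo-input behavior; only updated call sites such as tf_tfl_translate opt out. A hedged usage sketch, assuming a caller with module, quant_specs, result, and pm already in scope (all flag values here are illustrative, not taken from the commit):

// Legacy behavior: the defaulted trailing parameter is true, so pseudo-input
// nodes are still added for callers that have not been updated.
auto legacy = tensorflow::ConvertTFExecutorToTFLOrFlatbuffer(
    module, /*export_to_mlir=*/false, /*emit_builtin_tflite_ops=*/true,
    /*emit_select_tf_ops=*/false, /*emit_custom_ops=*/false,
    /*emit_quant_adaptor_ops=*/false, /*lower_tensor_list_ops=*/true,
    quant_specs, &result, &pm);
// Updated behavior: pass false explicitly to drop the wrapper ops and map
// function arguments straight to subgraph input tensors.
auto updated = tensorflow::ConvertTFExecutorToTFLOrFlatbuffer(
    module, /*export_to_mlir=*/false, /*emit_builtin_tflite_ops=*/true,
    /*emit_select_tf_ops=*/false, /*emit_custom_ops=*/false,
    /*emit_quant_adaptor_ops=*/false, /*lower_tensor_list_ops=*/true,
    quant_specs, &result, &pm, /*add_pseudo_input_nodes=*/false);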