Create a JSON-to-flatbuffer utility for tests that cannot use the mlir-flatbuffer-mlir round-trip.
PiperOrigin-RevId: 305808867 Change-Id: Iad7b36caf6b77733f58649f97b9bad953a8b48f0
commit a27d161222
parent 035e2160fe
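For context, the conversion added here is just the stock flatbuffers text parser driven from the command line. The sketch below is illustrative only and not part of this commit: the helper name JsonToTfliteModel and the VerifyModelBuffer sanity check are assumptions added for the example, while the Parser/LoadFile/builder_ calls are the same ones the new tool uses. It assumes the schema (.fbs) and JSON contents have already been read into strings.

// Illustrative sketch only -- not part of this commit.
#include <cstddef>
#include <cstdint>
#include <string>

#include "flatbuffers/idl.h"
#include "tensorflow/lite/schema/schema_generated.h"

// Parses `schema` (.fbs text) and then `json` against it, returning the
// serialized binary flatbuffer in `out`.
bool JsonToTfliteModel(const std::string& schema, const std::string& json,
                       std::string* out) {
  flatbuffers::Parser parser;
  // The schema must be parsed first so the parser knows how to interpret
  // the JSON object that follows.
  if (!parser.Parse(schema.c_str()) || !parser.Parse(json.c_str())) {
    return false;
  }
  const uint8_t* buffer = parser.builder_.GetBufferPointer();
  const size_t size = parser.builder_.GetSize();
  // Optional sanity check (not done by the tool itself): the bytes should
  // verify as a tflite::Model root.
  flatbuffers::Verifier verifier(buffer, size);
  if (!tflite::VerifyModelBuffer(verifier)) {
    return false;
  }
  out->assign(reinterpret_cast<const char*>(buffer), size);
  return true;
}

In the new lit test further down, the same conversion is performed by the json_to_flatbuffer binary, whose stdout is piped straight into flatbuffer_translate and checked with FileCheck.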
@@ -537,6 +537,15 @@ tf_native_cc_binary(
     ],
 )
 
+tf_native_cc_binary(
+    name = "json_to_flatbuffer",
+    srcs = ["json_to_flatbuffer.cc"],
+    deps = [
+        "//tensorflow/lite/schema:schema_fbs",
+        "@flatbuffers",
+    ],
+)
+
 cc_library(
     name = "emit_error_reporter",
     srcs = [
tensorflow/compiler/mlir/lite/json_to_flatbuffer.cc (new file, 63 lines)
@@ -0,0 +1,63 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <stdint.h>
+
+#include <cstddef>
+#include <cstdio>
+#include <iostream>
+#include <string>
+
+#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
+#include "flatbuffers/idl.h"  // from @flatbuffers
+#include "flatbuffers/util.h"  // from @flatbuffers
+#include "tensorflow/lite/schema/schema_generated.h"
+
+int main(int argc, char** argv) {
+  // Load the FlatBuffer schema (.fbs) and the JSON file from disk.
+  if (argc < 3) {
+    std::cerr << "Missing input argument. Usage:\n"
+              << argv[0] << " <schema file> <JSON file>\n\n";
+    return 1;
+  }
+  const char* schema_path = argv[1];
+  const char* json_path = argv[2];
+  std::string schema;
+  std::string json;
+
+  const bool status =
+      flatbuffers::LoadFile(schema_path, /*binary=*/false, &schema) &&
+      flatbuffers::LoadFile(json_path, /*binary=*/false, &json);
+  if (!status) {
+    std::cerr << "couldn't load files!\n";
+    return 1;
+  }
+
+  // Parse the schema first, so it can be used to parse the JSON data after.
+  flatbuffers::Parser parser;
+  const bool parse_result =
+      parser.Parse(schema.c_str()) && parser.Parse(json.c_str());
+  if (!parse_result) {
+    std::cerr << "Parse error.\n";
+    return 1;
+  }
+  const size_t length = parser.builder_.GetSize();
+  const size_t n =
+      std::fwrite(parser.builder_.GetBufferPointer(), 1, length, stdout);
+  if (n != length) {
+    std::cerr << "print to stdout failed.\n";
+    return 1;
+  }
+  return 0;
+}
@@ -12,6 +12,7 @@ glob_lit_tests(
     test_file_exts = [
         "mlir",
         "cc",
+        "json",
     ],
 )
 
@@ -24,6 +25,8 @@ filegroup(
         ":importer_test_min_max",
         "//tensorflow/compiler/mlir/lite:flatbuffer_to_string",
         "//tensorflow/compiler/mlir/lite:flatbuffer_translate",
+        "//tensorflow/compiler/mlir/lite:json_to_flatbuffer",
+        "//tensorflow/lite/schema:schema.fbs",
         "@llvm-project//llvm:FileCheck",
     ],
 )
@@ -0,0 +1,83 @@
+// RUN: json_to_flatbuffer %p/../../../../../lite/schema/schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck --dump-input-on-failure %s
+
+// CHECK: %cst = constant unit
+// CHECK: %[[RES0:.*]] = "tfl.conv_2d"(%arg0, %arg1, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 0 : i32, stride_w = 0 : i32} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, none) -> tensor<256x32x32x16xf32>
+// CHECK: return %[[RES0]] : tensor<256x32x32x16xf32>
+
+{
+  version: 3,
+  operator_codes: [
+    {
+      builtin_code: "CONV_2D",
+    }
+  ],
+  subgraphs: [
+    {
+      tensors: [
+        {
+          shape: [
+            256,
+            32,
+            32,
+            3
+          ],
+          name: "arg0",
+          quantization: {
+          }
+        },
+        {
+          shape: [
+            16,
+            3,
+            3,
+            3
+          ],
+          name: "arg1",
+          quantization: {
+          }
+        },
+        {
+          shape: [
+            0
+          ],
+          name: "cst"
+        },
+        {
+          shape: [
+            256,
+            32,
+            32,
+            16
+          ],
+          name: "output",
+          quantization: {
+          }
+        },
+      ],
+      inputs: [
+        0,
+        1
+      ],
+      outputs: [
+        3
+      ],
+      operators: [
+        {
+          inputs: [
+            0,
+            1,
+            -1
+          ],
+          outputs: [
+            3
+          ],
+          builtin_options_type: "Conv2DOptions",
+          builtin_options: {
+          }
+        }
+      ],
+      name: "main"
+    }
+  ],
+  description: "MLIR Converted."
+}
@@ -71,7 +71,7 @@ tool_dirs = config.mlir_tf_tools_dirs + [
 tool_names = [
     'mlir-opt', 'mlir-translate', 'tf-opt', 'tf_tfl_translate',
     'flatbuffer_to_string', 'flatbuffer_translate', 'tf-mlir-translate',
-    'mlir-tflite-runner', 'tfcompile'
+    'mlir-tflite-runner', 'tfcompile', 'json_to_flatbuffer'
 ]
 tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
 llvm_config.add_tool_substitutions(tools, tool_dirs)