From a27d161222360d8c0af4d3bfa5c653fb16cbdefc Mon Sep 17 00:00:00 2001
From: Chuan He <chhe@google.com>
Date: Thu, 9 Apr 2020 19:26:23 -0700
Subject: [PATCH] Create a JSON to FlatBuffer utility for tests that cannot
 use the mlir-flatbuffer-mlir round-trip.

PiperOrigin-RevId: 305808867
Change-Id: Iad7b36caf6b77733f58649f97b9bad953a8b48f0
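
A sketch of the intended lit-test wiring (the file names here are
illustrative, not part of this change; the real test uses %p/%s
substitutions):

  json_to_flatbuffer path/to/schema.fbs input.json \
      | flatbuffer_translate --tflite-flatbuffer-to-mlir -o -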
---
 tensorflow/compiler/mlir/lite/BUILD           |  9 ++
 .../compiler/mlir/lite/json_to_flatbuffer.cc  | 63 ++++++++++++++
 .../mlir/lite/tests/flatbuffer2mlir/BUILD     |  3 +
 .../tests/flatbuffer2mlir/import_json.json    | 83 +++++++++++++++++++
 tensorflow/compiler/mlir/runlit.cfg.py        |  2 +-
 5 files changed, 159 insertions(+), 1 deletion(-)
 create mode 100644 tensorflow/compiler/mlir/lite/json_to_flatbuffer.cc
 create mode 100644 tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/import_json.json

diff --git a/tensorflow/compiler/mlir/lite/BUILD b/tensorflow/compiler/mlir/lite/BUILD
index 697b161c16d..c4042aad12e 100644
--- a/tensorflow/compiler/mlir/lite/BUILD
+++ b/tensorflow/compiler/mlir/lite/BUILD
@@ -537,6 +537,15 @@ tf_native_cc_binary(
     ],
 )
 
+tf_native_cc_binary(
+    name = "json_to_flatbuffer",
+    srcs = ["json_to_flatbuffer.cc"],
+    deps = [
+        "//tensorflow/lite/schema:schema_fbs",
+        "@flatbuffers",
+    ],
+)
+
 cc_library(
     name = "emit_error_reporter",
     srcs = [
diff --git a/tensorflow/compiler/mlir/lite/json_to_flatbuffer.cc b/tensorflow/compiler/mlir/lite/json_to_flatbuffer.cc
new file mode 100644
index 00000000000..4a4e7a65cd6
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/json_to_flatbuffer.cc
@@ -0,0 +1,63 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <stdint.h>
+
+#include <cstddef>
+#include <cstdio>
+#include <iostream>
+#include <string>
+
+#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
+#include "flatbuffers/idl.h"  // from @flatbuffers
+#include "flatbuffers/util.h"  // from @flatbuffers
+#include "tensorflow/lite/schema/schema_generated.h"
+
+int main(int argc, char** argv) {
+  // Load the FlatBuffer schema (.fbs) and the JSON input from disk.
+  if (argc < 3) {
+    std::cerr << "Missing input arguments. Usage:\n"
+              << argv[0] << " <schema file> <json file>\n\n";
+    return 1;
+  }
+  const char* schema_path = argv[1];
+  const char* json_path = argv[2];
+  std::string schema;
+  std::string json;
+
+  const bool status =
+      flatbuffers::LoadFile(schema_path, /*binary=*/false, &schema) &&
+      flatbuffers::LoadFile(json_path, /*binary=*/false, &json);
+  if (!status) {
+    std::cerr << "couldn't load files!\n";
+    return 1;
+  }
+
+  // Parse the schema first so the parser can use it to parse the JSON data.
+  flatbuffers::Parser parser;
+  const bool parse_result =
+      parser.Parse(schema.c_str()) && parser.Parse(json.c_str());
+  if (!parse_result) {
+    std::cerr << "Parse error: " << parser.error_ << "\n";
+    return 1;
+  }
+  const size_t length = parser.builder_.GetSize();
+  const size_t n =
+      std::fwrite(parser.builder_.GetBufferPointer(), 1, length, stdout);
+  if (n != length) {
+    std::cerr << "print to stdout filed.\n";
+    return 1;
+  }
+  return 0;
+}
diff --git a/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/BUILD b/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/BUILD
index b52b766a10d..da3fe02562b 100644
--- a/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/BUILD
+++ b/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/BUILD
@@ -12,6 +12,7 @@ glob_lit_tests(
     test_file_exts = [
         "mlir",
         "cc",
+        "json",
     ],
 )
 
@@ -24,6 +25,8 @@ filegroup(
         ":importer_test_min_max",
         "//tensorflow/compiler/mlir/lite:flatbuffer_to_string",
         "//tensorflow/compiler/mlir/lite:flatbuffer_translate",
+        "//tensorflow/compiler/mlir/lite:json_to_flatbuffer",
+        "//tensorflow/lite/schema:schema.fbs",
         "@llvm-project//llvm:FileCheck",
     ],
 )
diff --git a/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/import_json.json b/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/import_json.json
new file mode 100644
index 00000000000..d6d3b142931
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/import_json.json
@@ -0,0 +1,83 @@
+// RUN: json_to_flatbuffer %p/../../../../../lite/schema/schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck --dump-input-on-failure %s
+
+// CHECK: %cst = constant unit
+// CHECK: %[[RES0:.*]] = "tfl.conv_2d"(%arg0, %arg1, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 0 : i32, stride_w = 0 : i32} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, none) -> tensor<256x32x32x16xf32>
+// CHECK: return %[[RES0]] : tensor<256x32x32x16xf32>
+
+{
+  version: 3,
+  operator_codes: [
+    {
+      builtin_code: "CONV_2D",
+    }
+  ],
+  subgraphs: [
+    {
+      tensors: [
+        {
+          shape: [
+            256,
+            32,
+            32,
+            3
+          ],
+          name: "arg0",
+          quantization: {
+          }
+        },
+        {
+          shape: [
+            16,
+            3,
+            3,
+            3
+          ],
+          name: "arg1",
+          quantization: {
+          }
+        },
+        {
+          shape: [
+            0
+          ],
+          name: "cst"
+        },
+        {
+          shape: [
+            256,
+            32,
+            32,
+            16
+          ],
+          name: "output",
+          quantization: {
+          }
+        },
+      ],
+      inputs: [
+        0,
+        1
+      ],
+      outputs: [
+        3
+      ],
+      operators: [
+        {
+          inputs: [
+            0,
+            1,
+            -1
+          ],
+          outputs: [
+            3
+          ],
+          builtin_options_type: "Conv2DOptions",
+          builtin_options: {
+          }
+        }
+      ],
+      name: "main"
+    }
+  ],
+  description: "MLIR Converted."
+}
diff --git a/tensorflow/compiler/mlir/runlit.cfg.py b/tensorflow/compiler/mlir/runlit.cfg.py
index 67533197f3e..ab8c1107fc8 100644
--- a/tensorflow/compiler/mlir/runlit.cfg.py
+++ b/tensorflow/compiler/mlir/runlit.cfg.py
@@ -71,7 +71,7 @@ tool_dirs = config.mlir_tf_tools_dirs + [
 tool_names = [
     'mlir-opt', 'mlir-translate', 'tf-opt', 'tf_tfl_translate',
     'flatbuffer_to_string', 'flatbuffer_translate', 'tf-mlir-translate',
-    'mlir-tflite-runner', 'tfcompile'
+    'mlir-tflite-runner', 'tfcompile', 'json_to_flatbuffer'
 ]
 tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
 llvm_config.add_tool_substitutions(tools, tool_dirs)