Drop --dump-input-on-failure from FileCheck commands
This is a debug-only option; its presence does not affect test execution. Its functionality was made the default by https://reviews.llvm.org/D81422, and the option itself is no longer available.

PiperOrigin-RevId: 315731836
Change-Id: I721a1a33ad004786e4be17869d038690f89679c4
parent 8aefe94dc7
commit 4660a50f07
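For illustration, the change in each test file amounts to the following (the pass name below is a made-up placeholder, not one of the tests touched in this commit):

// Before: dumping the failing input had to be requested explicitly.
// RUN: tf-opt %s -some-pass | FileCheck %s --dump-input-on-failure
// After D81422 a plain FileCheck invocation already dumps its input when a check fails, so the flag is simply dropped.
// RUN: tf-opt %s -some-pass | FileCheck %s

Where finer control is still wanted, the behavior can be set explicitly with FileCheck's -dump-input=<mode> option (e.g. --dump-input=always, which one test below keeps); that is a pointer only, not something this change relies on.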
@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -quant-import-stats --quant-test-stats='entries { name: "op" params { min_max { min: -1 max: 1 } } } entries { name: "op_0:0" params { min_max { min: -2 max: 2 } } } entries { name_regex: "op_*" params { min_max { min: -3 max: 3 } } }' | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -quant-import-stats --quant-test-stats='entries { name: "op" params { min_max { min: -1 max: 1 } } } entries { name: "op_0:0" params { min_max { min: -2 max: 2 } } } entries { name_regex: "op_*" params { min_max { min: -3 max: 3 } } }' | FileCheck %s
 // CHECK-LABEL: import_stats_skip

@@ -1,4 +1,4 @@
-// RUN: tf-opt -pass-pipeline='func(canonicalize)' %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -pass-pipeline='func(canonicalize)' %s | FileCheck %s
 // Checks that tfl.reshape should be removed if its output's only user is
 // another tfl.reshape

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -canonicalize | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -canonicalize | FileCheck %s
 // CHECK-LABEL: @add_float
 func @add_float() -> (tensor<f32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) {

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -tfl-identify-dilated-conv | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tfl-identify-dilated-conv | FileCheck %s
 func @testDilatedConv(%arg0: tensor<1x128x128x3xf32>, %arg1: tensor<5x5x3x8xf32>) -> tensor<1x128x128x8xf32> {
 %cst = constant dense<[2, 2]> : tensor<2xi32>

@@ -1,4 +1,4 @@
-# RUN: tf_tfl_translate -tf-input-arrays=input0,input1 -tf-input-shapes=4:4 -tf-input-data-types=DT_INT32,DT_INT32 -tf-output-arrays=Add %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+# RUN: tf_tfl_translate -tf-input-arrays=input0,input1 -tf-input-shapes=4:4 -tf-input-data-types=DT_INT32,DT_INT32 -tf-output-arrays=Add %s -o - | flatbuffer_to_string - | FileCheck %s
 # Add two tensor<4xi32> inputs and return the result

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Ensure basic_lstm roundtrip exactly
 func @main(%arg0: tensor<1x384xf32>, %arg1: tensor<1x96xf32>, %arg2: tensor<384x480xf32>, %arg3: tensor<384xf32>, %arg4: tensor<1x96xf32>) -> tensor<1x96xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Ensure constants roundtrip exactly
 func @bool() -> tensor<4xi1> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // CHECK: func @main(%arg0: tensor<?x19x19x3xf32>) -> tensor<?x9x9x4xf32>
 func @main(%arg0: tensor<?x19x19x3xf32>) -> tensor<?x9x9x4xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Confirm function references in if ops are preserved
 func @main(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
 // CHECK: %{{.*}} = "tf.If"(%{{.*}}, %{{.*}}, %{{.*}}) {else_branch = @cond_false, is_stateless = false, then_branch = @cond_true} : (tensor<1xi1>, tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // This only test the exporter and importer are working without min/max quantization parameters.
 func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Tests input and output names from FlatBuffer are added to `tf.entry_function` attribute.

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Confirm float constants and operators survive a roundtrip
 func @main(tensor<4xf32>) -> tensor<4xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Test to make sure optional parameters survive a roundtrip
 func @main(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>) -> tensor<40x40xf32> {

@@ -1,4 +1,4 @@
-// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck --dump-input-on-failure %s
+// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s
 // This test is to test that if the flatbuffer omits the last optional input `bias` of tfl.conv_2d op, the flatbuffer_importer will automatically adds `none` value to tfl.conv_2d.

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,exp,div --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,exp,div --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Confirm output-arrays works.
 func @main(tensor<4xf32>) -> tensor<4xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,exp,div --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,exp,div --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Confirm graph pruning.
 func @main(tensor<4xf32>) -> tensor<4xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
 // CHECK: %{{.*}} = "tfl.quantize"(%{{.*}}) {qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>} : (tensor<1x224x224x3xf32>) -> tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Confirm we can extract type info from reshape
 func @main() -> tensor<2x2xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Check a few basic properties of the import-export,
 // including constants retaining their shape
 // and the module including the TFLite version.

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Check to see if function references in while loops are preserved
 // TODO(b/138222071) Expect first output to be a scalar
@@ -1,4 +1,4 @@
-// RUN: tf-opt -tfl-prepare-composite-funcs-tf -tfl-fuse-tftext=true %s -split-input-file | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tfl-prepare-composite-funcs-tf -tfl-fuse-tftext=true %s -split-input-file | FileCheck %s
 module {
 func @whitespace_tokenizer_rank1(%arg0: tensor<1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>) attributes {tf._input_shapes = [#tf.shape<1>], tf.api_implements = "tftext:WhitespaceTokenizer", tf.signature.is_stateful} {

@@ -1,6 +1,6 @@
-// RUN: tf-opt --tfl-legalize-tf-while %s -o - | FileCheck %s --dump-input-on-failure
-// RUN: tf-opt --tfl-legalize-tf-while %s -o - --tfl-legalize-tf-while --inline="disable-simplify" | FileCheck %s --dump-input-on-failure --check-prefix=INLINE
-// RUN: tf-opt --tfl-legalize-tf-while %s -o - --tfl-legalize-tf-while --inline | FileCheck %s --dump-input-on-failure --check-prefix=CANON
+// RUN: tf-opt --tfl-legalize-tf-while %s -o - | FileCheck %s
+// RUN: tf-opt --tfl-legalize-tf-while %s -o - --tfl-legalize-tf-while --inline="disable-simplify" | FileCheck %s --check-prefix=INLINE
+// RUN: tf-opt --tfl-legalize-tf-while %s -o - --tfl-legalize-tf-while --inline | FileCheck %s --check-prefix=CANON
 func @while_main(%arg0: tensor<?x256x256xf32>) -> (tensor<i32>, tensor<256x256xf32>, tensor<?x256x256xf32>) attributes {tf.entry_function = {inputs = "input", outputs = "Identity,Identity_1,Identity_2"}} {
 %cst = constant dense<1.000000e+00> : tensor<256x256xf32>

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -tfl-legalize-tf | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tfl-legalize-tf | FileCheck %s
 func @add(%arg0: tensor<1xf32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
 %0 = "tf.Add"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>

@@ -1,4 +1,4 @@
-// RUN: tf-opt -allow-unregistered-dialect -tfl-load-recipe %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -allow-unregistered-dialect -tfl-load-recipe %s | FileCheck %s
 // CHECK-LABEL: testLstm
 func @testLstm(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>, %arg3: tensor<*xf32>, %arg4: tensor<*xf32>, %arg5: tensor<*xf32>, %arg6: tensor<*xf32>, %arg7: tensor<*xf32>, %arg8: tensor<*xf32>, %arg9: tensor<*xf32>, %arg10: tensor<*xf32>, %arg11: tensor<*xf32>, %arg12: tensor<*xf32>, %arg13: tensor<*xf32>, %arg14: tensor<*xf32>, %arg15: tensor<*xf32>, %arg16: tensor<*xf32>, %arg17: tensor<*xf32>, %arg18: tensor<*xf32>, %arg19: tensor<*xf32>, %arg20: tensor<*xf32>, %arg21: tensor<*xf32>, %arg22: tensor<*xf32>, %arg23: tensor<*xf32>) -> tensor<*xf32> {

@@ -1,6 +1,6 @@
 // Test to verify translation & export work as intended with runtime.
-// RUN: tf-opt --mlir-print-debuginfo --canonicalize --tfl-while-loop-outline %s | mlir-tflite-runner --dump-interpreter-state 2>&1 | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt --mlir-print-debuginfo --canonicalize --tfl-while-loop-outline %s | mlir-tflite-runner --dump-interpreter-state 2>&1 | FileCheck %s
 // Verify value computed:
 // ----------------------
@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
 ^bb0(%arg0: tensor<1x224x224x3xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -tflite-flatbuffer-to-mlir - -o - | FileCheck --check-prefix=IMPORT %s
 func @main(tensor<4xf32>) -> tensor<4xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s --dump-input-on-failure
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<4xi1>) -> tensor<4xi1> {
 ^bb0(%arg0: tensor<4xi1>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<1x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<1x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<1x4xf32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<4xf32>) -> tensor<4xf32> {
 ^bb0(%arg0: tensor<4xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<3x!quant.uniform<i8:f32, 0.1>>) -> tensor<3x!quant.uniform<i8:f32, 0.1>> {
 ^bb0(%arg0: tensor<3x!quant.uniform<i8:f32, 0.1>>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<3x!quant.uniform<i8:f32, 1.0>>) -> tensor<3x!quant.uniform<i8:f32, 1.0>> {
 ^bb0(%arg0: tensor<3x!quant.uniform<i8:f32, 1.0>>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> {
 ^bb0(%arg0: tensor<1x6x6x16xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<3x2xi32>) -> tensor<6xi32> {
 ^bb0(%arg0: tensor<3x2xi32>):

@@ -1,5 +1,5 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - -strip-debug-info | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s --check-prefix=STRIP
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - -strip-debug-info | flatbuffer_to_string - | FileCheck %s --check-prefix=STRIP
 func @main(tensor<3x2xi32>) -> tensor<3x2xi32>
 attributes {tf.entry_function = {inputs = "input", outputs = "SameNameAsOutput"}} {
@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -> tensor<4 x f32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 func @main(tensor<4 x f32>, tensor<4 x i8>, tensor<4 x f32>, tensor<4 x f32>) -> tensor<4 x f32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 // CHECK: {
 // CHECK-NEXT: version: 3,

@@ -1,4 +1,4 @@
-// RUN: tf-opt -split-input-file -verify-diagnostics -tfl-runtime-verify %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -split-input-file -verify-diagnostics -tfl-runtime-verify %s | FileCheck %s
 // Unary math ops
 // -----

@@ -1,10 +1,10 @@
 // Run optimize pass only and check the results.
-// RUN: tf-opt %s -tfl-optimize | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tfl-optimize | FileCheck %s
 // Run optimize pass and then canonicalize pass, and make sure some folding is applied.
 // RUN: tf-opt %s -tfl-optimize -canonicalize | FileCheck --check-prefix=FOLD %s
 // Run legalize pass and then optimize pass, and make sure some fusing is applied.
-// RUN: tf-opt %s -tfl-legalize-tf -tfl-optimize | FileCheck --check-prefix=Fusing --dump-input-on-failure %s
+// RUN: tf-opt %s -tfl-legalize-tf -tfl-optimize | FileCheck --check-prefix=Fusing %s
 // CHECK-LABEL: fusedConv2dRelu
 func @fusedConv2dRelu(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<16x3x3x3xf32>, %arg2: tensor<16xf32>) -> tensor<256x30x30x16xf32> {

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -tfl-optimize-functional-ops -split-input-file | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tfl-optimize-functional-ops -split-input-file | FileCheck %s
 // CHECK-LABEL: main
 func @main(%arg0: tensor<f32>, %arg1: tensor<f32>) -> (tensor<f32>) {

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -tfl-post-quantize | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tfl-post-quantize | FileCheck %s
 // CHECK-LABEL: RemoveUnused
 func @RemoveUnused(%arg0: tensor<4xf32>, %arg1: tensor<i32>) -> (tensor<2xf32>,tensor<2xf32>) {

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tfl-prepare-composite-funcs-tf %s -split-input-file | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tfl-prepare-composite-funcs-tf %s -split-input-file | FileCheck %s
 module{
 func @embedding(%arg0: tensor<*xf32>, %arg1: tensor<*xi32>) -> tensor<*xf32> attributes {tf._implements = "embedding_matmul", tf._reference = "mlir"} {

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tfl-prepare-tf %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tfl-prepare-tf %s | FileCheck %s
 func @conv(tensor<256x32x32x3xf32>, tensor<3x3x3x16xf32>, tensor<256x3x32x32xf32>) -> (tensor<256x30x30x16xf32>, tensor<256x16x30x30xf32>, tensor<256x30x30x16xf32>, tensor<256x30x30x16xf32>, tensor<256x30x30x16xf32>) {
 ^bb0(%arg0: tensor<256x32x32x3xf32>, %arg1: tensor<3x3x3x16xf32>, %arg2: tensor<256x3x32x32xf32>) :

@@ -1,4 +1,4 @@
-// RUN: tf-opt -loop-invariant-code-motion %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -loop-invariant-code-motion %s -o - | FileCheck %s
 // CHECK: while_1([[ARG0:%[^ :]*]]: tensor<i32>, [[ARG1:%[^ :]*]]: tensor<1xf32>)
 func @while_1(%arg0: tensor<i32>, %arg1: tensor<1xf32>) -> tensor<1xf32> {
@@ -1,6 +1,6 @@
 // Test to verify loop outlining.
-// RUN: tf-opt --split-input-file --tfl-while-loop-outline %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt --split-input-file --tfl-while-loop-outline %s | FileCheck %s
 // Check that while loop outlining is nop if re-ran.
 // RUN: tf-opt --tfl-while-loop-outline %s -o %t1
 // RUN: tf-opt --tfl-while-loop-outline %t1 -o %t2

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tfl-trim-funcs-tf -tfl-trim-funcs-whitelist="bar,foobar" %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tfl-trim-funcs-tf -tfl-trim-funcs-whitelist="bar,foobar" %s | FileCheck %s
 func @foo(%arg0: tensor<1x4xf32>, %arg1: tensor<1x4xf32>) -> tensor<1x4xf32> {
 return %arg0 : tensor<1x4xf32>

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -tf-batch-matmul-to-tf-einsum | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tf-batch-matmul-to-tf-einsum | FileCheck %s
 func @test_batch_matmul_to_einsum(%arg0: tensor<1x2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x2x4xf32> {
 // CHECK-LABEL: test_batch_matmul_to_einsum

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tf-switch-fold %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tf-switch-fold %s | FileCheck %s
 // CHECK-LABEL: test_single_branch_direct_f
 // CHECK-NOT: Switch

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s --run-tf-graph-optimization --graph-passes=FunctionalizeControlFlowForXlaPass | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s --run-tf-graph-optimization --graph-passes=FunctionalizeControlFlowForXlaPass | FileCheck %s
 func @main() {
 tf_executor.graph {

@@ -1,4 +1,4 @@
-# RUN: not tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false -tf-input-arrays=x,y -tf-input-data-types=DT_INT32,DT_INT32 -tf-input-shapes=2:3 -tf-output-arrays=x_y_sum %s --tf-debug-info=%s.debug -o - 2>&1 | FileCheck %s --dump-input-on-failure
+# RUN: not tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false -tf-input-arrays=x,y -tf-input-data-types=DT_INT32,DT_INT32 -tf-input-shapes=2:3 -tf-output-arrays=x_y_sum %s --tf-debug-info=%s.debug -o - 2>&1 | FileCheck %s
 # Checks that source debug information is used in the output error message.
 # CHECK: error: 'tf.Add' op operands don't have broadcast-compatible shapes

@@ -1,4 +1,4 @@
-# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false -mlir-print-debuginfo %s -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false -mlir-print-debuginfo %s -o - | FileCheck %s
 node {
 name: "PartitionedCall"

@@ -1,4 +1,4 @@
-# RUN: tf-mlir-translate -graphdef-to-splatted-mlir %s -o - -mlir-print-debuginfo | FileCheck %s --dump-input-on-failure
+# RUN: tf-mlir-translate -graphdef-to-splatted-mlir %s -o - -mlir-print-debuginfo | FileCheck %s
 # CHECK: tf_executor.SwitchN
 # CHECK-SAME: of 3 : tensor<*xi32>

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tf-legalize-hlo %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tf-legalize-hlo %s | FileCheck %s
 func @biasAdd_NHWC(%arg0: tensor<1x32x10x32xi32>, %arg1: tensor<32xi32>) -> tensor<1x32x10x32xi32> {
@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -test-tf-lower-tf | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -test-tf-lower-tf | FileCheck %s
 // CHECK-LABEL: invert_permutation
 func @invert_permutation(%arg0: tensor<5xi32>) -> tensor<5xi32> {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 func @main(%arg0: tensor<f32>, %arg1: tensor<f32>) -> (tensor<f32>, tensor<f32>) {
 %0:2 = tf_executor.graph {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 // CHECK: name: "tf.ParseExample"
 // CHECK-NEXT: op: "ParseExample"

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s --run-tf-graph-optimization --graph-passes=MlirRoundtripPass | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s --run-tf-graph-optimization --graph-passes=MlirRoundtripPass | FileCheck %s
 // The test uses the tf_graph_optimization_pass to run the MlirRoundtripPass.
 // We convert mlir -> Graph -> mlir -> Graph -> mlir

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s | tf-opt | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s | tf-opt | FileCheck %s
 // Tests printer for tf_executor.island "wraps" short form.

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tpu-extract-head-tail-outside-compilation | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tpu-extract-head-tail-outside-compilation | FileCheck %s
 // Tests extraction of a outside compiled ops at head of TPU computation.

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tpu-extract-outside-compilation | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tpu-extract-outside-compilation | FileCheck %s
 // Tests that missing `_xla_outside_compilation` attribute value results in an error.

@@ -1,4 +1,4 @@
-// RUN: xla-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s
 // CHECK-LABEL: add_fold
 func @add_fold() -> tensor<4xi64> {

@@ -1,4 +1,4 @@
-// RUN: xla-opt -hlo-legalize-to-lhlo -buffer-placement -split-input-file %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt -hlo-legalize-to-lhlo -buffer-placement -split-input-file %s -o - | FileCheck %s
 // CHECK-LABEL: func @attrs
 func @attrs_copy(%operand: memref<2x2xf32>, %result: memref<2x2xf32>) {

@@ -1,7 +1,7 @@
 // Note that binary elementwise tests are run with chlo legalization enabled
 // (unlike the rest), since this is the primary use case for such ops and
 // verification of shapes and broadcasts is desired.
-// RUN: tf-opt "-xla-legalize-tf=allow-partial-conversion legalize-chlo=true" %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt "-xla-legalize-tf=allow-partial-conversion legalize-chlo=true" %s | FileCheck %s
 //===----------------------------------------------------------------------===//
 // Binary op legalizations.
@@ -1,4 +1,4 @@
-// RUN: tf-opt -xla-legalize-tf-control-flow %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -xla-legalize-tf-control-flow %s | FileCheck %s
 // CHECK-LABEL: @if
 func @if(%arg0: tensor<f32>, %arg1: tensor<f32>) -> (tensor<f32>)

@@ -1,6 +1,6 @@
-// RUN: tf-opt -xla-legalize-tf-with-tf2xla=device-type=XLA_CPU_JIT %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -xla-legalize-tf-with-tf2xla=device-type=XLA_CPU_JIT %s | FileCheck %s
-// INVALID_DEVICE: xla-opt -xla-legalize-tf-with-tf2xla=device-type=INVALID_DEVICE %s | FileCheck %s --dump-input-on-failure
+// INVALID_DEVICE: xla-opt -xla-legalize-tf-with-tf2xla=device-type=INVALID_DEVICE %s | FileCheck %s
 module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {

@@ -1,4 +1,4 @@
-// RUN: tf-opt "-xla-legalize-tf=allow-partial-conversion legalize-chlo=false" %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt "-xla-legalize-tf=allow-partial-conversion legalize-chlo=false" %s | FileCheck %s
 // RUN: tf-opt "-xla-legalize-tf=allow-partial-conversion legalize-chlo=true" -verify-diagnostics %s
 // This test runs twice:
 // 1. Through FileCheck with chlo legalization disabled since verifying

@@ -1,4 +1,4 @@
-// RUN: xla-opt -xla-legalize-to-std %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt -xla-legalize-to-std %s -o - | FileCheck %s
 // CHECK-LABEL: func @binary_ops_float(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
 func @binary_ops_float(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {

@@ -1,4 +1,4 @@
-// RUN: xla-opt -lhlo-copy-removal %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt -lhlo-copy-removal %s -o - | FileCheck %s
 // CHECK-LABEL: func @remove_simple
 func @remove_simple(%arg0: memref<2x2xf32>) {

@@ -1,6 +1,6 @@
 // RUN: xla-opt -lhlo-fuse-linalg %s -o - | FileCheck %s --dump-input=always
-// RUN: xla-opt -lhlo-fuse-linalg=tile-sizes=2,3 %s -o - | FileCheck %s -check-prefix=TILED --dump-input-on-failure
-// RUN: xla-opt -lhlo-fuse-linalg=use-parallel-loops %s -o - | FileCheck %s -check-prefix=PLOOP --dump-input-on-failure
+// RUN: xla-opt -lhlo-fuse-linalg=tile-sizes=2,3 %s -o - | FileCheck %s -check-prefix=TILED
+// RUN: xla-opt -lhlo-fuse-linalg=use-parallel-loops %s -o - | FileCheck %s -check-prefix=PLOOP
 #map0 = affine_map<(d0, d1) -> (d0, d1)>

@@ -1,4 +1,4 @@
-// RUN: xla-opt %s -lhlo-legalize-to-linalg -split-input-file | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt %s -lhlo-legalize-to-linalg -split-input-file | FileCheck %s
 // CHECK: #map0 = affine_map<(d0, d1) -> (d0, d1)>
 // CHECK-LABEL: func @element_wise

@@ -1,4 +1,4 @@
-// RUN: xla-opt %s --test-lhlo-legalize-to-llvm -split-input-file | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt %s --test-lhlo-legalize-to-llvm -split-input-file | FileCheck %s
 // CHECK-LABEL: func @static_memref_cast
 func @static_memref_cast(%buf : memref<10x1x5xf32>) {

@@ -1,4 +1,4 @@
-// RUN: xla-opt %s -test-xla-chlo-legalize-to-hlo -test-xla-lower-complex | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt %s -test-xla-chlo-legalize-to-hlo -test-xla-lower-complex | FileCheck %s
 // CHECK-LABEL: @add
 func @add(%arg0 : tensor<2xf32>, %arg1 : tensor<2xf32>, %arg2 : tensor<2xf32>, %arg3 : tensor<2xf32>) -> (tensor<2xf32>, tensor<2xf32>) {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -split-input-file -mlir-hlo-to-hlo-text %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -split-input-file -mlir-hlo-to-hlo-text %s | FileCheck %s
 // CHECK: HloModule
 func @main(%arg0: !xla_hlo.token, %arg1: !xla_hlo.token) -> !xla_hlo.token {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -hlo-text-to-mlir-hlo %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -hlo-text-to-mlir-hlo %s -o - | FileCheck %s
 HloModule main