Drop more --dump-input-on-failure from FileCheck commands

This is a debug-only option; its presence does not affect test execution.
Its functionality was made the default by https://reviews.llvm.org/D81422,
and the option is no longer available.

PiperOrigin-RevId: 315742913
Change-Id: Ie52ae57abe5fed324ce13f4b218cc031c8795dcb
This commit is contained in:
A. Unique TensorFlower 2020-06-10 12:24:20 -07:00 committed by TensorFlower Gardener
parent c99a6fba17
commit e66dabe79d
101 changed files with 103 additions and 103 deletions

View File

@ -1,5 +1,5 @@
# RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=Bridge --debug_info=%s.debug.pbtxt 2>&1 | FileCheck %s -dump-input-on-failure # RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=Bridge --debug_info=%s.debug.pbtxt 2>&1 | FileCheck %s
# RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=None 2>&1 | FileCheck -check-prefix=OLD %s -dump-input-on-failure # RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=None 2>&1 | FileCheck -check-prefix=OLD %s
# Checks the error message produced by tfcompile with mlir_component # Checks the error message produced by tfcompile with mlir_component
# Checks that source debug information is used in the output error message and # Checks that source debug information is used in the output error message and

View File

@ -1,4 +1,4 @@
# RUN: tf_tfl_translate -tf-input-arrays=input -tf-input-shapes=1,1,1,256 -tf-input-data-types=DT_FLOAT -tf-inference-type=DT_QINT8 -tf-input-min-values='-33.614346' -tf-input-max-values='21.54917' -tf-output-arrays=output %s -o - --output-mlir 2>&1 | FileCheck --check-prefix=MLIR %s --dump-input-on-failure # RUN: tf_tfl_translate -tf-input-arrays=input -tf-input-shapes=1,1,1,256 -tf-input-data-types=DT_FLOAT -tf-inference-type=DT_QINT8 -tf-input-min-values='-33.614346' -tf-input-max-values='21.54917' -tf-output-arrays=output %s -o - --output-mlir 2>&1 | FileCheck --check-prefix=MLIR %s
# RUN: tf_tfl_translate -tf-input-arrays=input -tf-input-shapes=1,1,1,256 -tf-input-data-types=DT_FLOAT -tf-inference-type=DT_QINT8 -tf-input-min-values='-33.614346' -tf-input-max-values='21.54917' -tf-output-arrays=output %s -o - | flatbuffer_to_string - | FileCheck %s # RUN: tf_tfl_translate -tf-input-arrays=input -tf-input-shapes=1,1,1,256 -tf-input-data-types=DT_FLOAT -tf-inference-type=DT_QINT8 -tf-input-min-values='-33.614346' -tf-input-max-values='21.54917' -tf-output-arrays=output %s -o - | flatbuffer_to_string - | FileCheck %s
node { node {

View File

@ -1,4 +1,4 @@
# RUN: tf_tfl_translate -tf-input-arrays=unranked -tf-input-shapes=1,8,8,2 -tf-input-data-types=DT_INT32 -tf-output-arrays=unranked,static,static_10 %s -o - --output-mlir | FileCheck %s --dump-input-on-failure # RUN: tf_tfl_translate -tf-input-arrays=unranked -tf-input-shapes=1,8,8,2 -tf-input-data-types=DT_INT32 -tf-output-arrays=unranked,static,static_10 %s -o - --output-mlir | FileCheck %s
node { node {
name: "tf.Const" name: "tf.Const"

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
func @main(%arg0: tensor<32x4x4x128xf32>, %arg1: tensor<1x32x42x128xf32>, %arg2: tensor<4xi32>) -> tensor<1x64x84x32xf32> { func @main(%arg0: tensor<32x4x4x128xf32>, %arg1: tensor<1x32x42x128xf32>, %arg2: tensor<4xi32>) -> tensor<1x64x84x32xf32> {
%0 = "tfl.custom"(%arg0, %arg1, %arg2) {custom_code = "Convolution2DTransposeBias", custom_option = opaque<"tfl", "0x010000000200000002000000"> : tensor<12xi8>} : (tensor<32x4x4x128xf32>, tensor<1x32x42x128xf32>, tensor<4xi32>) -> tensor<1x64x84x32xf32> %0 = "tfl.custom"(%arg0, %arg1, %arg2) {custom_code = "Convolution2DTransposeBias", custom_option = opaque<"tfl", "0x010000000200000002000000"> : tensor<12xi8>} : (tensor<32x4x4x128xf32>, tensor<1x32x42x128xf32>, tensor<4xi32>) -> tensor<1x64x84x32xf32>

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir --use-external-constant - -o - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir --use-external-constant - -o - | FileCheck %s
// Ensure that `tfl.external_const` is imported when the flag `-use-external-constant` is enabled. // Ensure that `tfl.external_const` is imported when the flag `-use-external-constant` is enabled.
func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> { func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {

View File

@ -1,4 +1,4 @@
// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck --dump-input-on-failure %s // RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s
// CHECK: %cst = constant unit // CHECK: %cst = constant unit
// CHECK: %[[RES0:.*]] = "tfl.conv_2d"(%arg0, %arg1, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 0 : i32, stride_w = 0 : i32} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, none) -> tensor<256x32x32x16xf32> // CHECK: %[[RES0:.*]] = "tfl.conv_2d"(%arg0, %arg1, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 0 : i32, stride_w = 0 : i32} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, none) -> tensor<256x32x32x16xf32>

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -input-arrays=squared_difference --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -input-arrays=squared_difference --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
// Tests -input-arrays flag. // Tests -input-arrays flag.
func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> { func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
// Ensure lstm roundtrip exactly // Ensure lstm roundtrip exactly
func @main(%arg0: tensor<1x4xf32>, %arg1: tensor<4x4xf32>, %arg2: tensor<4x4xf32>, %arg3: tensor<4x4xf32>, %arg4: tensor<4x4xf32>, %arg5: tensor<4x4xf32>, %arg6: tensor<4x4xf32>, %arg7: tensor<4x4xf32>, %arg8: tensor<4x4xf32>, %arg9: tensor<4xf32>, %arg10: tensor<4xf32>, %arg11: tensor<4xf32>, %arg12: tensor<1x4xf32>, %arg13: tensor<4xf32>, %arg14: tensor<4xf32>, %arg15: tensor<4xf32>, %arg16: tensor<4x4xf32>, %arg17: tensor<4xf32>, %arg18: tensor<4xf32>, %arg19: tensor<4xf32>, %arg20: tensor<4xf32>, %arg21: tensor<4xf32>) -> tensor<1x4xf32> { func @main(%arg0: tensor<1x4xf32>, %arg1: tensor<4x4xf32>, %arg2: tensor<4x4xf32>, %arg3: tensor<4x4xf32>, %arg4: tensor<4x4xf32>, %arg5: tensor<4x4xf32>, %arg6: tensor<4x4xf32>, %arg7: tensor<4x4xf32>, %arg8: tensor<4x4xf32>, %arg9: tensor<4xf32>, %arg10: tensor<4xf32>, %arg11: tensor<4xf32>, %arg12: tensor<1x4xf32>, %arg13: tensor<4xf32>, %arg14: tensor<4xf32>, %arg15: tensor<4xf32>, %arg16: tensor<4x4xf32>, %arg17: tensor<4xf32>, %arg18: tensor<4xf32>, %arg19: tensor<4xf32>, %arg20: tensor<4xf32>, %arg21: tensor<4xf32>) -> tensor<1x4xf32> {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
// Confirm a wide array of attribute survives the round-trip // Confirm a wide array of attribute survives the round-trip
func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> { func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,div,exp --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,div,exp --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
// CHECK: (%[[ARG:.*]]: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) // CHECK: (%[[ARG:.*]]: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>)
func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> attributes {tf.entry_function = {inputs = "mul"}} { func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> attributes {tf.entry_function = {inputs = "mul"}} {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
// This test is to test for unranked function output from input, the output type should be compatible with input type. // This test is to test for unranked function output from input, the output type should be compatible with input type.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt -tfl-lower-static-tensor-list %s | FileCheck %s --dump-input-on-failure // RUN: tf-opt -tfl-lower-static-tensor-list %s | FileCheck %s
// CHECK-LABEL: tensorlistConst // CHECK-LABEL: tensorlistConst
func @tensorlistConst(%arg0 : tensor<1xi32>) -> tensor<2x3xi32> { func @tensorlistConst(%arg0 : tensor<1xi32>) -> tensor<2x3xi32> {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> tensor<1x96xf32> { func @main(tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> tensor<1x96xf32> {
// CHECK: { // CHECK: {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-custom-ops -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-custom-ops -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<4xf32>) -> tensor<4xf32> { func @main(tensor<4xf32>) -> tensor<4xf32> {
^bb0(%arg0: tensor<4xf32>): ^bb0(%arg0: tensor<4xf32>):

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> { func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
^bb0(%arg0: tensor<1x224x224x3xf32>): ^bb0(%arg0: tensor<1x224x224x3xf32>):

View File

@ -1,4 +1,4 @@
// RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s --dump-input-on-failure // RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s
// CHECK: error: 'tf.MyCustomOp' op is neither a custom op nor a flex op // CHECK: error: 'tf.MyCustomOp' op is neither a custom op nor a flex op
// CHECK: error: failed while converting: 'main' // CHECK: error: failed while converting: 'main'

View File

@ -1,4 +1,4 @@
// RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s --dump-input-on-failure // RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s
// CHECK: error: 'tf.Div' op is neither a custom op nor a flex op // CHECK: error: 'tf.Div' op is neither a custom op nor a flex op
// CHECK: error: failed while converting: 'main' // CHECK: error: failed while converting: 'main'

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<4xf32>) -> tensor<4xf32> { func @main(tensor<4xf32>) -> tensor<4xf32> {
^bb0(%arg0: tensor<4xf32>): ^bb0(%arg0: tensor<4xf32>):

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops=true -emit-builtin-tflite-ops=false -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops=true -emit-builtin-tflite-ops=false -o - | flatbuffer_to_string - | FileCheck %s
func @main(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> { func @main(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> {
// CHECK: { // CHECK: {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<4xf64>, tensor<4xf64>) -> tensor<4xf64> { func @main(tensor<4xf64>, tensor<4xf64>) -> tensor<4xf64> {
^bb0(%arg0: tensor<4xf64>, %arg1: tensor<4xf64>): ^bb0(%arg0: tensor<4xf64>, %arg1: tensor<4xf64>):

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<4xf32>) -> tensor<4xf32> { func @main(tensor<4xf32>) -> tensor<4xf32> {
^bb0(%arg0: tensor<4xf32>): ^bb0(%arg0: tensor<4xf32>):

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> { func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
^bb0(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>): ^bb0(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>):

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> { func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
^bb0(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>): ^bb0(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>):

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(%arg0: tensor<1x528x!quant.uniform<i8:f32, 0.037248000502586365:-19>>, %arg1: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.059801999479532242>>, %arg2: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.031925998628139496>>, %arg3: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.056272000074386597>>, %arg4: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.063763998448848724>>, %arg5: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.013358999975025654>>, %arg6: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.022830000147223473>>, %arg7: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.032276000827550888>>, %arg8: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.035427000373601913>>, %arg9: tensor<2048x!quant.uniform<i32:f32, 4.2675782196965883E-7>>, %arg10: tensor<2048x!quant.uniform<i32:f32, 1.0742187583900886E-7>>, %arg11: tensor<2048x!quant.uniform<i32:f32, 1.6406249869760359E-7>>, %arg12: tensor<2048x!quant.uniform<i32:f32, 1.523437447303877E-7>>, %arg13: tensor<640x2048x!quant.uniform<i8<-127:127>:f32, 0.021174000576138496>>, %arg14: tensor<640x!quant.uniform<i32:f32, 1.601389680352559E-4>>, %arg15: tensor<2048x!quant.uniform<i16:f32, 4.3700000969693065E-4>>, %arg16: tensor<2048x!quant.uniform<i16:f32, 1.1000000085914508E-4>>, %arg17: tensor<2048x!quant.uniform<i16:f32, 1.6799999866634607E-4>>, %arg18: tensor<2048x!quant.uniform<i16:f32, 1.55999994603917E-4>>, %arg19: tensor<1x640x!quant.uniform<i8:f32, 0.09671100229024887:10>>, %arg20: tensor<1x2048x!quant.uniform<i16:f32, 4.8799999058246613E-4>>) -> tensor<1x640x!quant.uniform<i8:f32, 0.09671100229024887:10>> { func @main(%arg0: tensor<1x528x!quant.uniform<i8:f32, 0.037248000502586365:-19>>, %arg1: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.059801999479532242>>, %arg2: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.031925998628139496>>, %arg3: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.056272000074386597>>, %arg4: 
tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.063763998448848724>>, %arg5: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.013358999975025654>>, %arg6: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.022830000147223473>>, %arg7: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.032276000827550888>>, %arg8: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.035427000373601913>>, %arg9: tensor<2048x!quant.uniform<i32:f32, 4.2675782196965883E-7>>, %arg10: tensor<2048x!quant.uniform<i32:f32, 1.0742187583900886E-7>>, %arg11: tensor<2048x!quant.uniform<i32:f32, 1.6406249869760359E-7>>, %arg12: tensor<2048x!quant.uniform<i32:f32, 1.523437447303877E-7>>, %arg13: tensor<640x2048x!quant.uniform<i8<-127:127>:f32, 0.021174000576138496>>, %arg14: tensor<640x!quant.uniform<i32:f32, 1.601389680352559E-4>>, %arg15: tensor<2048x!quant.uniform<i16:f32, 4.3700000969693065E-4>>, %arg16: tensor<2048x!quant.uniform<i16:f32, 1.1000000085914508E-4>>, %arg17: tensor<2048x!quant.uniform<i16:f32, 1.6799999866634607E-4>>, %arg18: tensor<2048x!quant.uniform<i16:f32, 1.55999994603917E-4>>, %arg19: tensor<1x640x!quant.uniform<i8:f32, 0.09671100229024887:10>>, %arg20: tensor<1x2048x!quant.uniform<i16:f32, 4.8799999058246613E-4>>) -> tensor<1x640x!quant.uniform<i8:f32, 0.09671100229024887:10>> {
%cst = constant unit %cst = constant unit

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
module attributes { module attributes {
tfl.metadata = {key1 = "value1", key2 = "value2"} tfl.metadata = {key1 = "value1", key2 = "value2"}

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
// CHECK: { // CHECK: {
// CHECK-NEXT: version: 3, // CHECK-NEXT: version: 3,

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
module { module {
func @serving_default(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> attributes {tf.entry_function = {inputs = "serving_default_x", outputs = "outputs"}} { func @serving_default(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> attributes {tf.entry_function = {inputs = "serving_default_x", outputs = "outputs"}} {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s --dump-input-on-failure // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
// CHECK: { // CHECK: {
// CHECK-NEXT: version: 3, // CHECK-NEXT: version: 3,

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(%arg0: tensor<4xi32>, %arg1: tensor<32x4x4x128xf32>, %arg2: tensor<1x32x42x128xf32>) -> tensor<1x64x84x32xf32> { func @main(%arg0: tensor<4xi32>, %arg1: tensor<32x4x4x128xf32>, %arg2: tensor<1x32x42x128xf32>) -> tensor<1x64x84x32xf32> {
// CHECK: { // CHECK: {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>) -> tensor<4x4xf32> { func @main(tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>) -> tensor<4x4xf32> {
// CHECK: { // CHECK: {

View File

@ -1,4 +1,4 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s // RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -> tensor<4 x f32> { func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -> tensor<4 x f32> {
// CHECK: { // CHECK: {

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s
// CHECK-LABEL: func @tfAssertTrue // CHECK-LABEL: func @tfAssertTrue
func @tfAssertTrue(%arg0: tensor<1x1x6x2xf32>) { func @tfAssertTrue(%arg0: tensor<1x1x6x2xf32>) {

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -tf-device-cluster-formation | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -tf-device-cluster-formation | FileCheck %s
// Simple case, single device cluster. // Simple case, single device cluster.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -tf-device-cluster-outlining | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -tf-device-cluster-outlining | FileCheck %s
// Tests simple case of a single `tf_device.cluster`. // Tests simple case of a single `tf_device.cluster`.

View File

@ -1,5 +1,5 @@
# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-output-arrays=p,x -o - | FileCheck %s --check-prefix=NONE --dump-input-on-failure # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-output-arrays=p,x -o - | FileCheck %s --check-prefix=NONE
# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-input-data-types=DT_INT32,DT_BOOL -tf-output-arrays=p,x -o - | FileCheck %s --dump-input-on-failure # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-input-data-types=DT_INT32,DT_BOOL -tf-output-arrays=p,x -o - | FileCheck %s
# Test the handling of the input data types. In particular, if the data type # Test the handling of the input data types. In particular, if the data type
# for an input graph node is specified via command line options, use it. # for an input graph node is specified via command line options, use it.

View File

@ -1,4 +1,4 @@
# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s --dump-input-on-failure # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s
node { node {
name: "custom_relu_func_call" name: "custom_relu_func_call"

View File

@ -1,4 +1,4 @@
# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s --dump-input-on-failure # RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s
# Verify that the data_format attributes is pulled from the default value in the # Verify that the data_format attributes is pulled from the default value in the
# registry when not present in the GraphDef # registry when not present in the GraphDef

View File

@ -1,4 +1,4 @@
# RUN: tf-mlir-translate -graphdef-to-mlir %s -o - | FileCheck %s --dump-input-on-failure # RUN: tf-mlir-translate -graphdef-to-mlir %s -o - | FileCheck %s
node { node {
name: "TensorListReserve/num_elements" name: "TensorListReserve/num_elements"

View File

@ -1,4 +1,4 @@
# RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-output-arrays=BatchDatasetV2 -o - | FileCheck %s --dump-input-on-failure # RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-output-arrays=BatchDatasetV2 -o - | FileCheck %s
# CHECK-LABEL: func @main() -> tensor<*x!tf.variant> # CHECK-LABEL: func @main() -> tensor<*x!tf.variant>
# CHECK: %[[tensor_slice:.*]], %[[tensor_slice_control:.*]] = tf_executor.island wraps "tf.TensorSliceDataset" # CHECK: %[[tensor_slice:.*]], %[[tensor_slice_control:.*]] = tf_executor.island wraps "tf.TensorSliceDataset"

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
// Check that attributes that define derived shapes are exported. // Check that attributes that define derived shapes are exported.

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
// CHECK: op: "Split" // CHECK: op: "Split"
// CHECK: attr { // CHECK: attr {

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
func @main() -> tensor<*x!tf.resource> attributes {tf.entry_function = {inputs = "", outputs = "func_call"}} { func @main() -> tensor<*x!tf.resource> attributes {tf.entry_function = {inputs = "", outputs = "func_call"}} {
%0 = tf_executor.graph { %0 = tf_executor.graph {

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
func @main() { func @main() {
tf_executor.graph { tf_executor.graph {

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
func @main(%arg0: tensor<10xi32>) -> tensor<10xi32> func @main(%arg0: tensor<10xi32>) -> tensor<10xi32>
attributes {tf.entry_function = {inputs = "input0", outputs = "output0"}} { attributes {tf.entry_function = {inputs = "input0", outputs = "output0"}} {

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 175 : i32}} { module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 175 : i32}} {
func @main(%arg0: tensor<32x!tf.string>) -> (tensor<?x2xi64>) attributes {tf.entry_function = {inputs = "input0", outputs = "ParseExample/ParseExampleV2"}} { func @main(%arg0: tensor<32x!tf.string>) -> (tensor<?x2xi64>) attributes {tf.entry_function = {inputs = "input0", outputs = "ParseExample/ParseExampleV2"}} {

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
func @main(%arg0: tensor<10xi32>, %arg1: tensor<10xi32>) -> tensor<10xi32> func @main(%arg0: tensor<10xi32>, %arg1: tensor<10xi32>) -> tensor<10xi32>
attributes {tf.entry_function = {inputs = "foo,bar", outputs = "Add"}} { attributes {tf.entry_function = {inputs = "foo,bar", outputs = "Add"}} {

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
func @main() { func @main() {
tf_executor.graph { tf_executor.graph {

View File

@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure // RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
func @main(%arg0: tensor<10xi32>, %arg1: tensor<10xi32>) -> tensor<10xi32> func @main(%arg0: tensor<10xi32>, %arg1: tensor<10xi32>) -> tensor<10xi32>
attributes {tf.entry_function = {inputs = "input0,input1", outputs = "Add"}} { attributes {tf.entry_function = {inputs = "input0,input1", outputs = "Add"}} {

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -tf-op-fusion | FileCheck %s --dump-input-on-failure // RUN: tf-opt %s -tf-op-fusion | FileCheck %s
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Conv2D + BiasAdd + <Activation> fusions. // Conv2D + BiasAdd + <Activation> fusions.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-resources-to-args | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-resources-to-args | FileCheck %s
// One resource, one read. The initial value of the resource is read. // One resource, one read. The initial value of the resource is read.
// CHECK-LABEL: func @main(%arg0: tensor<i1>, %arg1: tensor<f32> {tf.resource_name = "x"}) -> tensor<2xf32> // CHECK-LABEL: func @main(%arg0: tensor<i1>, %arg1: tensor<f32> {tf.resource_name = "x"}) -> tensor<2xf32>

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-var-handles-to-args | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-var-handles-to-args | FileCheck %s
// Tests main function with multiple blocks. // Tests main function with multiple blocks.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-resource-op-lifting | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-resource-op-lifting | FileCheck %s
// Tests that resource load operations are hoisted. // Tests that resource load operations are hoisted.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt -tf-rewrite-tpu-embedding-ops %s | FileCheck %s --dump-input-on-failure // RUN: tf-opt -tf-rewrite-tpu-embedding-ops %s | FileCheck %s
// CHECK-LABEL: func @recv_tpu_embedding_activations // CHECK-LABEL: func @recv_tpu_embedding_activations
func @recv_tpu_embedding_activations() -> (tensor<512x256xf32>) { func @recv_tpu_embedding_activations() -> (tensor<512x256xf32>) {

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-stack-ops-decomposition | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-stack-ops-decomposition | FileCheck %s
// Tests simple scalar stack operations without control flow. // Tests simple scalar stack operations without control flow.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-array-ops-decomposition | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-array-ops-decomposition | FileCheck %s
// Test read and write on a tensor list. // Test read and write on a tensor list.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-list-ops-decomposition | FileCheck %s -dump-input-on-failure // RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-list-ops-decomposition | FileCheck %s
// Test push and pop on a tensor list which is initially empty. // Test push and pop on a tensor list which is initially empty.

View File

@ -1,4 +1,4 @@
// RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s --dump-input-on-failure < %t // RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s < %t
module { module {
// CHECK-LABEL: fuse_map_and_batch // CHECK-LABEL: fuse_map_and_batch

View File

@ -1,4 +1,4 @@
// RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s --dump-input-on-failure < %t // RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s < %t
module { module {
// CHECK-LABEL: fuse_pmap_and_batch // CHECK-LABEL: fuse_pmap_and_batch

View File

@ -13,7 +13,7 @@
# limitations under the License. # limitations under the License.
# ============================================================================== # ==============================================================================
# RUN: %p/multi_arguments_results_v1 | FileCheck -dump-input-on-failure %s # RUN: %p/multi_arguments_results_v1 | FileCheck %s
# pylint: disable=missing-docstring,line-too-long # pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import from __future__ import absolute_import

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -tf-tpu-outside-compilation-cluster | FileCheck %s --dump-input-on-failure // RUN: tf-opt %s -tf-tpu-outside-compilation-cluster | FileCheck %s
// CHECK-LABEL: func @one_cluster_no_dependencies // CHECK-LABEL: func @one_cluster_no_dependencies
func @one_cluster_no_dependencies() { func @one_cluster_no_dependencies() {

View File

@ -1,4 +1,4 @@
# RUN: tf_tfjs_translate %s -tf-input-arrays=input0,input1 -tf-input-data-types=DT_INT32,DT_INT32 -tf-input-shapes=10:10 -tf-output-arrays=Mul -o - | FileCheck %s --dump-input-on-failure # RUN: tf_tfjs_translate %s -tf-input-arrays=input0,input1 -tf-input-data-types=DT_INT32,DT_INT32 -tf-input-shapes=10:10 -tf-output-arrays=Mul -o - | FileCheck %s
# Add two tensor<4xi32> inputs and return the result # Add two tensor<4xi32> inputs and return the result
node { node {

View File

@ -1,4 +1,4 @@
# RUN: tf_tfjs_translate %s -tf-input-arrays=input0 -tf-input-data-types=DT_FLOAT -tf-input-shapes=10 -tf-output-arrays=Add -tf-custom-opdefs="name: 'Prelu' input_arg: { name: 'x' type: DT_FLOAT } input_arg: { name: 'alpha' type: DT_FLOAT } output_arg: { name: 'c' type: DT_FLOAT }" -o - | FileCheck %s --dump-input-on-failure # RUN: tf_tfjs_translate %s -tf-input-arrays=input0 -tf-input-data-types=DT_FLOAT -tf-input-shapes=10 -tf-output-arrays=Add -tf-custom-opdefs="name: 'Prelu' input_arg: { name: 'x' type: DT_FLOAT } input_arg: { name: 'alpha' type: DT_FLOAT } output_arg: { name: 'c' type: DT_FLOAT }" -o - | FileCheck %s
# Add two tensor<4xi32> inputs and return the result # Add two tensor<4xi32> inputs and return the result
node { node {

View File

@ -1,4 +1,4 @@
// RUN: tf-opt -split-input-file -verify-diagnostics -tfl-runtime-verify %s | FileCheck %s --dump-input-on-failure // RUN: tf-opt -split-input-file -verify-diagnostics -tfl-runtime-verify %s | FileCheck %s
// ----- // -----

View File

@ -1,5 +1,5 @@
// Run optimize pass only and check the results. // Run optimize pass only and check the results.
// RUN: tf-opt %s -tfjs-optimize | FileCheck %s --dump-input-on-failure // RUN: tf-opt %s -tfjs-optimize | FileCheck %s
// CHECK-LABEL: prelu_fusion // CHECK-LABEL: prelu_fusion
func @prelu_fusion(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> { func @prelu_fusion(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {

View File

@ -1,4 +1,4 @@
// RUN: tf-opt -xla-legalize-tf=allow-partial-conversion %s | FileCheck %s --dump-input-on-failure // RUN: tf-opt -xla-legalize-tf=allow-partial-conversion %s | FileCheck %s
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// tf.BatchMatMulV2 op legalizations. // tf.BatchMatMulV2 op legalizations.

View File

@ -4,7 +4,7 @@
// Lowering to STD dialect and store forwarding pass would be required to get // Lowering to STD dialect and store forwarding pass would be required to get
// rid of them. This is exactly what is done in the real MLIR GPU pipeline, but // rid of them. This is exactly what is done in the real MLIR GPU pipeline, but
// here we disable verification with `verify-each=0` to check the output IR. // here we disable verification with `verify-each=0` to check the output IR.
// RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize --verify-each=0 | FileCheck %s --dump-input-on-failure // RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize --verify-each=0 | FileCheck %s
func @select_and_scatter(%arg: memref<112x112xf32>, func @select_and_scatter(%arg: memref<112x112xf32>,
%src: memref<56x56xf32>, %src: memref<56x56xf32>,

View File

@ -1,4 +1,4 @@
// RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize -split-input-file | FileCheck %s --dump-input-on-failure // RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize -split-input-file | FileCheck %s
func @reduce(%arg: memref<100x10x5xf32>, func @reduce(%arg: memref<100x10x5xf32>,
%init: memref<f32>, %init: memref<f32>,

View File

@ -1,4 +1,4 @@
// RUN: tf-opt %s -xla-hlo-fusion -split-input-file | FileCheck %s --dump-input-on-failure // RUN: tf-opt %s -xla-hlo-fusion -split-input-file | FileCheck %s
// CHECK-LABEL: func @multi_outputs_same // CHECK-LABEL: func @multi_outputs_same
func @multi_outputs_same(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<?x?xf32>) { func @multi_outputs_same(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<?x?xf32>) {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Abs HloModule Abs
ENTRY %Abs (val: f32[2,2]) -> f32[2,2] { ENTRY %Abs (val: f32[2,2]) -> f32[2,2] {
%val = f32[2,2]{1,0} parameter(0) %val = f32[2,2]{1,0} parameter(0)

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Add HloModule Add
ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] { ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt -lowering-stage=KERNEL %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt -lowering-stage=KERNEL %s | FileCheck %s
HloModule Add HloModule Add
ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] { ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s
HloModule Add HloModule Add
ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] { ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule AddMultiply HloModule AddMultiply
ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] { ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s
HloModule AddMultiply HloModule AddMultiply
ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] { ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule AddReduce HloModule AddReduce
%add (x: f32[], y: f32[]) -> f32[] { %add (x: f32[], y: f32[]) -> f32[] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Broadcast HloModule Broadcast
ENTRY %Broadcast (x: f32[10]) -> f32[10, 5] { ENTRY %Broadcast (x: f32[10]) -> f32[10, 5] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt -verify-errors %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt -verify-errors %s | FileCheck %s
HloModule Add HloModule Add
ENTRY %Add (x: f32[2,2,2], y: f32[2,2,2]) -> f32[2,2,2] { ENTRY %Add (x: f32[2,2,2], y: f32[2,2,2]) -> f32[2,2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Ceil HloModule Ceil
ENTRY %Ceil (val: f32[2,2]) -> f32[2,2] { ENTRY %Ceil (val: f32[2,2]) -> f32[2,2] {
%val = f32[2,2]{1,0} parameter(0) %val = f32[2,2]{1,0} parameter(0)

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Compare HloModule Compare
ENTRY %Compare (x: f32[2,2], y: f32[2,2]) -> pred[2,2] { ENTRY %Compare (x: f32[2,2], y: f32[2,2]) -> pred[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Complex HloModule Complex
ENTRY %Complex (real: f32[2,2]{0,1}, imag: f32[2,2]{0,1}) -> c64[2,2] { ENTRY %Complex (real: f32[2,2]{0,1}, imag: f32[2,2]{0,1}) -> c64[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Concatenate HloModule Concatenate
ENTRY %Concatenate (x: f32[2,3], y: f32[2,2]) -> f32[2,5] { ENTRY %Concatenate (x: f32[2,3], y: f32[2,2]) -> f32[2,5] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Const HloModule Const
ENTRY %Const () -> s32[100] { ENTRY %Const () -> s32[100] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Copy HloModule Copy
ENTRY %Copy (x: f32[2,4]) -> f32[2,4] { ENTRY %Copy (x: f32[2,4]) -> f32[2,4] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule CopyTranspose HloModule CopyTranspose
ENTRY %CopyTranspose (x: f32[2,4]) -> f32[2,4]{0,1} { ENTRY %CopyTranspose (x: f32[2,4]) -> f32[2,4]{0,1} {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Cos HloModule Cos
ENTRY %Cos (val: f32[2,2]) -> f32[2,2] { ENTRY %Cos (val: f32[2,2]) -> f32[2,2] {
%val = f32[2,2]{1,0} parameter(0) %val = f32[2,2]{1,0} parameter(0)

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Exp HloModule Exp
ENTRY %Exp (x: f32[2,2]) -> f32[2,2] { ENTRY %Exp (x: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule FusedReduce HloModule FusedReduce
%add (x: f32[], y: f32[]) -> f32[] { %add (x: f32[], y: f32[]) -> f32[] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Imag HloModule Imag
ENTRY %Imag (x: c64[2,2]{0,1}) -> f32[2,2] { ENTRY %Imag (x: c64[2,2]{0,1}) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Iota HloModule Iota
ENTRY %Iota() -> s64[10, 5] { ENTRY %Iota() -> s64[10, 5] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s
HloModule AddSubtract HloModule AddSubtract
ENTRY %AddSubtract (x: s32[2,2], y: s32[2,2]) -> s32[2,2] { ENTRY %AddSubtract (x: s32[2,2], y: s32[2,2]) -> s32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Log HloModule Log
ENTRY %Log (x: f32[2,2]) -> f32[2,2] { ENTRY %Log (x: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Neg HloModule Neg
ENTRY %Neg (val: f32[2,2]) -> f32[2,2] { ENTRY %Neg (val: f32[2,2]) -> f32[2,2] {
%val = f32[2,2]{1,0} parameter(0) %val = f32[2,2]{1,0} parameter(0)

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Real HloModule Real
ENTRY %Real (x: c64[2,2]{0,1}) -> f32[2,2] { ENTRY %Real (x: c64[2,2]{0,1}) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule ReduceWindow HloModule ReduceWindow
%max (x: f32[], y: f32[]) -> f32[] { %max (x: f32[], y: f32[]) -> f32[] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Rem HloModule Rem
ENTRY %Rem(x: f32[2,2], y: f32[2,2]) -> f32[2,2] { ENTRY %Rem(x: f32[2,2], y: f32[2,2]) -> f32[2,2] {
%x = f32[2,2]{1,0} parameter(0) %x = f32[2,2]{1,0} parameter(0)

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Rsqrt HloModule Rsqrt
ENTRY %Rsqrt (x: f32[2,2]) -> f32[2,2] { ENTRY %Rsqrt (x: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Select HloModule Select
ENTRY %Select (p: pred[2,2], x: f32[2,2], y: f32[2,2]) -> f32[2,2] { ENTRY %Select (p: pred[2,2], x: f32[2,2], y: f32[2,2]) -> f32[2,2] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule SelectAndScatter HloModule SelectAndScatter
%ge (x: f32[], y: f32[]) -> pred[] { %ge (x: f32[], y: f32[]) -> pred[] {

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Sign HloModule Sign
ENTRY %Sign (val: f32[2,2]) -> f32[2,2] { ENTRY %Sign (val: f32[2,2]) -> f32[2,2] {
%val = f32[2,2]{1,0} parameter(0) %val = f32[2,2]{1,0} parameter(0)

View File

@ -1,4 +1,4 @@
// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure // RUN: xla-gpu-opt %s | FileCheck %s
HloModule Sqrt HloModule Sqrt
ENTRY %Sqrt (x: f32[2,2]) -> f32[2,2] { ENTRY %Sqrt (x: f32[2,2]) -> f32[2,2] {

Some files were not shown because too many files have changed in this diff Show More