Drop more --dump-input-on-failure from FileCheck commands
This is a debug-only option; its presence does not affect test execution. Its functionality was made the default by https://reviews.llvm.org/D81422, and the option is no longer available.

PiperOrigin-RevId: 315742913
Change-Id: Ie52ae57abe5fed324ce13f4b218cc031c8795dcb
parent c99a6fba17
commit e66dabe79d
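For context, FileCheck's input dumping is controlled by the `-dump-input=<never|fail|always>` option; per the message above, https://reviews.llvm.org/D81422 made the on-failure dump the default and retired the old `-dump-input-on-failure` spelling. A minimal sketch of equivalent lit RUN lines (`some-tool` is a hypothetical placeholder, not a tool touched by this change):

# Old spelling, removed upstream by D81422:
# RUN: some-tool %s | FileCheck %s -dump-input-on-failure
# New default, same behavior with no flag:
# RUN: some-tool %s | FileCheck %s
# Explicit form of the new default:
# RUN: some-tool %s | FileCheck %s -dump-input=fail
# Opt out of dumping if the output is too noisy:
# RUN: some-tool %s | FileCheck %s -dump-input=never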
@@ -1,5 +1,5 @@
-# RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=Bridge --debug_info=%s.debug.pbtxt 2>&1 | FileCheck %s -dump-input-on-failure
-# RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=None 2>&1 | FileCheck -check-prefix=OLD %s -dump-input-on-failure
+# RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=Bridge --debug_info=%s.debug.pbtxt 2>&1 | FileCheck %s
+# RUN: not tfcompile --graph=%s --config=%s.config.pbtxt --mlir_components=None 2>&1 | FileCheck -check-prefix=OLD %s
 
 # Checks the error message produced by tfcompile with mlir_component
 # Checks that source debug information is used in the output error message and

@@ -1,4 +1,4 @@
-# RUN: tf_tfl_translate -tf-input-arrays=input -tf-input-shapes=1,1,1,256 -tf-input-data-types=DT_FLOAT -tf-inference-type=DT_QINT8 -tf-input-min-values='-33.614346' -tf-input-max-values='21.54917' -tf-output-arrays=output %s -o - --output-mlir 2>&1 | FileCheck --check-prefix=MLIR %s --dump-input-on-failure
+# RUN: tf_tfl_translate -tf-input-arrays=input -tf-input-shapes=1,1,1,256 -tf-input-data-types=DT_FLOAT -tf-inference-type=DT_QINT8 -tf-input-min-values='-33.614346' -tf-input-max-values='21.54917' -tf-output-arrays=output %s -o - --output-mlir 2>&1 | FileCheck --check-prefix=MLIR %s
 # RUN: tf_tfl_translate -tf-input-arrays=input -tf-input-shapes=1,1,1,256 -tf-input-data-types=DT_FLOAT -tf-inference-type=DT_QINT8 -tf-input-min-values='-33.614346' -tf-input-max-values='21.54917' -tf-output-arrays=output %s -o - | flatbuffer_to_string - | FileCheck %s
 
 node {

@@ -1,4 +1,4 @@
-# RUN: tf_tfl_translate -tf-input-arrays=unranked -tf-input-shapes=1,8,8,2 -tf-input-data-types=DT_INT32 -tf-output-arrays=unranked,static,static_10 %s -o - --output-mlir | FileCheck %s --dump-input-on-failure
+# RUN: tf_tfl_translate -tf-input-arrays=unranked -tf-input-shapes=1,8,8,2 -tf-input-data-types=DT_INT32 -tf-output-arrays=unranked,static,static_10 %s -o - --output-mlir | FileCheck %s
 
 node {
   name: "tf.Const"

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 
 func @main(%arg0: tensor<32x4x4x128xf32>, %arg1: tensor<1x32x42x128xf32>, %arg2: tensor<4xi32>) -> tensor<1x64x84x32xf32> {
   %0 = "tfl.custom"(%arg0, %arg1, %arg2) {custom_code = "Convolution2DTransposeBias", custom_option = opaque<"tfl", "0x010000000200000002000000"> : tensor<12xi8>} : (tensor<32x4x4x128xf32>, tensor<1x32x42x128xf32>, tensor<4xi32>) -> tensor<1x64x84x32xf32>

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir --use-external-constant - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir --use-external-constant - -o - | FileCheck %s
 // Ensure that `tfl.external_const` is imported when the flag `-use-external-constant` is enabled.
 
 func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
@@ -1,4 +1,4 @@
-// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck --dump-input-on-failure %s
+// RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s
 
 // CHECK: %cst = constant unit
 // CHECK: %[[RES0:.*]] = "tfl.conv_2d"(%arg0, %arg1, %cst) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 0 : i32, stride_w = 0 : i32} : (tensor<256x32x32x3xf32>, tensor<16x3x3x3xf32>, none) -> tensor<256x32x32x16xf32>

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -input-arrays=squared_difference --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -input-arrays=squared_difference --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Tests -input-arrays flag.
 
 func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 // Ensure lstm roundtrip exactly
 
 func @main(%arg0: tensor<1x4xf32>, %arg1: tensor<4x4xf32>, %arg2: tensor<4x4xf32>, %arg3: tensor<4x4xf32>, %arg4: tensor<4x4xf32>, %arg5: tensor<4x4xf32>, %arg6: tensor<4x4xf32>, %arg7: tensor<4x4xf32>, %arg8: tensor<4x4xf32>, %arg9: tensor<4xf32>, %arg10: tensor<4xf32>, %arg11: tensor<4xf32>, %arg12: tensor<1x4xf32>, %arg13: tensor<4xf32>, %arg14: tensor<4xf32>, %arg15: tensor<4xf32>, %arg16: tensor<4x4xf32>, %arg17: tensor<4xf32>, %arg18: tensor<4xf32>, %arg19: tensor<4xf32>, %arg20: tensor<4xf32>, %arg21: tensor<4xf32>) -> tensor<1x4xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 
 // Confirm a wide array of attribute survives the round-trip
 func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,div,exp --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate -output-arrays=mul,div,exp --experimental-prune-unreachable-nodes-unconditionally --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 
 // CHECK: (%[[ARG:.*]]: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>)
 func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> attributes {tf.entry_function = {inputs = "mul"}} {
@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_translate --tflite-flatbuffer-to-mlir - -o - | FileCheck %s
 
 // This test is to test for unranked function output from input, the output type should be compatible with input type.
 

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tfl-lower-static-tensor-list %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tfl-lower-static-tensor-list %s | FileCheck %s
 
 // CHECK-LABEL: tensorlistConst
 func @tensorlistConst(%arg0 : tensor<1xi32>) -> tensor<2x3xi32> {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384xf32>, tensor<1x96xf32>) -> tensor<1x96xf32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-custom-ops -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-custom-ops -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<4xf32>) -> tensor<4xf32> {
 ^bb0(%arg0: tensor<4xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> {
 ^bb0(%arg0: tensor<1x224x224x3xf32>):
@@ -1,4 +1,4 @@
-// RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s --dump-input-on-failure
+// RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s
 
 // CHECK: error: 'tf.MyCustomOp' op is neither a custom op nor a flex op
 // CHECK: error: failed while converting: 'main'

@@ -1,4 +1,4 @@
-// RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s --dump-input-on-failure
+// RUN: not flatbuffer_translate -mlir-to-tflite-flatbuffer %s 2>&1 | FileCheck %s
 
 // CHECK: error: 'tf.Div' op is neither a custom op nor a flex op
 // CHECK: error: failed while converting: 'main'

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<4xf32>) -> tensor<4xf32> {
 ^bb0(%arg0: tensor<4xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops=true -emit-builtin-tflite-ops=false -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops=true -emit-builtin-tflite-ops=false -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<4xf64>, tensor<4xf64>) -> tensor<4xf64> {
 ^bb0(%arg0: tensor<4xf64>, %arg1: tensor<4xf64>):
@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -emit-select-tf-ops -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<4xf32>) -> tensor<4xf32> {
 ^bb0(%arg0: tensor<4xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
 ^bb0(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> {
 ^bb0(%arg0: tensor<40x37xf32>, %arg1: tensor<40x37xf32>):

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(%arg0: tensor<1x528x!quant.uniform<i8:f32, 0.037248000502586365:-19>>, %arg1: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.059801999479532242>>, %arg2: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.031925998628139496>>, %arg3: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.056272000074386597>>, %arg4: tensor<2048x528x!quant.uniform<i8<-127:127>:f32, 0.063763998448848724>>, %arg5: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.013358999975025654>>, %arg6: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.022830000147223473>>, %arg7: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.032276000827550888>>, %arg8: tensor<2048x640x!quant.uniform<i8<-127:127>:f32, 0.035427000373601913>>, %arg9: tensor<2048x!quant.uniform<i32:f32, 4.2675782196965883E-7>>, %arg10: tensor<2048x!quant.uniform<i32:f32, 1.0742187583900886E-7>>, %arg11: tensor<2048x!quant.uniform<i32:f32, 1.6406249869760359E-7>>, %arg12: tensor<2048x!quant.uniform<i32:f32, 1.523437447303877E-7>>, %arg13: tensor<640x2048x!quant.uniform<i8<-127:127>:f32, 0.021174000576138496>>, %arg14: tensor<640x!quant.uniform<i32:f32, 1.601389680352559E-4>>, %arg15: tensor<2048x!quant.uniform<i16:f32, 4.3700000969693065E-4>>, %arg16: tensor<2048x!quant.uniform<i16:f32, 1.1000000085914508E-4>>, %arg17: tensor<2048x!quant.uniform<i16:f32, 1.6799999866634607E-4>>, %arg18: tensor<2048x!quant.uniform<i16:f32, 1.55999994603917E-4>>, %arg19: tensor<1x640x!quant.uniform<i8:f32, 0.09671100229024887:10>>, %arg20: tensor<1x2048x!quant.uniform<i16:f32, 4.8799999058246613E-4>>) -> tensor<1x640x!quant.uniform<i8:f32, 0.09671100229024887:10>> {
   %cst = constant unit

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 module attributes {
   tfl.metadata = {key1 = "value1", key2 = "value2"}
@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 // CHECK: {
 // CHECK-NEXT: version: 3,

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 module {
   func @serving_default(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> attributes {tf.entry_function = {inputs = "serving_default_x", outputs = "outputs"}} {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s --dump-input-on-failure
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 // CHECK: {
 // CHECK-NEXT: version: 3,

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(%arg0: tensor<4xi32>, %arg1: tensor<32x4x4x128xf32>, %arg2: tensor<1x32x42x128xf32>) -> tensor<1x64x84x32xf32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>) -> tensor<4x4xf32> {
 // CHECK: {
@@ -1,4 +1,4 @@
-// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s
+// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck %s
 
 func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) -> tensor<4 x f32> {
 // CHECK: {

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 // CHECK-LABEL: func @tfAssertTrue
 func @tfAssertTrue(%arg0: tensor<1x1x6x2xf32>) {

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -tf-device-cluster-formation | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -tf-device-cluster-formation | FileCheck %s
 
 // Simple case, single device cluster.
 

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -tf-device-cluster-outlining | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -tf-device-cluster-outlining | FileCheck %s
 
 // Tests simple case of a single `tf_device.cluster`.
 
@@ -1,5 +1,5 @@
-# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-output-arrays=p,x -o - | FileCheck %s --check-prefix=NONE --dump-input-on-failure
-# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-input-data-types=DT_INT32,DT_BOOL -tf-output-arrays=p,x -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-output-arrays=p,x -o - | FileCheck %s --check-prefix=NONE
+# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -tf-input-arrays=p,x -tf-input-shapes=:1 -tf-input-data-types=DT_INT32,DT_BOOL -tf-output-arrays=p,x -o - | FileCheck %s
 
 # Test the handling of the input data types. In particular, if the data type
 # for an input graph node is specified via command line options, use it.

@@ -1,4 +1,4 @@
-# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s
 
 node {
   name: "custom_relu_func_call"

@@ -1,4 +1,4 @@
-# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf-mlir-translate -graphdef-to-mlir -tf-enable-shape-inference-on-import=false %s -o - | FileCheck %s
 
 # Verify that the data_format attributes is pulled from the default value in the
 # registry when not present in the GraphDef

@@ -1,4 +1,4 @@
-# RUN: tf-mlir-translate -graphdef-to-mlir %s -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf-mlir-translate -graphdef-to-mlir %s -o - | FileCheck %s
 
 node {
   name: "TensorListReserve/num_elements"

@@ -1,4 +1,4 @@
-# RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-output-arrays=BatchDatasetV2 -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-output-arrays=BatchDatasetV2 -o - | FileCheck %s
 
 # CHECK-LABEL: func @main() -> tensor<*x!tf.variant>
 # CHECK: %[[tensor_slice:.*]], %[[tensor_slice_control:.*]] = tf_executor.island wraps "tf.TensorSliceDataset"
@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 // Check that attributes that define derived shapes are exported.
 

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 // CHECK: op: "Split"
 // CHECK: attr {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 func @main() -> tensor<*x!tf.resource> attributes {tf.entry_function = {inputs = "", outputs = "func_call"}} {
   %0 = tf_executor.graph {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 func @main() {
   tf_executor.graph {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 func @main(%arg0: tensor<10xi32>) -> tensor<10xi32>
     attributes {tf.entry_function = {inputs = "input0", outputs = "output0"}} {
@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 175 : i32}} {
   func @main(%arg0: tensor<32x!tf.string>) -> (tensor<?x2xi64>) attributes {tf.entry_function = {inputs = "input0", outputs = "ParseExample/ParseExampleV2"}} {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 func @main(%arg0: tensor<10xi32>, %arg1: tensor<10xi32>) -> tensor<10xi32>
     attributes {tf.entry_function = {inputs = "foo,bar", outputs = "Add"}} {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 func @main() {
   tf_executor.graph {

@@ -1,4 +1,4 @@
-// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s --dump-input-on-failure
+// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
 
 func @main(%arg0: tensor<10xi32>, %arg1: tensor<10xi32>) -> tensor<10xi32>
     attributes {tf.entry_function = {inputs = "input0,input1", outputs = "Add"}} {

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -tf-op-fusion | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tf-op-fusion | FileCheck %s
 
 //===----------------------------------------------------------------------===//
 // Conv2D + BiasAdd + <Activation> fusions.
@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-resources-to-args | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-resources-to-args | FileCheck %s
 
 // One resource, one read. The initial value of the resource is read.
 // CHECK-LABEL: func @main(%arg0: tensor<i1>, %arg1: tensor<f32> {tf.resource_name = "x"}) -> tensor<2xf32>

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-var-handles-to-args | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-promote-var-handles-to-args | FileCheck %s
 
 // Tests main function with multiple blocks.
 

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-resource-op-lifting | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-resource-op-lifting | FileCheck %s
 
 // Tests that resource load operations are hoisted.
 

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tf-rewrite-tpu-embedding-ops %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -tf-rewrite-tpu-embedding-ops %s | FileCheck %s
 
 // CHECK-LABEL: func @recv_tpu_embedding_activations
 func @recv_tpu_embedding_activations() -> (tensor<512x256xf32>) {

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-stack-ops-decomposition | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-stack-ops-decomposition | FileCheck %s
 
 // Tests simple scalar stack operations without control flow.
 
@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-array-ops-decomposition | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-array-ops-decomposition | FileCheck %s
 
 // Test read and write on a tensor list.
 

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-list-ops-decomposition | FileCheck %s -dump-input-on-failure
+// RUN: tf-opt %s -split-input-file -verify-diagnostics -tf-tensor-list-ops-decomposition | FileCheck %s
 
 // Test push and pop on a tensor list which is initially empty.
 

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s --dump-input-on-failure < %t
+// RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s < %t
 
 module {
   // CHECK-LABEL: fuse_map_and_batch

@@ -1,4 +1,4 @@
-// RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s --dump-input-on-failure < %t
+// RUN: tf-opt -tf-standard-pipeline -tf-data-optimization %s -o %t && FileCheck %s < %t
 
 module {
   // CHECK-LABEL: fuse_pmap_and_batch
@@ -13,7 +13,7 @@
 # limitations under the License.
 # ==============================================================================
 
-# RUN: %p/multi_arguments_results_v1 | FileCheck -dump-input-on-failure %s
+# RUN: %p/multi_arguments_results_v1 | FileCheck %s
 
 # pylint: disable=missing-docstring,line-too-long
 from __future__ import absolute_import

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -tf-tpu-outside-compilation-cluster | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tf-tpu-outside-compilation-cluster | FileCheck %s
 
 // CHECK-LABEL: func @one_cluster_no_dependencies
 func @one_cluster_no_dependencies() {

@@ -1,4 +1,4 @@
-# RUN: tf_tfjs_translate %s -tf-input-arrays=input0,input1 -tf-input-data-types=DT_INT32,DT_INT32 -tf-input-shapes=10:10 -tf-output-arrays=Mul -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf_tfjs_translate %s -tf-input-arrays=input0,input1 -tf-input-data-types=DT_INT32,DT_INT32 -tf-input-shapes=10:10 -tf-output-arrays=Mul -o - | FileCheck %s
 # Add two tensor<4xi32> inputs and return the result
 
 node {

@@ -1,4 +1,4 @@
-# RUN: tf_tfjs_translate %s -tf-input-arrays=input0 -tf-input-data-types=DT_FLOAT -tf-input-shapes=10 -tf-output-arrays=Add -tf-custom-opdefs="name: 'Prelu' input_arg: { name: 'x' type: DT_FLOAT } input_arg: { name: 'alpha' type: DT_FLOAT } output_arg: { name: 'c' type: DT_FLOAT }" -o - | FileCheck %s --dump-input-on-failure
+# RUN: tf_tfjs_translate %s -tf-input-arrays=input0 -tf-input-data-types=DT_FLOAT -tf-input-shapes=10 -tf-output-arrays=Add -tf-custom-opdefs="name: 'Prelu' input_arg: { name: 'x' type: DT_FLOAT } input_arg: { name: 'alpha' type: DT_FLOAT } output_arg: { name: 'c' type: DT_FLOAT }" -o - | FileCheck %s
 # Add two tensor<4xi32> inputs and return the result
 
 node {
@@ -1,4 +1,4 @@
-// RUN: tf-opt -split-input-file -verify-diagnostics -tfl-runtime-verify %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -split-input-file -verify-diagnostics -tfl-runtime-verify %s | FileCheck %s
 
 // -----
 

@@ -1,5 +1,5 @@
 // Run optimize pass only and check the results.
-// RUN: tf-opt %s -tfjs-optimize | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -tfjs-optimize | FileCheck %s
 
 // CHECK-LABEL: prelu_fusion
 func @prelu_fusion(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {

@@ -1,4 +1,4 @@
-// RUN: tf-opt -xla-legalize-tf=allow-partial-conversion %s | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt -xla-legalize-tf=allow-partial-conversion %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
 // tf.BatchMatMulV2 op legalizations.

@@ -4,7 +4,7 @@
 // Lowering to STD dialect and store forwarding pass would be required to get
 // rid of them. This is exactly what is done in the real MLIR GPU pipeline, but
 // here we disable verification with `verify-each=0` to check the output IR.
-// RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize --verify-each=0 | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize --verify-each=0 | FileCheck %s
 
 func @select_and_scatter(%arg: memref<112x112xf32>,
                          %src: memref<56x56xf32>,
@@ -1,4 +1,4 @@
-// RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize -split-input-file | FileCheck %s --dump-input-on-failure
+// RUN: xla-opt %s -lhlo-legalize-to-parallel-loops -canonicalize -split-input-file | FileCheck %s
 
 func @reduce(%arg: memref<100x10x5xf32>,
              %init: memref<f32>,

@@ -1,4 +1,4 @@
-// RUN: tf-opt %s -xla-hlo-fusion -split-input-file | FileCheck %s --dump-input-on-failure
+// RUN: tf-opt %s -xla-hlo-fusion -split-input-file | FileCheck %s
 
 // CHECK-LABEL: func @multi_outputs_same
 func @multi_outputs_same(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> (tensor<?x?xf32>, tensor<?x?xf32>) {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Abs
 ENTRY %Abs (val: f32[2,2]) -> f32[2,2] {
   %val = f32[2,2]{1,0} parameter(0)

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Add
 
 ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] {
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt -lowering-stage=KERNEL %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt -lowering-stage=KERNEL %s | FileCheck %s
 HloModule Add
 
 ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s
 HloModule Add
 
 ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule AddMultiply
 
 ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s
 HloModule AddMultiply
 
 ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] {
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule AddReduce
 
 %add (x: f32[], y: f32[]) -> f32[] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Broadcast
 
 ENTRY %Broadcast (x: f32[10]) -> f32[10, 5] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt -verify-errors %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt -verify-errors %s | FileCheck %s
 HloModule Add
 
 ENTRY %Add (x: f32[2,2,2], y: f32[2,2,2]) -> f32[2,2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Ceil
 ENTRY %Ceil (val: f32[2,2]) -> f32[2,2] {
   %val = f32[2,2]{1,0} parameter(0)
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Compare
 
 ENTRY %Compare (x: f32[2,2], y: f32[2,2]) -> pred[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Complex
 
 ENTRY %Complex (real: f32[2,2]{0,1}, imag: f32[2,2]{0,1}) -> c64[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Concatenate
 
 ENTRY %Concatenate (x: f32[2,3], y: f32[2,2]) -> f32[2,5] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Const
 
 ENTRY %Const () -> s32[100] {
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Copy
 
 ENTRY %Copy (x: f32[2,4]) -> f32[2,4] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule CopyTranspose
 
 ENTRY %CopyTranspose (x: f32[2,4]) -> f32[2,4]{0,1} {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Cos
 ENTRY %Cos (val: f32[2,2]) -> f32[2,2] {
   %val = f32[2,2]{1,0} parameter(0)

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Exp
 
 ENTRY %Exp (x: f32[2,2]) -> f32[2,2] {
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule FusedReduce
 
 %add (x: f32[], y: f32[]) -> f32[] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Imag
 
 ENTRY %Imag (x: c64[2,2]{0,1}) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Iota
 
 ENTRY %Iota() -> s64[10, 5] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt -lowering-stage=GPU %s | FileCheck %s
 HloModule AddSubtract
 
 ENTRY %AddSubtract (x: s32[2,2], y: s32[2,2]) -> s32[2,2] {
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Log
 
 ENTRY %Log (x: f32[2,2]) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Neg
 ENTRY %Neg (val: f32[2,2]) -> f32[2,2] {
   %val = f32[2,2]{1,0} parameter(0)

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Real
 
 ENTRY %Real (x: c64[2,2]{0,1}) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule ReduceWindow
 
 %max (x: f32[], y: f32[]) -> f32[] {
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Rem
 ENTRY %Rem(x: f32[2,2], y: f32[2,2]) -> f32[2,2] {
   %x = f32[2,2]{1,0} parameter(0)

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Rsqrt
 
 ENTRY %Rsqrt (x: f32[2,2]) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Select
 
 ENTRY %Select (p: pred[2,2], x: f32[2,2], y: f32[2,2]) -> f32[2,2] {

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule SelectAndScatter
 
 %ge (x: f32[], y: f32[]) -> pred[] {
@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Sign
 ENTRY %Sign (val: f32[2,2]) -> f32[2,2] {
   %val = f32[2,2]{1,0} parameter(0)

@@ -1,4 +1,4 @@
-// RUN: xla-gpu-opt %s | FileCheck %s -dump-input-on-failure
+// RUN: xla-gpu-opt %s | FileCheck %s
 HloModule Sqrt
 
 ENTRY %Sqrt (x: f32[2,2]) -> f32[2,2] {
Some files were not shown because too many files have changed in this diff.