Integrate LLVM at llvm/llvm-project@1cbf8e89b5
Updates LLVM usage to match
[1cbf8e89b54d](https://github.com/llvm/llvm-project/commit/1cbf8e89b54d)

PiperOrigin-RevId: 341867974
Change-Id: Iaf70623266ce86b87a31aa6bfbe65f2659acb5ea
parent 094af07eda
commit 28835e4103
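This integrate picks up an upstream MLIR change to how symbol visibility is spelled: visibility is now printed and parsed as an inline keyword on `func` (`private`, or `nested` for functions inside nested symbol tables) rather than as a `sym_visibility` string attribute. The test updates below are mechanical rewrites of FileCheck expectations from the attribute form to the keyword form. A minimal sketch of the syntax change, using a hypothetical `@example` function rather than one taken from this diff:

```mlir
// Before this integrate: visibility was carried as an attribute.
func @example() -> tensor<i1> attributes {sym_visibility = "private"} {
  %0 = "tf.Const"() {value = dense<true> : tensor<i1>} : () -> tensor<i1>
  return %0 : tensor<i1>
}

// After this integrate: visibility is an inline keyword.
func private @example() -> tensor<i1> {
  %0 = "tf.Const"() {value = dense<true> : tensor<i1>} : () -> tensor<i1>
  return %0 : tensor<i1>
}
```

Public functions keep the bare `func @...` spelling, which is why expectations such as `func @main` are unchanged below while callees, branch functions, and outlined region bodies gain `private`.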
@@ -411,11 +411,11 @@ versions {
 # CHECK-NEXT: constant dense<[5.000000e+00, 6.000000e+00, 7.000000e+00, 8.000000e+00]>
 # CHECK: "tf.If"{{.+}}else_branch = @cond_false_10{{.+}}is_stateless = true{{.+}}then_branch = @cond_true_10
 # CHECK: "tf.If"{{.+}}else_branch = @cond_false0{{.+}}is_stateless = false{{.+}}then_branch = @cond_true0
-# CHECK: func @cond_false_10
+# CHECK: func private @cond_false_10
 # CHECK-NEXT: tfl.div
-# CHECK: func @cond_true_10
+# CHECK: func private @cond_true_10
 # CHECK-NEXT: tfl.sub
-# CHECK: func @cond_false0
+# CHECK: func private @cond_false0
 # CHECK-NEXT: tfl.mul
-# CHECK: func @cond_true0
+# CHECK: func private @cond_true0
 # CHECK-NEXT: tfl.add
@@ -5,8 +5,8 @@
 // CHECK: func @main(%arg0: tensor<1xf32>) -> tensor<*xf32>
 // CHECK: %0 = "tf.While"(%arg0) {body = @body, cond = @cond, is_stateless = false} : (tensor<1xf32>) -> tensor<*xf32>
 // CHECK: return %0 : tensor<*xf32>
-// CHECK: func @cond(%arg0: tensor<*xf32>) -> tensor<*xf32>
-// CHECK: func @body(%arg0: tensor<*xf32>) -> tensor<*xf32>
+// CHECK: func private @cond(%arg0: tensor<*xf32>) -> tensor<*xf32>
+// CHECK: func private @body(%arg0: tensor<*xf32>) -> tensor<*xf32>
 
 func @main(%arg0: tensor<1xf32>) -> tensor<*xf32> {
 %0 = "tf.While"(%arg0) {cond = @cond, body = @body, is_stateless = false} : (tensor<1xf32>) -> tensor<*xf32>
@@ -1,6 +1,6 @@
 // RUN: tf-opt -tfl-prepare-composite-funcs-tf -tfl-fuse-tftext=true %s | FileCheck %s
 
-func @whitespace_tokenizer_rank1(%arg0: tensor<1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>) attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<1>], tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf.signature.is_stateful} {
+func private @whitespace_tokenizer_rank1(%arg0: tensor<1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>) attributes {tf._input_shapes = [#tf.shape<1>], tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<[0, 1]> : tensor<2xi64>} : () -> tensor<2xi64>
 %1 = "tf.Const"() {value = dense<[]> : tensor<0xi64>} : () -> tensor<0xi64>
 %2 = "tf.Const"() {value = dense<true> : tensor<i1>} : () -> tensor<i1>
@@ -1026,11 +1026,11 @@ func @WhitespaceTokenize_RaggedGather_1_Assert_3_AssertGuard_true_23810(%arg0: t
 return %1 : tensor<i1>
 }
 
-// CHECK: func @whitespace_tokenizer_rank1(%arg0: tensor<1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>) attributes {sym_visibility = "private", tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf._input_shapes = [#tf.shape<1>], tf.signature.is_stateful} {
+// CHECK: func private @whitespace_tokenizer_rank1(%arg0: tensor<1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>) attributes {tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf._input_shapes = [#tf.shape<1>], tf.signature.is_stateful} {
 // CHECK: %0:2 = "tfl.custom"(%arg0) {custom_code = "tftext:WhitespaceTokenizer", custom_option = opaque<"tfl", "0x"> : tensor<0xi8>} : (tensor<1x!tf.string>) -> (tensor<?x!tf.string>, tensor<?xi64>)
 // CHECK: return %0#0, %0#1 : tensor<?x!tf.string>, tensor<?xi64>
 
-func @whitespace_tokenizer_rank2(%arg0: tensor<?x1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>, tensor<?xi64>) attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<?x1>], tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf.signature.is_stateful} {
+func private @whitespace_tokenizer_rank2(%arg0: tensor<?x1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>, tensor<?xi64>) attributes {tf._input_shapes = [#tf.shape<?x1>], tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<[]> : tensor<0xi64>} : () -> tensor<0xi64>
 %1 = "tf.Const"() {value = dense<true> : tensor<i1>} : () -> tensor<i1>
 %2 = "tf.Const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
@@ -2160,11 +2160,11 @@ func @WhitespaceTokenize_WhitespaceTokenize_WhitespaceTokenize_RaggedGather_1_As
 
 
 
-// CHECK: func @whitespace_tokenizer_rank2(%arg0: tensor<?x1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>, tensor<?xi64>) attributes {sym_visibility = "private", tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf._input_shapes = [#tf.shape<?x1>], tf.signature.is_stateful} {
+// CHECK: func private @whitespace_tokenizer_rank2(%arg0: tensor<?x1x!tf.string> {tf._user_specified_name = "input"}) -> (tensor<?x!tf.string>, tensor<?xi64>, tensor<?xi64>) attributes {tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf._input_shapes = [#tf.shape<?x1>], tf.signature.is_stateful} {
 // CHECK: %0:3 = "tfl.custom"(%arg0) {custom_code = "tftext:WhitespaceTokenizer", custom_option = opaque<"tfl", "0x"> : tensor<0xi8>} : (tensor<?x1x!tf.string>) -> (tensor<?x!tf.string>, tensor<?xi64>, tensor<?xi64>)
 // CHECK: return %0#0, %0#1, %0#2 : tensor<?x!tf.string>, tensor<?xi64>, tensor<?xi64>
 
-func @whitespace_tokenizer_rank0(%arg0: tensor<!tf.string> {tf._user_specified_name = "input"}) -> tensor<?x!tf.string> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>], tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf.signature.is_stateful} {
+func private @whitespace_tokenizer_rank0(%arg0: tensor<!tf.string> {tf._user_specified_name = "input"}) -> tensor<?x!tf.string> attributes {tf._input_shapes = [#tf.shape<>], tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<[0, 1]> : tensor<2xi64>} : () -> tensor<2xi64>
 %1 = "tf.Const"() {value = dense<[]> : tensor<0xi64>} : () -> tensor<0xi64>
 %2 = "tf.Const"() {value = dense<true> : tensor<i1>} : () -> tensor<i1>
@@ -3190,7 +3190,7 @@ func @WhitespaceTokenize_WhitespaceTokenize_RaggedGather_1_Assert_3_AssertGuard_
 return %1 : tensor<i1>
 }
 
-// CHECK: func @whitespace_tokenizer_rank0(%arg0: tensor<!tf.string> {tf._user_specified_name = "input"}) -> tensor<?x!tf.string> attributes {sym_visibility = "private", tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf._input_shapes = [#tf.shape<>], tf.signature.is_stateful} {
+// CHECK: func private @whitespace_tokenizer_rank0(%arg0: tensor<!tf.string> {tf._user_specified_name = "input"}) -> tensor<?x!tf.string> attributes {tf._implements = #tf.func<@"tftext:WhitespaceTokenizer", {}>, tf._input_shapes = [#tf.shape<>], tf.signature.is_stateful} {
 // CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "tftext:WhitespaceTokenizer", custom_option = opaque<"tfl", "0x"> : tensor<0xi8>} : (tensor<!tf.string>) -> tensor<?x!tf.string>
 // CHECK: return %0 : tensor<?x!tf.string>
 
@@ -3213,7 +3213,7 @@ func @ngrams(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "input"}) ->
 // CHECK: return %0 : tensor<?x!tf.string>
 // CHECK: }
 
-func @ngrams_ragged_rank_2(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<3xi64> {tf._user_specified_name = "args_0"}, %arg2: tensor<?xi64> {tf._user_specified_name = "args_1"}) -> (tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>) attributes {sym_visibility = "private", tf._implements = #tf.func<@"tftext:Ngrams", {axis = -1 : i64, reduction_type = "STRING_JOIN", string_separator = "", width = 2 : i64}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<3>, #tf.shape<?>], tf.signature.is_stateful} {
+func private @ngrams_ragged_rank_2(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<3xi64> {tf._user_specified_name = "args_0"}, %arg2: tensor<?xi64> {tf._user_specified_name = "args_1"}) -> (tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>) attributes {tf._implements = #tf.func<@"tftext:Ngrams", {axis = -1 : i64, reduction_type = "STRING_JOIN", string_separator = "", width = 2 : i64}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<3>, #tf.shape<?>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
 %1 = "tf.Const"() {value = dense<-1> : tensor<i64>} : () -> tensor<i64>
 %2 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
@@ -3330,12 +3330,12 @@ func @ngrams_ragged_rank_2(%arg0: tensor<?x!tf.string> {tf._user_specified_name
 %71 = "tf.Identity"(%70) {device = ""} : (tensor<3xi64>) -> tensor<3xi64>
 return %68, %71, %64 : tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_true_27770(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_true_27770(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
 %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<i1>) -> tensor<i1>
 %1 = "tf.Identity"(%0) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %1 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_false_27780(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_false_27780(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<"Arguments to from_row_splits do not form a valid RaggedTensor:zero"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %1 = "tf.Const"() {value = dense<"Condition x == y did not hold element-wise:"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %2 = "tf.Const"() {value = dense<"x (RaggedFromNestedRowSplits/RaggedFromRowSplits/RowPartitionFromRowSplits/strided_slice:0) = "> : tensor<!tf.string>} : () -> tensor<!tf.string>
@@ -3345,12 +3345,12 @@ func @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_as
 %5 = "tf.Identity"(%4) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %5 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_true_28130(%arg0: tensor<i1>, %arg1: tensor<?xi64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<?>]} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_true_28130(%arg0: tensor<i1>, %arg1: tensor<?xi64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<?>]} {
 %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<i1>) -> tensor<i1>
 %1 = "tf.Identity"(%0) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %1 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_false_28140(%arg0: tensor<i1>, %arg1: tensor<?xi64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<?>], tf.signature.is_stateful} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_false_28140(%arg0: tensor<i1>, %arg1: tensor<?xi64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<?>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<"Arguments to from_row_splits do not form a valid RaggedTensor:monotonic"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %1 = "tf.Const"() {value = dense<"Condition x >= 0 did not hold element-wise:"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %2 = "tf.Const"() {value = dense<"x (RaggedFromNestedRowSplits/RaggedFromRowSplits/RowPartitionFromRowSplits/sub:0) = "> : tensor<!tf.string>} : () -> tensor<!tf.string>
@@ -3359,12 +3359,12 @@ func @RaggedFromNestedRowSplits_RaggedFromRowSplits_RowPartitionFromRowSplits_as
 %4 = "tf.Identity"(%3) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %4 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_assert_equal_1_Assert_AssertGuard_true_28500(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_assert_equal_1_Assert_AssertGuard_true_28500(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
 %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<i1>) -> tensor<i1>
 %1 = "tf.Identity"(%0) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %1 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_assert_equal_1_Assert_AssertGuard_false_28510(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_assert_equal_1_Assert_AssertGuard_false_28510(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<"Arguments to _from_row_partition do not form a valid RaggedTensor"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %1 = "tf.Const"() {value = dense<"Condition x == y did not hold element-wise:"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %2 = "tf.Const"() {value = dense<"x (RaggedFromNestedRowSplits/RaggedFromRowSplits/strided_slice_1:0) = "> : tensor<!tf.string>} : () -> tensor<!tf.string>
@@ -3374,12 +3374,12 @@ func @RaggedFromNestedRowSplits_RaggedFromRowSplits_assert_equal_1_Assert_Assert
 %5 = "tf.Identity"(%4) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %5 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_true_28900(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_true_28900(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
 %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<i1>) -> tensor<i1>
 %1 = "tf.Identity"(%0) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %1 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_false_28910(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_equal_1_Assert_AssertGuard_false_28910(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<"Arguments to from_row_splits do not form a valid RaggedTensor:zero"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %1 = "tf.Const"() {value = dense<"Condition x == y did not hold element-wise:"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %2 = "tf.Const"() {value = dense<"x (RaggedFromNestedRowSplits/RaggedFromRowSplits_1/RowPartitionFromRowSplits/strided_slice:0) = "> : tensor<!tf.string>} : () -> tensor<!tf.string>
@@ -3389,12 +3389,12 @@ func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_
 %5 = "tf.Identity"(%4) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %5 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_true_29260(%arg0: tensor<i1>, %arg1: tensor<2xi64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<2>]} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_true_29260(%arg0: tensor<i1>, %arg1: tensor<2xi64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<2>]} {
 %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<i1>) -> tensor<i1>
 %1 = "tf.Identity"(%0) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %1 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_false_29270(%arg0: tensor<i1>, %arg1: tensor<2xi64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<2>], tf.signature.is_stateful} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_assert_non_negative_assert_less_equal_Assert_AssertGuard_false_29270(%arg0: tensor<i1>, %arg1: tensor<2xi64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<2>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<"Arguments to from_row_splits do not form a valid RaggedTensor:monotonic"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %1 = "tf.Const"() {value = dense<"Condition x >= 0 did not hold element-wise:"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %2 = "tf.Const"() {value = dense<"x (RaggedFromNestedRowSplits/RaggedFromRowSplits_1/RowPartitionFromRowSplits/sub:0) = "> : tensor<!tf.string>} : () -> tensor<!tf.string>
@@ -3403,12 +3403,12 @@ func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_RowPartitionFromRowSplits_
 %4 = "tf.Identity"(%3) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %4 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_assert_equal_1_Assert_AssertGuard_true_29650(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_assert_equal_1_Assert_AssertGuard_true_29650(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>]} {
 %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<i1>) -> tensor<i1>
 %1 = "tf.Identity"(%0) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %1 : tensor<i1>
 }
-func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_assert_equal_1_Assert_AssertGuard_false_29660(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
+func private @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_assert_equal_1_Assert_AssertGuard_false_29660(%arg0: tensor<i1>, %arg1: tensor<i64>, %arg2: tensor<i64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<>, #tf.shape<>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<"Arguments to _from_row_partition do not form a valid RaggedTensor"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %1 = "tf.Const"() {value = dense<"Condition x == y did not hold element-wise:"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %2 = "tf.Const"() {value = dense<"x (RaggedFromNestedRowSplits/RaggedFromRowSplits_1/strided_slice:0) = "> : tensor<!tf.string>} : () -> tensor<!tf.string>
@@ -3418,12 +3418,12 @@ func @RaggedFromNestedRowSplits_RaggedFromRowSplits_1_assert_equal_1_Assert_Asse
 %5 = "tf.Identity"(%4) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %5 : tensor<i1>
 }
-func @NGrams_SlidingWindow_RaggedConcat_assert_equal_2_Assert_AssertGuard_true_30330(%arg0: tensor<i1>, %arg1: tensor<?xi64>, %arg2: tensor<?xi64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<?>, #tf.shape<?>]} {
+func private @NGrams_SlidingWindow_RaggedConcat_assert_equal_2_Assert_AssertGuard_true_30330(%arg0: tensor<i1>, %arg1: tensor<?xi64>, %arg2: tensor<?xi64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<?>, #tf.shape<?>]} {
 %0 = "tf.Identity"(%arg0) {device = ""} : (tensor<i1>) -> tensor<i1>
 %1 = "tf.Identity"(%0) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %1 : tensor<i1>
 }
-func @NGrams_SlidingWindow_RaggedConcat_assert_equal_2_Assert_AssertGuard_false_30340(%arg0: tensor<i1>, %arg1: tensor<?xi64>, %arg2: tensor<?xi64>) -> tensor<i1> attributes {sym_visibility = "private", tf._input_shapes = [#tf.shape<>, #tf.shape<?>, #tf.shape<?>], tf.signature.is_stateful} {
+func private @NGrams_SlidingWindow_RaggedConcat_assert_equal_2_Assert_AssertGuard_false_30340(%arg0: tensor<i1>, %arg1: tensor<?xi64>, %arg2: tensor<?xi64>) -> tensor<i1> attributes {tf._input_shapes = [#tf.shape<>, #tf.shape<?>, #tf.shape<?>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<"Inputs must have identical ragged splits"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %1 = "tf.Const"() {value = dense<"Condition x == y did not hold element-wise:"> : tensor<!tf.string>} : () -> tensor<!tf.string>
 %2 = "tf.Const"() {value = dense<"x (NGrams/SlidingWindow/RaggedGetItem/RaggedRange:0) = "> : tensor<!tf.string>} : () -> tensor<!tf.string>
@@ -3433,12 +3433,12 @@ func @NGrams_SlidingWindow_RaggedConcat_assert_equal_2_Assert_AssertGuard_false_
 %5 = "tf.Identity"(%4) {device = ""} : (tensor<i1>) -> tensor<i1>
 return %5 : tensor<i1>
 }
-// CHECK: func @ngrams_ragged_rank_2(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<3xi64> {tf._user_specified_name = "args_0"}, %arg2: tensor<?xi64> {tf._user_specified_name = "args_1"}) -> (tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>) attributes {sym_visibility = "private", tf._implements = #tf.func<@"tftext:Ngrams", {axis = -1 : i64, reduction_type = "STRING_JOIN", string_separator = "", width = 2 : i64}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<3>, #tf.shape<?>], tf.signature.is_stateful} {
+// CHECK: func private @ngrams_ragged_rank_2(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<3xi64> {tf._user_specified_name = "args_0"}, %arg2: tensor<?xi64> {tf._user_specified_name = "args_1"}) -> (tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>) attributes {tf._implements = #tf.func<@"tftext:Ngrams", {axis = -1 : i64, reduction_type = "STRING_JOIN", string_separator = "", width = 2 : i64}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<3>, #tf.shape<?>], tf.signature.is_stateful} {
 // CHECK: %0:3 = "tfl.custom"(%arg0, %arg1, %arg2) {custom_code = "tftext:Ngrams", custom_option = opaque<"tfl", "0x776964746800737472696E675F736570617261746F720000006178697300726564756374696F6E5F74797065000B535452494E475F4A4F494E0004221E373E040104FF152C0204141404082401"> : tensor<77xi8>} : (tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>) -> (tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>)
 // CHECK: return %0#0, %0#1, %0#2 : tensor<?x!tf.string>, tensor<3xi64>, tensor<?xi64>
 
 
-func @sgnn_projection(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<?xi64> {tf._user_specified_name = "row_splits"}) -> tensor<?x10xf64> attributes {sym_visibility = "private", tf._implements = #tf.func<@"tftext:custom:SgnnProjection", {buckets = 2147483647 : i64, hash_seed = [1902835825, -1475704015, 473120514, 1254202069, 1558833093, 1756181982, 1906603252, -1034142694, 542842690, 535515822]}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<?>], tf.signature.is_stateful} {
+func private @sgnn_projection(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<?xi64> {tf._user_specified_name = "row_splits"}) -> tensor<?x10xf64> attributes {tf._implements = #tf.func<@"tftext:custom:SgnnProjection", {buckets = 2147483647 : i64, hash_seed = [1902835825, -1475704015, 473120514, 1254202069, 1558833093, 1756181982, 1906603252, -1034142694, 542842690, 535515822]}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<?>], tf.signature.is_stateful} {
 %0 = "tf.Const"() {value = dense<[[1902835825], [-1475704015], [473120514], [1254202069], [1558833093], [1756181982], [1906603252], [-1034142694], [542842690], [535515822]]> : tensor<10x1xi64>} : () -> tensor<10x1xi64>
 %1 = "tf.StringToHashBucketFast"(%arg0) {device = "", num_buckets = 2147483647 : i64} : (tensor<?x!tf.string>) -> tensor<?xi64>
 %2 = "tf.Sgnn"(%1, %0) {device = ""} : (tensor<?xi64>, tensor<10x1xi64>) -> tensor<10x?xf64>
@@ -3448,6 +3448,6 @@ func @sgnn_projection(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "va
 }
 
 
-// CHECK: func @sgnn_projection(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<?xi64> {tf._user_specified_name = "row_splits"}) -> tensor<?x10xf64> attributes {sym_visibility = "private", tf._implements = #tf.func<@"tftext:custom:SgnnProjection", {buckets = 2147483647 : i64, hash_seed = [1902835825, -1475704015, 473120514, 1254202069, 1558833093, 1756181982, 1906603252, -1034142694, 542842690, 535515822]}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<?>], tf.signature.is_stateful} {
+// CHECK: func private @sgnn_projection(%arg0: tensor<?x!tf.string> {tf._user_specified_name = "values"}, %arg1: tensor<?xi64> {tf._user_specified_name = "row_splits"}) -> tensor<?x10xf64> attributes {tf._implements = #tf.func<@"tftext:custom:SgnnProjection", {buckets = 2147483647 : i64, hash_seed = [1902835825, -1475704015, 473120514, 1254202069, 1558833093, 1756181982, 1906603252, -1034142694, 542842690, 535515822]}>, tf._input_shapes = [#tf.shape<?>, #tf.shape<?>], tf.signature.is_stateful} {
 // CHECK: %0 = "tfl.custom"(%arg0, %arg1) {custom_code = "tftext:custom:SgnnProjection", custom_option = opaque<"tfl", "0x686173685F736565640000000A00000071F86A71318B0AA8023F331CD59AC14AC5E7E95CDE35AD68F474A4711A3C5CC2421F5B20AE52EB1F6275636B6574730002094200030000000100000002000000FFFFFF7F44000000062E0A2601"> : tensor<93xi8>} : (tensor<?x!tf.string>, tensor<?xi64>) -> tensor<?x10xf64>
 // CHECK: return %0 : tensor<?x10xf64>
@@ -30,9 +30,9 @@ func @while() -> tensor<1xf32>
 }) : (tensor<i32>, tensor<1xf32>) -> (tensor<i32>, tensor<1xf32>) loc("WhileOp")
 return %0#1 : tensor<1xf32>
 }
-// CHECK-LABEL: func @WhileOp_cond(
+// CHECK-LABEL: func private @WhileOp_cond(
 // CHECK: tfl.greater
-// CHECK-LABEL: func @WhileOp_body(
+// CHECK-LABEL: func private @WhileOp_body(
 // CHECK: tfl.sub
 // CHECK: tfl.add
 
@@ -63,21 +63,21 @@ func @while2(%cst : tensor<i32>) -> tensor<1xf32> attributes {tf.entry_function
 return %0#1 : tensor<1xf32>
 }
 
-func @WhileOp_cond(%arg0: tensor<*xi32>, %arg1: tensor<*xf32>, %arg2: tensor<i32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @WhileOp_cond(%arg0: tensor<*xi32>, %arg1: tensor<*xf32>, %arg2: tensor<i32>) -> tensor<i1> {
 %cst = constant dense<0> : tensor<i32>
 %0 = "tfl.greater"(%arg0, %cst) : (tensor<*xi32>, tensor<i32>) -> tensor<i1>
 return %0 : tensor<i1>
 }
 
-func @WhileOp_body(%arg0: tensor<*xi32>, %arg1: tensor<*xf32>, %arg2: tensor<i32>) -> (tensor<*xi32>, tensor<*xf32>, tensor<i32>) attributes {sym_visibility = "private"} {
+func private @WhileOp_body(%arg0: tensor<*xi32>, %arg1: tensor<*xf32>, %arg2: tensor<i32>) -> (tensor<*xi32>, tensor<*xf32>, tensor<i32>) {
 %0 = "tfl.sub"(%arg0, %arg2) {fused_activation_function = "NONE"} : (tensor<*xi32>, tensor<i32>) -> tensor<*xi32>
 %1 = tfl.add %arg1, %arg1 {fused_activation_function = "NONE"} : tensor<*xf32>
 return %0, %1, %arg2 : tensor<*xi32>, tensor<*xf32>, tensor<i32>
 }
 
-// CHECK-LABEL: func @WhileOp_cond(
+// CHECK-LABEL: func private @WhileOp_cond(
 // CHECK: tfl.greater
-// CHECK-LABEL: func @WhileOp_body(
+// CHECK-LABEL: func private @WhileOp_body(
 // CHECK: tfl.sub
 // CHECK: tfl.add
 
@@ -152,14 +152,14 @@ func @rnn(%arg0: tensor<4x4x3xf32> {tf.device = "/device:CPU:0"}) -> tensor<4x?x
 // CHECK: tfl.yield
 // CHECK-SAME: (tensor<i32>, tensor<i32>, tensor<*xf32>, tensor<4x2xf32>, tensor<4x2xf32>, tensor<*xf32>, tensor<4x4x3xf32>) -> ()
 
-// CHECK-LABEL: func @tfl.while_cond(
-// CHECK-SAME: [[VAL_35:%.*]]: tensor<i32>, [[VAL_36:%.*]]: tensor<i32>, [[VAL_37:%.*]]: tensor<*xf32>, [[VAL_38:%.*]]: tensor<4x2xf32>, [[VAL_39:%.*]]: tensor<4x2xf32>, [[VAL_40:%.*]]: tensor<*xf32>, [[VAL_41:%.*]]: tensor<4x4x3xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+// CHECK-LABEL: func private @tfl.while_cond(
+// CHECK-SAME: [[VAL_35:%.*]]: tensor<i32>, [[VAL_36:%.*]]: tensor<i32>, [[VAL_37:%.*]]: tensor<*xf32>, [[VAL_38:%.*]]: tensor<4x2xf32>, [[VAL_39:%.*]]: tensor<4x2xf32>, [[VAL_40:%.*]]: tensor<*xf32>, [[VAL_41:%.*]]: tensor<4x4x3xf32>) -> tensor<i1> {
 // CHECK: return
 // CHECK-SAME: tensor<i1>
 // CHECK: }
 
-// CHECK-LABEL: func @tfl.while_body(
-// CHECK-SAME: [[VAL_46:%.*]]: tensor<i32>, [[VAL_47:%.*]]: tensor<i32>, [[VAL_48:%.*]]: tensor<*xf32>, [[VAL_49:%.*]]: tensor<4x2xf32>, [[VAL_50:%.*]]: tensor<4x2xf32>, [[VAL_51:%.*]]: tensor<*xf32>, [[VAL_52:%.*]]: tensor<4x4x3xf32>) -> (tensor<i32>, tensor<i32>, tensor<*xf32>, tensor<4x2xf32>, tensor<4x2xf32>, tensor<*xf32>, tensor<4x4x3xf32>) attributes {sym_visibility = "private"} {
+// CHECK-LABEL: func private @tfl.while_body(
+// CHECK-SAME: [[VAL_46:%.*]]: tensor<i32>, [[VAL_47:%.*]]: tensor<i32>, [[VAL_48:%.*]]: tensor<*xf32>, [[VAL_49:%.*]]: tensor<4x2xf32>, [[VAL_50:%.*]]: tensor<4x2xf32>, [[VAL_51:%.*]]: tensor<*xf32>, [[VAL_52:%.*]]: tensor<4x4x3xf32>) -> (tensor<i32>, tensor<i32>, tensor<*xf32>, tensor<4x2xf32>, tensor<4x2xf32>, tensor<*xf32>, tensor<4x4x3xf32>) {
 // CHECK: [[VAL_91:%.*]] = "tfl.cast"
 // CHECK: return
 // CHECK-SAME: [[VAL_91]], [[VAL_52]] : tensor<i32>, tensor<i32>, tensor<*xf32>, tensor<4x2xf32>, tensor<4x2xf32>, tensor<*xf32>, tensor<4x4x3xf32>
@@ -24,9 +24,8 @@ func @single_cluster(%arg0: tensor<?xi32>) -> tensor<?xi32> {
 return %0 : tensor<?xi32>
 }
 
-// CHECK: func @[[CLUSTER]]
+// CHECK: func private @[[CLUSTER]]
 // CHECK-SAME: (%[[CLUSTER_ARG_0:[a-z0-9]*]]: tensor<?xi32>) -> tensor<?xi32>
-// CHECK-SAME: sym_visibility = "private"
 // CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[CLUSTER_ARG_0]])
 // CHECK: return %[[B_OUTPUT]]
 
@@ -67,12 +66,12 @@ func @multiple_clusters(%arg0: tensor<?xi32>) -> tensor<?xi32> {
 return %0 : tensor<?xi32>
 }
 
-// CHECK: func @[[CLUSTER_0]]
+// CHECK: func private @[[CLUSTER_0]]
 // CHECK-SAME: (%[[CLUSTER_0_ARG_0:[a-z0-9]*]]: tensor<?xi32>) -> tensor<?xi32>
 // CHECK: %[[B_OUTPUT:[0-9]*]] = "tf.B"(%[[CLUSTER_0_ARG_0]])
 // CHECK: return %[[B_OUTPUT]]
 
-// CHECK: func @[[CLUSTER_1]]
+// CHECK: func private @[[CLUSTER_1]]
 // CHECK-SAME: (%[[CLUSTER_1_ARG_0:[a-z0-9]*]]: tensor<?xi32>, %[[CLUSTER_1_ARG_1:[a-z0-9]*]]: tensor<?xi32>) -> tensor<?xi32>
 // CHECK: %[[E_OUTPUT:[0-9]*]] = "tf.E"(%[[CLUSTER_1_ARG_0]])
 // CHECK: %[[F_OUTPUT:[0-9]*]] = "tf.F"(%[[CLUSTER_1_ARG_1]], %[[E_OUTPUT]])
@@ -98,7 +97,7 @@ func @cluster_operands(%arg0: tensor<?xi32>) -> tensor<?xi32> {
 return %0 : tensor<?xi32>
 }
 
-// CHECK: func @[[CLUSTER]]
+// CHECK: func private @[[CLUSTER]]
 // CHECK-SAME: () -> tensor<?xi32>
 // CHECK: %[[A_OUTPUT:[0-9]*]] = "tf.A"()
 // CHECK: return %[[A_OUTPUT]]
@@ -47,11 +47,11 @@ func @func2(%arg0 : tensor<i1>) -> tensor<i1> {
 
 // CHECK: module
 // CHECK-SAME: @_tpu_v1_compat_outlined
-// CHECK-LABEL: func @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> tensor<i1>
+// CHECK-LABEL: func nested @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> tensor<i1>
 // CHECK-NEXT: tf.TPUReplicateMetadata
 // CHECK-NEXT: tf.opA
 
-// CHECK-LABEL: func @_tpu_v1_compat_outlined_func1(%arg0: tensor<i1>, %arg1: tensor<f32>) -> (tensor<i1>, tensor<i32>)
+// CHECK-LABEL: func nested @_tpu_v1_compat_outlined_func1(%arg0: tensor<i1>, %arg1: tensor<f32>) -> (tensor<i1>, tensor<i32>)
 // CHECK-NEXT: tf.TPUReplicateMetadata
 // CHECK-NEXT: tf.opA
 // CHECK-NEXT: tf.opA
@@ -27,14 +27,14 @@ func @foo() {
 
 
 // In the newly cloned function, check that we have a _tf.If operation and capture the then and else branch.
-// CHECK: func @[[FUNCTIONALIZE_FUNC]]
+// CHECK: func private @[[FUNCTIONALIZE_FUNC]]
 // CHECK: "tf.If"
 // CHECK-SAME: else_branch = @[[ELSE_FUNC:[A-Za-z0-9_]*]]
 // CHECK-SAME: then_branch = @[[THEN_FUNC:[A-Za-z0-9_]*]]
 
 // We expect the _tf.Add in the else func and the _tf.Mul in the then func
 
-// CHECK: func @[[ELSE_FUNC]]
+// CHECK: func private @[[ELSE_FUNC]]
 // CHECK: "tf.Add"
-// CHECK: func @[[THEN_FUNC]]
+// CHECK: func private @[[THEN_FUNC]]
 // CHECK: "tf.Mul"
@@ -40,7 +40,7 @@ library {
 }
 }
 # Drop the control dependency on arg for the node "test"
-# CHECK-LABEL: func @foo
+# CHECK-LABEL: func private @foo
 # CHECK: tf_executor.island wraps "tf.Const"()
 node_def {
 name: "test"
@@ -80,6 +80,6 @@ versions {
 # CHECK-SAME: f = @[[FUNCTION:[a-zA-Z0-9_]*]]
 
 # Verify that callee has the unit attribute tf._input_shapes.
-# CHECK: func @[[FUNCTION]]
+# CHECK: func private @[[FUNCTION]]
 # CHECK: attributes
 # CHECK-SAME: tf._input_shapes{{[,}]}}
@@ -90,6 +90,6 @@ library {
 # CHECK: tf.HashTableV2
 # CHECK-SAME: shared_name = "hash_table_node"
 
-# CHECK: func @create_resource
+# CHECK: func private @create_resource
 # CHECK: tf.HashTableV2
 # CHECK-SAME: shared_name = "hash_table_node@create_resource"
@@ -49,5 +49,5 @@ library {
 }
 }
 
-# CHECK-DAG: func @custom_relu{{[0-9]*}}(){{.+}}tf._implements = #tf.func<@tensorflow.relu, {}>}
-# CHECK-DAG: func @custom_embedding_matmul{{[0-9]*}}(){{.+}}tf._implements = #tf.func<@tensorflow.embedding_matmul, {key1 = 2 : i64, key2 = false}>}
+# CHECK-DAG: func private @custom_relu{{[0-9]*}}(){{.+}}tf._implements = #tf.func<@tensorflow.relu, {}>}
+# CHECK-DAG: func private @custom_embedding_matmul{{[0-9]*}}(){{.+}}tf._implements = #tf.func<@tensorflow.embedding_matmul, {key1 = 2 : i64, key2 = false}>}
@@ -13,7 +13,7 @@
 # CHECK: %[[ISLAND_2:.*]], %[[ISLAND_2_control:.*]] = tf_executor.island wraps "tf.StatefulPartitionedCall"
 # CHECK-SAME: f = @[[FUNC:[a-z0-9]*]]
 # CHECK: tf_executor.fetch %[[ISLAND_1]], %[[ISLAND_2]] : tensor<*xf32>, tensor<*xf32>
-# CHECK: func @[[FUNC]](%arg0: tensor<*xf32> {tf._user_specified_name = "inputs"}, %arg1: tensor<*x!tf.resource>) -> tensor<*xf32>
+# CHECK: func private @[[FUNC]](%arg0: tensor<*xf32> {tf._user_specified_name = "inputs"}, %arg1: tensor<*x!tf.resource>) -> tensor<*xf32>
 
 node {
 name: "args_0"
@@ -55,4 +55,4 @@ versions {
 # site (a numerical suffix may be appended).
 
 # CHECK: "tf.LegacyCall"(%outputs) {_disable_call_shape_inference = false, device = "", f = @foo0}
-# CHECK: func @foo0
+# CHECK: func private @foo0
@@ -74,7 +74,7 @@ library {
 }
 # The attribute "experimental_ints_on_device" and the return type INT32
 # ensure that kDeviceRetOp is used instead of kRetOp
-# CHECK-LABEL: func @foo
+# CHECK-LABEL: func private @foo
 # CHECK: tf.experimental_ints_on_device = true
 # CHECK: return %{{.*}} tensor<{{.*}}i32>
 attr {
@@ -5,8 +5,8 @@
 # Verify that the NameAttrList is properly turned into reference to functions on import
 # CHECK: tf.Case
 # CHECK-SAME: branches = [@[[FOO:[a-z0-9]+]], @[[BAR:[a-z0-9]+]]]
-# CHECK-DAG: func @[[FOO]]()
-# CHECK-DAG: func @[[BAR]]()
+# CHECK-DAG: func private @[[FOO]]()
+# CHECK-DAG: func private @[[BAR]]()
 
 node {
 name: "predicate"
@@ -3,7 +3,7 @@
 # Verify that the _input_shapes attribute of the FunctionDef is respected.
 # This also checks that the output type is correctly inferred based on
 # that.
-#CHECK: func @identity_function0(%arg0: tensor<i32>) -> tensor<i32>
+#CHECK: func private @identity_function0(%arg0: tensor<i32>) -> tensor<i32>
 
 node {
 name: "Placeholder"
@@ -124,5 +124,5 @@ versions {
 # CHECK: "tf.LegacyCall"() {_disable_call_shape_inference = false, device = "", f = @foo110}
 # CHECK: "tf.LegacyCall"() {_disable_call_shape_inference = false, device = "", f = @foo111}
 
-# CHECK-LABEL: func @foo110() attributes {sym_visibility = "private"}
-# CHECK-LABEL: func @foo111() attributes {sym_visibility = "private"}
+# CHECK-LABEL: func private @foo110()
+# CHECK-LABEL: func private @foo111()
@@ -91,7 +91,7 @@ library {
 # CHECK-SAME: {_disable_call_shape_inference = true, device = "", f = @test_func_name0}
 # CHECK: tf_executor.fetch
 # CHECK: return
-# CHECK: func @test_func_name0
+# CHECK: func private @test_func_name0
 # CHECK-SAME: tf._resource_arg_unique_id = 0
 # CHECK-SAME: tf._resource_arg_unique_id = 0
 # CHECK: tf_executor.graph
@@ -4,7 +4,7 @@
 # links the function and its gradient. In MLIR a TF ops gradient function is
 # added to its list of function attributes.
 
-# CHECK: func @foo0(
+# CHECK: func private @foo0(
 # CHECK: tf.gradient = @foo_grad
 
 node {
@@ -4,8 +4,8 @@
 # functions with arg name that are the same as the graph input name
 
 # CHECK: func @main(%arg0: tensor<{{.*}}i32>) -> tensor<{{.*}}i32>
-# CHECK: func @while_body
-# CHECK: func @while_cond
+# CHECK: func private @while_body
+# CHECK: func private @while_cond
 
 node {
 name: "input"
@@ -57,7 +57,7 @@ versions {
 # CHECK: "tf.LegacyCall"() {_disable_call_shape_inference = true, device = "", f = @foo0}
 # CHECK: "tf.LegacyCall"() {_disable_call_shape_inference = false, device = "", f = @bar0}
 
-# CHECK-LABEL: func @foo0() attributes {sym_visibility = "private"}
+# CHECK-LABEL: func private @foo0()
 # CHECK: "tf.LegacyCall"() {_disable_call_shape_inference = false, device = "", f = @bar0}
 
-# CHECK-LABEL: func @bar0() attributes {sym_visibility = "private"}
+# CHECK-LABEL: func private @bar0()
@@ -106,5 +106,5 @@ versions {
 # CHECK: func @main
 # CHECK: "tf.PartitionedCall"()
 # CHECK-SAME: f = @[[FUNCTION:[A-Za-z0-9_]*]]
-# CHECK: func @[[FUNCTION]]() -> tensor<*xui8>
+# CHECK: func private @[[FUNCTION]]() -> tensor<*xui8>
 # CHECK: return {{.*}} : tensor<*xui8>
@@ -86,6 +86,6 @@ versions {
 # CHECK-SAME: f = @[[FUNCTION_FOO:[a-zA-Z0-9_]*]]
 
 # Find callee and verify it has the stateful attribute set.
-# CHECK: func @[[FUNCTION_FOO]]
+# CHECK: func private @[[FUNCTION_FOO]]
 # CHECK-SAME: attributes
 # CHECK-SAME: tf.signature.is_stateful
@@ -12,7 +12,7 @@ func @f() {
 }
 
 // CHECK: func @g()
-// CHECK: func @[[NEWG]]() attributes {sym_visibility = "private"}
+// CHECK: func private @[[NEWG]]()
 func @g() {
 return
 }
@@ -22,12 +22,12 @@ func @g() {
 // CHECK-LABEL: func @f
 // 2 copies of @g
 // CHECK-DAG: func @g{{.*}}
-// CHECK-DAG: func @g{{.*}}
+// CHECK-DAG: func private @g{{.*}}
 // 4 copies of @h
 // CHECK-DAG: func @h{{.*}}
-// CHECK-DAG: func @h{{.*}}
-// CHECK-DAG: func @h{{.*}}
-// CHECK-DAG: func @h{{.*}}
+// CHECK-DAG: func private @h{{.*}}
+// CHECK-DAG: func private @h{{.*}}
+// CHECK-DAG: func private @h{{.*}}
 func @f() {
 call @g() : () -> ()
 call @g() : () -> ()
@@ -47,7 +47,7 @@ func @h() {
 // -----
 // Handle error case of infinite recursion.
 // expected-error @+1 {{reached cloning limit}}
-func @f() attributes {sym_visibility = "private"} {
+func private @f() {
 call @f() : () -> ()
 call @f() : () -> ()
 return
@@ -33,4 +33,4 @@ func @foo(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> {
 // CHECK: "tf.Identity"([[CALL_RESULT_REG]])
 
 // Match the function name
-// CHECK: func @[[FUNCTION]]
+// CHECK: func private @[[FUNCTION]]
@@ -299,7 +299,7 @@ func @main(%arg0: tensor<i32>) -> tensor<2xf32> {
 %2 = "tf.PartitionedCall"(%0) {config = "", config_proto = "", executor_type = "", f = @callee} : (tensor<!tf.resource<tensor<2xf32>>>) -> tensor<2xf32>
 return %2 : tensor<2xf32>
 }
-func @callee(%arg0: tensor<!tf.resource<tensor<2xf32>>>) -> tensor<2xf32> attributes {sym_visibility = "private"} {
+func private @callee(%arg0: tensor<!tf.resource<tensor<2xf32>>>) -> tensor<2xf32> {
 %0 = "tf.ReadVariableOp"(%arg0) : (tensor<!tf.resource<tensor<2xf32>>>) -> tensor<2xf32>
 return %0 : tensor<2xf32>
 }
@@ -1,9 +1,9 @@
 // RUN: tf-opt %s -tf-region-control-flow-to-functional -split-input-file | FileCheck %s
 
 // Simple IfRegion
-// CHECK: func @tf.IfRegion_else(%arg0: tensor<*xf32>) -> tensor<*xf32>
+// CHECK: func private @tf.IfRegion_else(%arg0: tensor<*xf32>) -> tensor<*xf32>
 // CHECK-NEXT: "tf.Neg"
-// CHECK: func @tf.IfRegion_then(%arg0: tensor<*xf32>) -> tensor<*xf32>
+// CHECK: func private @tf.IfRegion_then(%arg0: tensor<*xf32>) -> tensor<*xf32>
 // CHECK-NEXT: "tf.Abs"
 func @testSimple(%arg0: tensor<i1>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 // CHECK: "tf.If"
@@ -24,9 +24,9 @@ func @testSimple(%arg0: tensor<i1>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 // -----
 
 // Use if condition inside the regions
-// CHECK: func @tf.IfRegion_else(%arg0: tensor<i1>, %arg1: tensor<2xf32>, %arg2: tensor<2xf32>, %arg3: tensor<2xf32>) -> tensor<2xf32>
+// CHECK: func private @tf.IfRegion_else(%arg0: tensor<i1>, %arg1: tensor<2xf32>, %arg2: tensor<2xf32>, %arg3: tensor<2xf32>) -> tensor<2xf32>
 // CHECK-NEXT: "tf.Select"(%arg0, %arg2, %arg3)
-// CHECK: func @tf.IfRegion_then(%arg0: tensor<i1>, %arg1: tensor<2xf32>, %arg2: tensor<2xf32>, %arg3: tensor<2xf32>) -> tensor<2xf32>
+// CHECK: func private @tf.IfRegion_then(%arg0: tensor<i1>, %arg1: tensor<2xf32>, %arg2: tensor<2xf32>, %arg3: tensor<2xf32>) -> tensor<2xf32>
 // CHECK-NEXT: "tf.Select"(%arg0, %arg1, %arg2)
 func @testIfCondition(%arg0: tensor<i1>, %arg1: tensor<2xf32>) -> tensor<2xf32> {
 %0 = "tf.Add"(%arg1, %arg1) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
@@ -48,9 +48,9 @@ func @testIfCondition(%arg0: tensor<i1>, %arg1: tensor<2xf32>) -> tensor<2xf32>
 
 // Constant sinking for IfRegion
 
-// CHECK: func @tf.IfRegion_else() -> tensor<2xf32>
+// CHECK: func private @tf.IfRegion_else() -> tensor<2xf32>
 // CHECK-NEXT: constant dense<1.0
-// CHECK: func @tf.IfRegion_then() -> tensor<2xf32>
+// CHECK: func private @tf.IfRegion_then() -> tensor<2xf32>
 // CHECK-NEXT: constant dense<0.0
 func @testIfConstant(%arg0: tensor<i1>) -> tensor<2xf32> {
 %cst_zero = constant dense<0.0> : tensor<2xf32>
@@ -67,18 +67,18 @@ func @testIfConstant(%arg0: tensor<i1>) -> tensor<2xf32> {
 // -----
 
 // Nested IfRegions
-// CHECK: func @tf.IfRegion1_else
+// CHECK: func private @tf.IfRegion1_else
 // CHECK-NEXT: "tf.Acos"
 // CHECK-NEXT: "tf.Abs"
 
-// CHECK: func @tf.IfRegion1_then
+// CHECK: func private @tf.IfRegion1_then
 // CHECK-NEXT: "tf.LogicalNot"
 // CHECK-NEXT: "tf.Asin"
 // CHECK-NEXT: "tf.If"({{.+}}) {else_branch = @tf.IfRegion_else, {{.+}} then_branch = @tf.IfRegion_then}
 
-// CHECK: func @tf.IfRegion_else
+// CHECK: func private @tf.IfRegion_else
 // CHECK-NEXT: "tf.Neg"
-// CHECK: func @tf.IfRegion_then
+// CHECK: func private @tf.IfRegion_then
 // CHECK-NEXT: "tf.Abs"
 
 func @testNested(%arg0: tensor<i1>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
@@ -169,10 +169,10 @@ func @testIf2Result(%arg0: tensor<i1>, %arg1: tensor<2xf32>) -> tensor<2xf32> {
 // -----
 
 // No inputs, some outputs for IfRegion
-// CHECK: func @tf.IfRegion_else() -> tensor<2xf32>
+// CHECK: func private @tf.IfRegion_else() -> tensor<2xf32>
 // CHECK-NEXT: constant dense<1.000000e+00>
 // CHECK-NEXT: "tf.Neg"
-// CHECK: func @tf.IfRegion_then() -> tensor<2xf32>
+// CHECK: func private @tf.IfRegion_then() -> tensor<2xf32>
 // CHECK-NEXT: constant dense<0.000000e+00>
 // CHECK-NEXT: "tf.Abs"
 func @testSimple(%arg0: tensor<i1>) -> tensor<2xf32> {
@@ -193,9 +193,9 @@ func @testSimple(%arg0: tensor<i1>) -> tensor<2xf32> {
 
 // No outputs, some inputs for IfRegion
 //
-// CHECK: func @tf.IfRegion_else(%arg0: tensor<*xf32>)
+// CHECK: func private @tf.IfRegion_else(%arg0: tensor<*xf32>)
 // CHECK-NEXT: "tf.Neg"
-// CHECK: func @tf.IfRegion_then(%arg0: tensor<*xf32>)
+// CHECK: func private @tf.IfRegion_then(%arg0: tensor<*xf32>)
 // CHECK-NEXT: "tf.Abs"
 func @printer(tensor<*xf32>) -> ()
 func @testNoOutputs(%arg0: tensor<i1>, %arg1: tensor<*xf32>) -> () {
@@ -214,9 +214,9 @@ func @testNoOutputs(%arg0: tensor<i1>, %arg1: tensor<*xf32>) -> () {
 
 // -----
 // Check ToBool folding for IfRegion
-// CHECK: func @tf.IfRegion_else(%arg0: tensor<*xf32>) -> tensor<*xf32>
+// CHECK: func private @tf.IfRegion_else(%arg0: tensor<*xf32>) -> tensor<*xf32>
 // CHECK-NEXT: "tf.Neg"
-// CHECK: func @tf.IfRegion_then(%arg0: tensor<*xf32>) -> tensor<*xf32>
+// CHECK: func private @tf.IfRegion_then(%arg0: tensor<*xf32>) -> tensor<*xf32>
 // CHECK-NEXT: "tf.Abs"
 // CHECK-LABEL: @testToBoolFold
 func @testToBoolFold(%arg0: tensor<i32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
@@ -237,11 +237,11 @@ func @testToBoolFold(%arg0: tensor<i32>, %arg1: tensor<*xf32>) -> tensor<*xf32>
 // -----
 
 // Simple WhileRegion
-// CHECK: func @tf.WhileRegion_body{{.+}}{sym_visibility = "private"}
+// CHECK: func private @tf.WhileRegion_body{{.+}}
 // CHECK: "tf.Add"
 // CHECK: constant dense<1>
 // CHECK: "tf.Sub"
-// CHECK:func @tf.WhileRegion_cond{{.+}}{sym_visibility = "private"}
+// CHECK:func private @tf.WhileRegion_cond{{.+}}
 // CHECK: constant dense<0>
 // CHECK: "tf.NotEqual"
 // CHECK-LABEL: testValidWhileRegion
@@ -275,11 +275,11 @@ func @testValidWhileRegion(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>) -> tensor
 // -----
 
 // WhileRegion with type mismatch
-// CHECK: func @tf.WhileRegion_body{{.+}}{sym_visibility = "private"}
+// CHECK: func private @tf.WhileRegion_body{{.+}}
 // CHECK: "tf.Add"
 // CHECK: constant dense<1>
 // CHECK: "tf.Sub"
-// CHECK:func @tf.WhileRegion_cond{{.+}}{sym_visibility = "private"}
+// CHECK:func private @tf.WhileRegion_cond{{.+}}
 // CHECK: constant dense<0>
 // CHECK: "tf.NotEqual"
 // CHECK-LABEL: testWhileRegionTypeMismatch
@@ -309,11 +309,11 @@ func @testWhileRegionTypeMismatch(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>) ->
 // -----
 
 // WhileRegion with constant sinking
-// CHECK: func @tf.WhileRegion_body{{.+}}{sym_visibility = "private"}
+// CHECK: func private @tf.WhileRegion_body{{.+}}
 // CHECK: constant dense<1>
 // CHECK: "tf.Add"
 // CHECK: "tf.Sub"
-// CHECK:func @tf.WhileRegion_cond{{.+}}{sym_visibility = "private"}
+// CHECK:func private @tf.WhileRegion_cond{{.+}}
 // CHECK: constant dense<0>
 // CHECK: "tf.NotEqual"
 // CHECK-LABEL: testWhileRegionConstantSink
@@ -342,12 +342,12 @@ func @testWhileRegionConstantSink(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>) ->
 // -----
 
 // WhileRegion with implicitly captured extern value in cond
-// CHECK: func @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
 // CHECK: "tf.Add"
 // CHECK: constant dense<1>
 // CHECK: "tf.Sub"
 // CHECK: return %{{.+}}, %{{.+}}, %arg2 : tensor<*xf32>, tensor<i32>, tensor<i32>
-// CHECK: func @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
 // CHECK: "tf.NotEqual"(%arg1, %arg2)
 // CHECK-LABEL: testWhileRegionExternInCond
 func @testWhileRegionExternInCond(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>, %arg2 : tensor<i32>) -> tensor<*xf32> {
@@ -376,12 +376,12 @@ func @testWhileRegionExternInCond(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>, %a
 // -----
 
 // WhileRegion with implicitly captured extern value in body
-// CHECK: func @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
 // CHECK: %0 = "tf.Add"(%arg0, %arg0)
 // CHECK: %1 = "tf.Sub"(%arg1, %arg2)
 // CHECK: return %0, %1, %arg2
 
-// CHECK: func @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
 // CHECK: constant dense<0>
 // CHECK: "tf.NotEqual"
 
@@ -412,9 +412,9 @@ func @testWhileRegionExternInBody(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>, %a
 // -----
 
 // WhileRegion with implicitly captured extern value in cond and body
-// CHECK: func @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>, %arg3: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>, %arg3: tensor<i32>)
 // CHECK: return %{{.+}}, %{{.+}}, %arg2, %arg3
-// CHECK: func @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>, %arg3: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>, %arg3: tensor<i32>)
 // CHECK-LABEL: testWhileRegionExternInBodyAndCond
 func @testWhileRegionExternInBodyAndCond(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>, %arg2 : tensor<i32>) -> tensor<*xf32> {
 %cst = constant dense<4> : tensor<i32>
@@ -443,9 +443,9 @@ func @testWhileRegionExternInBodyAndCond(%arg0 : tensor<*xf32>, %arg1 : tensor<i
 // -----
 
 // WhileRegion with same value implicitly captured in cond and body
-// CHECK: func @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_body(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
 // CHECK: return %{{.+}}, %{{.+}}, %arg2
-// CHECK: func @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
+// CHECK: func private @tf.WhileRegion_cond(%arg0: tensor<*xf32>, %arg1: tensor<i32>, %arg2: tensor<i32>)
 // CHECK-LABEL: testWhileRegionSameExternInBodyAndCond
 func @testWhileRegionSameExternInBodyAndCond(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>, %arg2 : tensor<i32>) -> tensor<*xf32> {
 %cst = constant dense<4> : tensor<i32>
@@ -559,9 +559,9 @@ func @testWhileRegionTrivialMultipleCasts(%arg0 : tensor<*xf32>, %arg1 : tensor<
 // -----
 
 // Almost trivially transformable with extern values
-// CHECK: func @tf.WhileRegion_body
+// CHECK: func private @tf.WhileRegion_body
 // CHECK: call @while_body
-// CHECK: @tf.WhileRegion_cond
+// CHECK: func private @tf.WhileRegion_cond
 // CHECK: call @while_cond
 // CHECK-LABEL: testWhileRegionExtern
 func @while_cond(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>) -> tensor<i1>
@@ -589,9 +589,9 @@ func @testWhileRegionExtern(%arg0 : tensor<*xf32>, %arg1 : tensor<i32>) -> tenso
 // -----
 
 // Almost trivially transformable, mismatching block arguments
-// CHECK: func @tf.WhileRegion_body
+// CHECK: func private @tf.WhileRegion_body
 // CHECK: call @while_body
-// CHECK: @tf.WhileRegion_cond
+// CHECK: func private @tf.WhileRegion_cond
 // CHECK: call @while_cond
 // CHECK-LABEL: testWhileRegionBlockArgMismatch
 func @while_cond(%arg0 : tensor<i32>, %arg1 : tensor<*xf32>) -> tensor<i1>
@@ -17,8 +17,8 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
 return %1 : tensor<f32>
 }
 
-// CHECK-NOT: func @callee
-func @callee(%arg0: tensor<!tf.resource>) -> tensor<*xf32> attributes {sym_visibility = "private", tf.signature.is_stateful} {
+// CHECK-NOT: func private @callee
+func private @callee(%arg0: tensor<!tf.resource>) -> tensor<*xf32> attributes {tf.signature.is_stateful} {
 %0 = "tf.ReadVariableOp"(%arg0) {device = ""} : (tensor<!tf.resource>) -> tensor<*xf32>
 return %0 : tensor<*xf32>
 }
@@ -644,7 +644,7 @@ func @callee(%arg0: tensor<f32>, %arg1: tensor<*x!tf.resource<tensor<f32>>>, %ar
 %2 = "tf.AddV2"(%1, %arg2) : (tensor<f32>, tensor<f32>) -> tensor<f32>
 return %2 : tensor<f32>
 }
-// CHECK: func @callee_resource_lifted(%[[A0:.*]]: tensor<f32>, %[[A1:.*]]: tensor<f32>, %[[A2:.*]]: tensor<f32>) -> tensor<f32>
+// CHECK: func private @callee_resource_lifted(%[[A0:.*]]: tensor<f32>, %[[A1:.*]]: tensor<f32>, %[[A2:.*]]: tensor<f32>) -> tensor<f32>
 // CHECK-NEXT: %[[ADD0:.*]] = "tf.AddV2"(%[[A1]], %[[A0]])
 // CHECK-NEXT: %[[ADD1:.*]] = "tf.AddV2"(%[[ADD0]], %[[A2]])
 // CHECK-NEXT: return %[[ADD1]]
@ -691,7 +691,7 @@ func @callee(%arg0: tensor<*x!tf.resource<tensor<f32>>>, %arg1: tensor<*x!tf.res
|
||||
"tf.AssignVariableOp"(%arg0, %1) {dtype = i32} : (tensor<*x!tf.resource<tensor<f32>>>, tensor<f32>) -> ()
|
||||
return %arg0 : tensor<*x!tf.resource<tensor<f32>>>
|
||||
}
|
||||
// CHECK: func @callee_resource_lifted(%[[A0:.*]]: tensor<f32>, %[[A1:.*]]: tensor<f32>, %[[A2:.*]]: tensor<f32>) -> tensor<f32>
|
||||
// CHECK: func private @callee_resource_lifted(%[[A0:.*]]: tensor<f32>, %[[A1:.*]]: tensor<f32>, %[[A2:.*]]: tensor<f32>) -> tensor<f32>
|
||||
// CHECK-NEXT: %[[ADD:.*]] = "tf.AddV2"(%[[A1]], %[[A2]])
|
||||
// CHECK-NEXT: return %[[ADD]]
|
||||
|
||||
@ -743,7 +743,7 @@ func @callee(%arg0: tensor<*x!tf.resource<tensor<f32>>>) -> tensor<f32> {
|
||||
return %1 : tensor<f32>
|
||||
}
|
||||
|
||||
// CHECK: func @callee_resource_lifted(%[[A0:.*]]: tensor<f32>) -> tensor<f32>
|
||||
// CHECK: func private @callee_resource_lifted(%[[A0:.*]]: tensor<f32>) -> tensor<f32>
|
||||
// CHECK-NEXT: return %[[A0]]
|
||||
|
||||
// -----
|
||||
|
@ -287,14 +287,14 @@ func @main(%arg0: tensor<i1>) -> () {
|
||||
}
|
||||
|
||||
// CHECK: func @callee(%[[AARG0:.*]]: tensor<!tf.resource>, %[[AARG1:.*]]: tensor<i1>) -> tensor<!tf.resource>
|
||||
func @callee(%arg0: tensor<!tf.resource>, %arg1: tensor<i1>) -> tensor<!tf.resource> attributes {sym_visibility = "public"} {
|
||||
func @callee(%arg0: tensor<!tf.resource>, %arg1: tensor<i1>) -> tensor<!tf.resource> {
|
||||
%elem = "tf._SomeOp"(%arg1) : (tensor<i1>) -> tensor<f32>
|
||||
// CHECK: "tf.StackPushV2"
%push = "tf.StackPushV2"(%arg0, %elem) {swap_memory = false} : (tensor<!tf.resource>, tensor<f32>) -> tensor<f32>
return %arg0 : tensor<!tf.resource>
}
|
// CHECK: func @callee_stack_decomposed(%[[ARG0:.*]]: tensor<!tf.resource<tensor<10xf32>>>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<!tf.resource<tensor<1xi32>>>)
// CHECK: func private @callee_stack_decomposed(%[[ARG0:.*]]: tensor<!tf.resource<tensor<10xf32>>>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<!tf.resource<tensor<1xi32>>>)
// CHECK-NOT: "tf.StackPushV2"
// CHECK: %[[UPDATE:.*]] = "tf.XlaDynamicUpdateSlice"
// CHECK: "tf.AssignVariableOp"(%[[TARG0:.*]], %[[UPDATE]])
@ -326,8 +326,8 @@ func @main(%arg0: tensor<i1>) -> () {
return
}
|
// CHECK: func @callee(%[[ARG0:.*]]: tensor<!tf.resource<tensor<10xf32>>>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<!tf.resource<tensor<1xi32>>>)
func @callee(%arg0: tensor<!tf.resource>, %arg1: tensor<i1>) -> tensor<!tf.resource> attributes {sym_visibility = "private"} {
// CHECK: func private @callee(%[[ARG0:.*]]: tensor<!tf.resource<tensor<10xf32>>>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<!tf.resource<tensor<1xi32>>>)
func private @callee(%arg0: tensor<!tf.resource>, %arg1: tensor<i1>) -> tensor<!tf.resource> {
%elem = "tf._SomeOp"(%arg1) : (tensor<i1>) -> tensor<f32>
// CHECK-NOT: "tf.StackPushV2"
// CHECK: %[[UPDATE:.*]] = "tf.XlaDynamicUpdateSlice"
@ -348,7 +348,7 @@ func @main() -> () {
return
}
// CHECK: func @callee()
func @callee() -> () attributes {sym_visibility = "public"} {
func @callee() -> () {
%max_size = "tf.Const"() {value = dense<10> : tensor<i32>} : () -> tensor<i32>
// CHECK-NOT: tf.Stack
%stack = "tf.StackV2"(%max_size) {elem_type = f32, stack_name = "s"} : (tensor<i32>) -> tensor<!tf.resource>
|
@ -432,7 +432,7 @@ func @main() -> () {
}
// CHECK-LABEL: func @callee
// CHECK-SAME: (%[[OCARG0:.*]]: tensor<!tf.resource>) -> tensor<!tf.resource>
func @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> attributes {sym_visibility = "public"} {
func @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> {
%const1 = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
%elem = "tf._SomeOp"() : () -> tensor<3xf32>
%flow = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
@ -442,7 +442,7 @@ func @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> attributes {sy
%gwrite2 = "tf.TensorArrayWriteV3"(%grad2#0, %const1, %elem, %grad2#1) : (tensor<!tf.resource>, tensor<i32>, tensor<3xf32>, tensor<f32>) -> tensor<f32>
return %arg0 : tensor<!tf.resource>
}
// CHECK: func @callee_tensorarray_decomposed(%[[CARG0:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG1:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG2:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>)
// CHECK: func private @callee_tensorarray_decomposed(%[[CARG0:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG1:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG2:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>)
// CHECK: %[[READ1:.*]] = "tf.ReadVariableOp"(%[[CARG1]]) : (tensor<!tf.resource<tensor<5x3xf32>>>) -> tensor<5x3xf32>
// CHECK: %[[UPDATE1:.*]] = "tf.XlaDynamicUpdateSlice"(%[[READ1]],
// CHECK: "tf.AssignVariableOp"(%[[CARG1]], %[[UPDATE1]])
@ -480,8 +480,8 @@ func @main() -> () {
%read = "tf.TensorArrayReadV3"(%call2, %index, %ta#1) : (tensor<!tf.resource>, tensor<i32>, tensor<f32>) -> tensor<3xf32>
return
}
// CHECK: func @callee(%[[CARG0:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG1:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG2:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>)
func @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> attributes {sym_visibility = "private"} {
// CHECK: func private @callee(%[[CARG0:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG1:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[CARG2:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>)
func private @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> {
// CHECK: %[[READ1:.*]] = "tf.ReadVariableOp"(%[[CARG1]]) : (tensor<!tf.resource<tensor<5x3xf32>>>) -> tensor<5x3xf32>
// CHECK: %[[UPDATE1:.*]] = "tf.XlaDynamicUpdateSlice"(%[[READ1]],
// CHECK: "tf.AssignVariableOp"(%[[CARG1]], %[[UPDATE1]])
@ -508,8 +508,8 @@ func @main() -> () {
%call = "tf.PartitionedCall"() {f = @callee, config = "", config_proto = "", executor_type = ""} : () -> tensor<i32>
return
}
// CHECK: func @callee() -> tensor<i32>
func @callee() -> tensor<i32> attributes {sym_visibility = "public"} {
// CHECK: func private @callee() -> tensor<i32>
func @callee() -> tensor<i32> {
%size = "tf.Const"() {value = dense<5> : tensor<i32>} : () -> tensor<i32>
// CHECK: "tf.MlirLocalVarOp"() : () -> tensor<!tf.resource<tensor<5xf32>>>
// CHECK: "tf.AssignVariableOp"
@ -567,7 +567,7 @@ func @main() -> () {
return
}
|
// CHECK-LABEL: func @callee
// CHECK-LABEL: func private @callee
// CHECK-SAME: %[[VAR:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[GVAR:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>
func @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> attributes {sym_visibility = "private"} {
%index = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
|
@ -472,14 +472,14 @@ func @main(%arg0: tensor<i1>) -> () {
}
|
// CHECK: func @callee(%[[AARG0:.*]]: tensor<!tf.variant<tensor<f32>>>, %[[AARG1:.*]]: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>>
func @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>> attributes {sym_visibility = "public"} {
func @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>> {
%elem = "tf._SomeOp"(%arg1) : (tensor<i1>) -> tensor<f32>
// CHECK: "tf.TensorListPushBack"
%push = "tf.TensorListPushBack"(%arg0, %elem) : (tensor<!tf.variant<tensor<f32>>>, tensor<f32>) -> tensor<!tf.variant<tensor<f32>>>
return %push : tensor<!tf.variant<tensor<f32>>>
}
|
// CHECK: func @callee_tensorlist_decomposed(%[[ARG0:.*]]: tensor<10xf32>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<1xi32>) -> (tensor<10xf32>, tensor<1xi32>)
// CHECK: func private @callee_tensorlist_decomposed(%[[ARG0:.*]]: tensor<10xf32>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<1xi32>) -> (tensor<10xf32>, tensor<1xi32>)
// CHECK-NOT: "tf.TensorListPushBack"
// CHECK: %[[UPDATE:.*]] = "tf.XlaDynamicUpdateSlice"
// CHECK: %[[CONST1:.*]] = "tf.Const"() {value = dense<1> : tensor<1xi32>} : () -> tensor<1xi32>
@ -514,7 +514,7 @@ func @main(%arg0: tensor<i1>) -> () {
return
}
|
// CHECK: func @callee(%[[ARG0:.*]]: tensor<10xf32>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<1xi32>) -> (tensor<10xf32>, tensor<1xi32>)
// CHECK: func private @callee(%[[ARG0:.*]]: tensor<10xf32>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<1xi32>) -> (tensor<10xf32>, tensor<1xi32>)
func @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>> attributes {sym_visibility = "private"} {
%elem = "tf._SomeOp"(%arg1) : (tensor<i1>) -> tensor<f32>
|
@ -533,12 +533,12 @@ func @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tens
// Tests PartitionedCall op with no signature change on callee.
|
// CHECK-LABEL: func @main
func @main() -> () {
func @main() {
"tf.PartitionedCall"() {f = @callee, config = "", config_proto = "", executor_type = ""} : () -> ()
return
}
// CHECK: func @callee()
func @callee() -> () attributes {sym_visibility = "public"} {
// CHECK: func private @callee()
func @callee() {
%elem_shape = "tf.Const"() {value = dense<> : tensor<0xi32>} : () -> tensor<0xi32>
%max_size = "tf.Const"() {value = dense<10> : tensor<i32>} : () -> tensor<i32>
// CHECK-NOT: tf.EmptyTensorList
|
@ -62,7 +62,7 @@ class TestModule(tf.Module):
# CHECK-SAME: attributes{{.*}}tf_saved_model.exported_names = ["caller"]
# CHECK: "tf.StatefulPartitionedCall"{{.*}}f = @[[CALLEE_INTERNAL]]
#
# CHECK: func @[[CALLEE_INTERNAL]]
# CHECK: func private @[[CALLEE_INTERNAL]]
# CHECK-NOT: tf_saved_model.exported_names
|
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
|
@ -35,8 +35,8 @@ from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common_v1
# CHECK-SAME: else_branch = @[[else]]
# CHECK-SAME: then_branch = @[[then]]
|
# CHECK: func @[[else]](
# CHECK: func @[[then]](
# CHECK: func private @[[else]](
# CHECK: func private @[[then]](
|
|
def Test():
|
@ -111,14 +111,14 @@ module attributes {tf_saved_model.semantics} {
return %val : tensor<f32>
}
|
// CHECK: func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
// CHECK: func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_callee_callee} : (tensor<*x!tf.resource>) -> (tensor<f32>)
return %val : tensor<f32>
}
|
// CHECK: func @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
// CHECK: func private @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func private @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
%c0 = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32>
"tf.AssignVariableOp"(%arg0, %c0) : (tensor<*x!tf.resource>, tensor<f32>) -> ()
return %c0 : tensor<f32>
@ -145,14 +145,14 @@ module attributes {tf_saved_model.semantics} {
return %val : tensor<f32>
}
|
// CHECK: func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
// CHECK: func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_callee_callee} : (tensor<*x!tf.resource>) -> (tensor<f32>)
return %val : tensor<f32>
}
|
// CHECK: func @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
// CHECK: func private @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func private @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
%c0 = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32>
"tf.AssignVariableOp"(%arg0, %c0) : (tensor<*x!tf.resource>, tensor<f32>) -> ()
return %c0 : tensor<f32>
@ -178,14 +178,14 @@ module attributes {tf_saved_model.semantics} {
}
|
|
// CHECK: func @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
// CHECK: func private @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func private @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @g} : (tensor<*x!tf.resource>) -> (tensor<f32>)
return %val : tensor<f32>
}
|
// CHECK: func @g(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func @g(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
// CHECK: func private @g(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func private @g(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
%val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f} : (tensor<*x!tf.resource>) -> (tensor<f32>)
return %val : tensor<f32>
}
@ -211,8 +211,8 @@ module attributes {tf_saved_model.semantics} {
}
|
|
// CHECK: func @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
// CHECK: func private @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32>
func private @f(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
%c0 = "tf.Const"() { value = dense<1.0> : tensor<f32> } : () -> tensor<f32>
"tf.AssignAddVariableOp"(%arg0, %c0) : (tensor<*x!tf.resource>, tensor<f32>) -> ()
return %c0 : tensor<f32>
|
@ -859,7 +859,7 @@ module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:wor
// CHECK-SAME: mlir_module
// CHECK-SAME: func @main
// CHECK-SAME: tf.B
// CHECK-SAME: func @nested_func
// CHECK-SAME: func private @nested_func
// CHECK-SAME: tf.D
// CHECK-NOT: func = @tpu0_func
// CHECK: device = "/job:worker/replica:0/task:0/device:CPU:0"
@ -908,7 +908,7 @@ module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:wor
// CHECK-SAME: mlir_module
// CHECK-SAME: func @main
// CHECK-SAME: tf.B
// CHECK-SAME: func @referenced_func
// CHECK-SAME: func private @referenced_func
// CHECK-SAME: tf.D
// CHECK-NOT: func = @tpu0_func
// CHECK: "tf_device.launch"
@ -1007,7 +1007,7 @@ module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:wor
// CHECK-SAME: func @main
// CHECK-SAME: tf.B
// CHECK-COUNT-2: call @referenced_func
// CHECK-COUNT-1: func @referenced_func
// CHECK-COUNT-1: func private @referenced_func
// CHECK-SAME: tf.D
// CHECK-NOT: func = @tpu0_func
// CHECK: "tf_device.launch"
@ -1161,13 +1161,13 @@ module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:wor
// CHECK-SAME: mlir_module
// CHECK-SAME: func @main
// CHECK-SAME: tf.B
// CHECK-SAME: func @referenced_func3
// CHECK-SAME: func private @referenced_func3
// CHECK-SAME: tf.I
// CHECK-SAME: func @referenced_func2
// CHECK-SAME: func private @referenced_func2
// CHECK-SAME: tf.H
// CHECK-SAME: func @referenced_func1
// CHECK-SAME: func private @referenced_func1
// CHECK-SAME: tf.G
// CHECK-SAME: func @referenced_func0
// CHECK-SAME: func private @referenced_func0
// CHECK-SAME: tf.F
// CHECK: "tf_device.launch"
// CHECK-NEXT: "tf.TPUCompileSucceededAssert"(%[[COMPILE_OUTPUT]]#0)
|
@ -44,9 +44,9 @@ module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0"
%10 = "tf.Identity"(%9) {device = ""} : (tensor<i1>) -> tensor<i1>
return %10 : tensor<i1>
}
// CHECK-LABEL: func @_func
// CHECK-SAME: [[FUNCINPUT0:.*]]: tensor<2x112x112x12xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, [[FUNCINPUT1:%.*]]: tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, [[FUNCINPUT2:%.*]]: tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, [[VAL_59:%.*]]: tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) attributes {sym_visibility = "private"} {
func @_func(%arg0: tensor<2x224x224x3xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg1: tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg2: tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg3: tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) attributes {sym_visibility = "private"} {
// CHECK-LABEL: func private @_func
// CHECK-SAME: [[FUNCINPUT0:.*]]: tensor<2x112x112x12xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, [[FUNCINPUT1:%.*]]: tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, [[FUNCINPUT2:%.*]]: tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, [[VAL_59:%.*]]: tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
func private @_func(%arg0: tensor<2x224x224x3xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg1: tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg2: tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg3: tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<7x7x3x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<i64> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
%0 = "tf.Const"() {value = dense<1> : tensor<i64>} : () -> tensor<i64>
%1 = "tf.Const"() {value = dense<0> : tensor<1x1xi32>} : () -> tensor<1x1xi32>
%2 = "tf.Const"() {value = dense<[7, 7, 3, 64]> : tensor<4xi32>} : () -> tensor<4xi32>
@ -112,9 +112,9 @@ module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:COMPOSI
}
return
}
// CHECK-LABEL: func @_func
// CHECK-SAME: [[FUNCINPUT00:.*]]: tensor<2x112x112x12xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg1: tensor<2x1xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg2: tensor<7x7x3x64xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg3: tensor<64x1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg4: tensor<1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg5: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg6: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg7: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg8: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) attributes {sym_visibility = "private"} {
func @_func(%arg0: tensor<2x224x224x3xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg1: tensor<2x1xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg2: tensor<7x7x3x64xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg3: tensor<64x1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg4: tensor<1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg5: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg6: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg7: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg8: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) attributes {sym_visibility = "private"} {
// CHECK-LABEL: func private @_func
// CHECK-SAME: [[FUNCINPUT00:.*]]: tensor<2x112x112x12xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg1: tensor<2x1xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg2: tensor<7x7x3x64xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg3: tensor<64x1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg4: tensor<1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg5: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg6: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg7: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg8: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
func private @_func(%arg0: tensor<2x224x224x3xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg1: tensor<2x1xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg2: tensor<7x7x3x64xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg3: tensor<64x1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg4: tensor<1001xf32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg5: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg6: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg7: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, %arg8: tensor<f32> {mhlo.is_same_data_across_replicas, mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) -> (tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}, tensor<f32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
%0 = "tf.Const"() {value = dense<2.000000e+00> : tensor<f32>} : () -> tensor<f32>
%1 = "tf.Const"() {value = dense<1.000000e+00> : tensor<f32>} : () -> tensor<f32>
%2 = "tf.Const"() {value = dense<-1> : tensor<i32>} : () -> tensor<i32>
|
@ -118,6 +118,7 @@ cc_library(
"@llvm-project//mlir:SCFToStandard",
"@llvm-project//mlir:ShapeTransforms",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:StandardOpsTransforms",
"@llvm-project//mlir:AllPassesAndDialects",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:Transforms",
|
@ -19,6 +19,7 @@ limitations under the License.
#include "mlir/Dialect/GPU/GPUDialect.h" // from @llvm-project
#include "mlir/Dialect/LLVMIR/LLVMDialect.h" // from @llvm-project
#include "mlir/Dialect/StandardOps/IR/Ops.h" // from @llvm-project
#include "mlir/Dialect/StandardOps/Transforms/Passes.h" // from @llvm-project
#include "tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/transforms/passes.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/transforms/rewriters.h"
@ -41,20 +42,23 @@ class TFKernelToLLVMPass : public TFKernelToLLVMPassBase<TFKernelToLLVMPass> {
ModuleOp m = getOperation();
|
// Populate type conversions.
LLVMTypeConverter type_converter(m.getContext());
MLIRContext* ctx = m.getContext();
LLVMTypeConverter type_converter(ctx);
type_converter.addConversion([&](tf_framework::OpKernelContextType type) {
return LLVM::LLVMType::getInt8PtrTy(m.getContext());
return LLVM::LLVMType::getInt8PtrTy(ctx);
});
|
// Populate patterns.
OwningRewritePatternList patterns;
|
populateStdExpandOpsPatterns(ctx, patterns);
populateStdToLLVMConversionPatterns(type_converter, patterns);
tf_framework::PopulateTFFrameworkToLLVMConversionPatterns(&type_converter,
&patterns);
populateGpuToLLVMConversionPatterns(type_converter, patterns, "gpu.binary");
|
// Set target.
ConversionTarget target(getContext());
ConversionTarget target(*ctx);
target.addLegalDialect<LLVM::LLVMDialect>();
target.addIllegalDialect<gpu::GPUDialect, StandardOpsDialect,
tf_framework::TFFrameworkDialect>();
|
@ -266,8 +266,8 @@ func @main(%arg0: tensor<i32>) -> tensor<i32> {
return %0 : tensor<i32>
}
|
// CHECK: func @callee([[CALLEE_ARG0:%.*]]: tensor<i32>, [[CALLEE_ARG1:%.*]]: !mhlo.token) -> (tensor<i32>, !mhlo.token)
func @callee(%arg0: tensor<i32>) -> tensor<i32> attributes {sym_visibility = "private"} {
// CHECK: func private @callee([[CALLEE_ARG0:%.*]]: tensor<i32>, [[CALLEE_ARG1:%.*]]: !mhlo.token) -> (tensor<i32>, !mhlo.token)
func private @callee(%arg0: tensor<i32>) -> tensor<i32> {
// CHECK-NOT: "mhlo.create_token"
|
// CHECK: [[SEND_ARG0_TOKEN:%.*]] = "mhlo.send"([[CALLEE_ARG0]], [[CALLEE_ARG1]])
@ -319,7 +319,7 @@ func @callee(%arg0: tensor<i32>) -> tensor<i32> {
return %0 : tensor<i32>
}
|
// CHECK: func [[CALLEE_CLONE]]([[CALLEE_CLONE_ARG0:%.*]]: tensor<i32>, [[CALLEE_CLONE_ARG1:%.*]]: !mhlo.token) -> (tensor<i32>, !mhlo.token)
// CHECK: func private [[CALLEE_CLONE]]([[CALLEE_CLONE_ARG0:%.*]]: tensor<i32>, [[CALLEE_CLONE_ARG1:%.*]]: !mhlo.token) -> (tensor<i32>, !mhlo.token)
// CHECK-NOT: "mhlo.create_token"
|
// CHECK: [[CLONE_SEND_ARG0_TOKEN:%.*]] = "mhlo.send"([[CALLEE_CLONE_ARG0]], [[CALLEE_CLONE_ARG1]])
@ -352,8 +352,8 @@ func @main(%arg0: tensor<i32>) {
return
}
|
// CHECK: func @callee([[CALLEE_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func @callee() attributes {sym_visibility = "private"} {
// CHECK: func private @callee([[CALLEE_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func private @callee() {
// CHECK-NOT: "mhlo.create_token"
|
// CHECK: [[ZERO:%.*]] = mhlo.constant dense<0>
@ -370,8 +370,8 @@ func @callee() attributes {sym_visibility = "private"} {
|
// Test only the top level function generates a token.
|
// CHECK: func @callee0()
func @callee0() attributes {sym_visibility = "private"} {
// CHECK: func private @callee0()
func private @callee0() {
// CHECK: [[INIT_TOKEN:%.*]] = "mhlo.create_token"
|
// CHECK: call @callee1([[INIT_TOKEN]])
@ -379,8 +379,8 @@ func @callee0() attributes {sym_visibility = "private"} {
return
}
|
// CHECK: func @callee1([[CALLEE1_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func @callee1() attributes {sym_visibility = "private"} {
// CHECK: func private @callee1([[CALLEE1_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func private @callee1() {
// CHECK-NOT: "mhlo.create_token"
|
// CHECK: [[CALL_2:%.*]] = call @callee2([[CALLEE1_ARG0]])
@ -390,8 +390,8 @@ func @callee1() attributes {sym_visibility = "private"} {
return
}
|
// CHECK: func @callee2([[CALLEE2_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func @callee2() attributes {sym_visibility = "private"} {
// CHECK: func private @callee2([[CALLEE2_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func private @callee2() {
// CHECK-NOT: "mhlo.create_token"
|
// CHECK: [[RECV_TUPLE:%.*]] = "mhlo.recv"([[CALLEE2_ARG0]])
@ -430,8 +430,8 @@ func @callee4() {
return
}
|
// CHECK: func @callee5([[CALLEE5_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func @callee5() attributes {sym_visibility = "private"} {
// CHECK: func private @callee5([[CALLEE5_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
func private @callee5() {
// CHECK-NOT: "mhlo.create_token"
|
// CHECK: [[RECV_TUPLE:%.*]] = "mhlo.recv"([[CALLEE5_ARG0]])
@ -445,7 +445,7 @@ func @callee5() attributes {sym_visibility = "private"} {
return
}
|
// CHECK: func @callee4{{.+}}([[CALLEE4_ARG0:%.*]]: !mhlo.token) -> !mhlo.token attributes {sym_visibility = "private"}
// CHECK: func private @callee4{{.+}}([[CALLEE4_ARG0:%.*]]: !mhlo.token) -> !mhlo.token
// CHECK-NOT: "mhlo.create_token"
// CHECK: [[CALL_5:%.*]] = call @callee5([[CALLEE4_ARG0]])
// CHECK: return [[CALL_5]]
@ -784,9 +784,9 @@ func @if_function_call(%arg0: tensor<i1>, %arg1: tensor<f32>) -> tensor<f32> {
return %0 : tensor<f32>
}
|
// CHECK-LABEL: func @callee
// CHECK-LABEL: func private @callee
// CHECK-SAME: ([[CALLEE_ARG0:%.*]]: tensor<f32>, [[CALLEE_ARG1:%.*]]: !mhlo.token) -> !mhlo.token
func @callee(%arg0: tensor<f32>) attributes {sym_visibility = "private"} {
func private @callee(%arg0: tensor<f32>) {
// CHECK: [[SEND_TOKEN:%.*]] = "mhlo.send"
"tf.XlaSendToHost"(%arg0) {key = "send_key"} : (tensor<f32>) -> ()
|
@ -1068,7 +1068,7 @@ func @unsupported_ancestor(%arg0: tensor<?x?xf32>, %arg1: tensor<f32>) {
return
}
|
func @callee() attributes {sym_visibility = "private"} {
func private @callee() {
"tf._XlaHostComputeMlir"() {recv_key = "host_compute_channel_recv", send_key = "host_compute_channel_send", tpu_core = 0 : i64} : () -> ()
return
}
|
@ -1,4 +1,4 @@
// RUN: tf-mlir-translate -hlo-text-to-mlir-hlo %s -o - | FILECHECK_OPTS="" FileCheck %s -DPRIVATE="attributes {sym_visibility = \"private\"}"
// RUN: tf-mlir-translate -hlo-text-to-mlir-hlo %s -o - | FILECHECK_OPTS="" FileCheck %s
|
HloModule main
|
@ -7,8 +7,7 @@ ENTRY %dummy_main (Arg_0.1: f32[]) -> f32[] {
ROOT %Arg_0.1 = f32[] parameter(0)
}
|
// CHECK-LABEL: func @test_simple
// CHECK-SAME: [[PRIVATE]]
// CHECK-LABEL: func private @test_simple
%test_simple (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[] {
%Arg_0.1 = f32[4]{0} parameter(0)
%Arg_1.2 = f32[4]{0} parameter(1)
@ -21,8 +20,8 @@ ENTRY %dummy_main (Arg_0.1: f32[]) -> f32[] {
ROOT %dot.4 = f32[] dot(f32[4]{0} %add.42, f32[4]{0} %Arg_1.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
|
// CHECK-LABEL: func @test_after_all
// CHECK-SAME: ([[VAL_0:%.*]]: !mhlo.token, [[VAL_1:%.*]]: !mhlo.token) -> !mhlo.token [[PRIVATE]]
// CHECK-LABEL: func private @test_after_all
// CHECK-SAME: ([[VAL_0:%.*]]: !mhlo.token, [[VAL_1:%.*]]: !mhlo.token) -> !mhlo.token
%test_after_all (token0: token[], token1: token[] ) -> token[] {
token0 = token[] parameter(0)
token1 = token[] parameter(1)
@ -37,7 +36,7 @@ add {
ROOT add = f32[] add(lhs, rhs)
}
|
// CHECK-LABEL: func @test_all_reduce
// CHECK-LABEL: func private @test_all_reduce
// CHECK-SAME: ([[INPUT:%.*]]: tensor<8xf32>)
%test_all_reduce {
input = f32[8] parameter(0)
@ -52,7 +51,7 @@ add {
}
|
|
// CHECK-LABEL: func @test_and
// CHECK-LABEL: func private @test_and
%test_and (Arg_0.1: pred[4], Arg_1.2: pred[4]) -> pred[4] {
%Arg_0.1 = pred[4] parameter(0)
%Arg_1.2 = pred[4] parameter(1)
@ -61,7 +60,7 @@ add {
ROOT %and.3 = pred[4] and(pred[4] %Arg_0.1, pred[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_atan2
// CHECK-LABEL: func private @test_atan2
// CHECK-SAME: ([[VAL_0:%.*]]: tensor<4xi32>, [[VAL_1:%.*]]: tensor<4xi32>) -> tensor<4xi32>
%test_atan2 (Arg_0.1: s32[4], Arg_1.2: s32[4]) -> s32[4] {
%Arg_0.1 = s32[4] parameter(0)
@ -71,7 +70,7 @@ add {
ROOT %atan2 = s32[4] atan2(s32[4] %Arg_0.1, s32[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_broadcast_in_dim
// CHECK-LABEL: func private @test_broadcast_in_dim
%test_broadcast_in_dim {
%Arg_0.1 = f32[1, 2] parameter(0)
|
@ -82,7 +81,7 @@ add {
ROOT broadcast.4 = f32[3,1,2] broadcast(%Arg_0.1), dimensions={1, 2}
}
|
// CHECK-LABEL: func @test_batch_norm_grad
// CHECK-LABEL: func private @test_batch_norm_grad
%test_batch_norm_grad (input: f32[2,2,2,2], scale: f32[2], mean: f32[2], variance: f32[2], grad_output: f32[2,2,2,2]) -> (f32[2,2,2,2], f32[2], f32[2]) {
%input = f32[2,2,2,2] parameter(0)
%scale = f32[2] parameter(1)
@ -96,20 +95,20 @@ add {
ROOT %batch-norm-grad = (f32[2,2,2,2], f32[2], f32[2]) batch-norm-grad(f32[2,2,2,2] %input, f32[2] %scale, f32[2] %mean, f32[2] %variance, f32[2,2,2,2] %grad_output), epsilon=0.001, feature_index=1
}
|
// CHECK-LABEL: func @call(%arg0: tensor<i64>) -> tensor<i64>
// CHECK-LABEL: func private @call(%arg0: tensor<i64>) -> tensor<i64>
%call (arg_1: s64[]) -> s64[] {
%arg_1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
ROOT %compare.2 = s64[] add(%arg_1, %arg_1), metadata={op_type="Less" op_name="Less"}
}
|
// CHECK-LABEL: func @test_call
// CHECK-LABEL: func private @test_call
%test_call (arg0.1: s64[]) -> s64[] {
%arg0.1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
// CHECK-NEXT: call @call(%arg0) : (tensor<i64>) -> tensor<i64>
ROOT %call.2 = s64[] call(%arg0.1), to_apply=%call
}
|
// CHECK-LABEL: func @test_cholesky
// CHECK-LABEL: func private @test_cholesky
// CHECK-SAME: ([[ARG:%.*]]: tensor<1x291x291xf32>) -> tensor<1x291x291xf32>
%test_cholesky (a: f32[1,291,291]) -> f32[1,291,291] {
%a = f32[1,291,291] parameter(0)
@ -118,7 +117,7 @@ add {
}
|
|
// CHECK-LABEL: func @test_clamp(
// CHECK-LABEL: func private @test_clamp(
%test_clamp (Arg_0.1: f32[], Arg_1.2: f32[4], Arg_1.3: f32[]) -> f32[4] {
%Arg_0.1 = f32[] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -128,7 +127,7 @@ add {
ROOT %clamp.3 = f32[4] clamp(f32[] %Arg_0.1, f32[4] %Arg_1.2, f32[] %Arg_2.3)
}
|
// CHECK-LABEL: func @test_collective_permute
// CHECK-LABEL: func private @test_collective_permute
// CHECK-SAME: ([[ARG:%.*]]: tensor<128x32xf32>) -> tensor<128x32xf32>
%test_collective_permute (input: f32[128,32]) -> f32[128,32] {
%input = f32[128,32]{1,0} parameter(0)
@ -137,7 +136,7 @@ add {
}
|
|
// CHECK-LABEL: func @test_compare(%arg0: tensor<3xf32>, %arg1: tensor<3xf32>, %arg2: tensor<3xf32>) -> tensor<3xi1>
// CHECK-LABEL: func private @test_compare(%arg0: tensor<3xf32>, %arg1: tensor<3xf32>, %arg2: tensor<3xf32>) -> tensor<3xi1>
%test_compare (Arg_0.1: f32[3], Arg_1.2: f32[3], Arg_2.3: f32[3]) -> pred[3] {
%Arg_0.1 = f32[3] parameter(0)
%Arg_1.2 = f32[3] parameter(1)
@ -154,7 +153,7 @@ add {
ROOT %compare.6 = pred[3] compare(Arg_0.1, Arg_2.3), direction=GT
}
|
// CHECK-LABEL: func @test_complex
// CHECK-LABEL: func private @test_complex
%test_complex (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> c64[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -163,7 +162,7 @@ add {
ROOT %complex.3 = c64[4] complex(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_concat(%arg0: tensor<4x1xf32>, %arg1: tensor<4x2xf32>) -> tensor<4x3xf32>
// CHECK-LABEL: func private @test_concat(%arg0: tensor<4x1xf32>, %arg1: tensor<4x2xf32>) -> tensor<4x3xf32>
%test_concat (Arg_0.1: f32[4, 1], Arg_1.2: f32[4, 2]) -> f32[4, 3] {
%Arg_0.1 = f32[4, 1] parameter(0)
%Arg_1.2 = f32[4, 2] parameter(1)
@ -172,7 +171,7 @@ add {
ROOT %concatenate.3 = f32[4, 3] concatenate(f32[4, 1] %Arg_0.1, f32[4, 2] %Arg_1.2), dimensions={1}
}
}
|
// CHECK-LABEL: func @test_constant
// CHECK-LABEL: func private @test_constant
%test_constant {
|
// Scalar/0D tensor constant
@ -202,8 +201,8 @@ add {
|
// TODO(b/129422361) Potentially update when copy, reshape, and conv have actual
// implementations with attributes, etc.
// CHECK-LABEL: func @test_conv(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<256x32x32x6xf32>) -> tuple<tensor<256x30x30x16xf32>> attributes {sym_visibility = "private"} {
// CHECK-LABEL: func private @test_conv(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<256x32x32x6xf32>) -> tuple<tensor<256x30x30x16xf32>> {
%test_conv {
%arg0.1 = f32[256,32,32,6]{3,2,1,0} parameter(0), metadata={op_name="HLO_Args"}
|
@ -250,7 +249,7 @@ add {
}
|
// Test for padding attribute shape in convolution
// CHECK-LABEL: func @test_convolve1D_padding
// CHECK-LABEL: func private @test_convolve1D_padding
%test_convolve1D_padding (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,5,1] {
%input = f32[1,2,1] parameter(0)
%filter = f32[1,1,1] parameter(1)
@ -259,7 +258,7 @@ add {
ROOT %convolution = f32[1,5,1] convolution(f32[1,2,1] %input, f32[1,1,1] %filter), feature_group_count=1, dim_labels=b0f_0io->b0f, window={pad=1_2 size=1}
}
|
// CHECK-LABEL: func @test_convert(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf64>
// CHECK-LABEL: func private @test_convert(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf64>
%test_convert (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f64[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -274,7 +273,7 @@ add {
ROOT %add.5 = f64[4] add(f64[4] %convert.3, f64[4] %convert.4)
}
|
// CHECK-LABEL: func @test_cosine(%arg0: tensor<1x16x16x3xf32>) -> tensor<1x16x16x3xf32>
// CHECK-LABEL: func private @test_cosine(%arg0: tensor<1x16x16x3xf32>) -> tensor<1x16x16x3xf32>
%test_cosine (arg0.1: f32[1,16,16,3]) -> f32[1,16,16,3] {
%arg0.1 = f32[1,16,16,3]{3,2,1,0} parameter(0), metadata={op_name="HLO_Args"}
|
@ -282,7 +281,7 @@ add {
ROOT %cosine.3 = f32[1,16,16,3]{3,2,1,0} cosine(f32[1,16,16,3]{3,2,1,0} %arg0.1)
}
|
// CHECK-LABEL: func @test_custom_call
// CHECK-LABEL: func private @test_custom_call
// CHECK-SAME: [[ARG_0:%.*]]: tensor<2x3xf32>, [[ARG_1:%.*]]: tensor<5x5xf32>) -> tensor<1x2x3xf32>
%test_custom_call (arg1: f32[2,3], arg2: f32[5,5]) -> f32[1,2,3] {
%arg1 = f32[2,3] parameter(0)
@ -291,7 +290,7 @@ add {
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[2,3] %arg1, f32[5,5] %arg2), custom_call_target="foo", backend_config="bar", custom_call_has_side_effect=true
}
|
// CHECK-LABEL: func @test_div(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
// CHECK-LABEL: func private @test_div(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
%test_div (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -300,7 +299,7 @@ add {
ROOT %divide.3 = f32[4] divide(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_dot(%arg0: tensor<1x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<f32>
// CHECK-LABEL: func private @test_dot(%arg0: tensor<1x4xf32>, %arg1: tensor<4x1xf32>) -> tensor<f32>
%test_dot (Arg_0.1: f32[1, 4], Arg_1.2: f32[4, 1]) -> f32[] {
%Arg_0.1 = f32[1, 4] parameter(0)
%Arg_1.2 = f32[4, 1] parameter(1)
@ -340,7 +339,7 @@ add {
ROOT %dot.6 = f32[] dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
|
// CHECK-LABEL: func @test_dynamic_slice
// CHECK-LABEL: func private @test_dynamic_slice
// CHECK-SAME: [[OPERAND:%.*]]: tensor<2x2x258xi32>, [[START_IDX_1:%.*]]: tensor<i32>, [[START_IDX_2:%.*]]: tensor<i32>, [[START_IDX_3:%.*]]: tensor<i32>
%test_dynamic_slice (operand: s32[2,2,258], start_indices: s32[3]) -> s32[1,1,32] {
%operand = s32[2,2,258] parameter(0)
@ -352,7 +351,7 @@ add {
ROOT %dynamic-slice = s32[1,1,32] dynamic-slice(s32[2,2,258] %operand, s32[] %start_idx_1, s32[] %start_idx_2, s32[] %start_idx_3), dynamic_slice_sizes={1,1,32}
}
|
// CHECK-LABEL: func @test_dynamic_update_slice_1(%arg0: tensor<4x4xf32>, %arg1: tensor<1x4xf32>, %arg2: tensor<i32>, %arg3: tensor<i32>) -> tensor<4x4xf32>
// CHECK-LABEL: func private @test_dynamic_update_slice_1(%arg0: tensor<4x4xf32>, %arg1: tensor<1x4xf32>, %arg2: tensor<i32>, %arg3: tensor<i32>) -> tensor<4x4xf32>
%test_dynamic_update_slice_1 (Arg_0.1: f32[4, 4], Arg_1.2: f32[1, 4], Arg_2.3: f32[], Arg_3.4: f32[]) -> f32[4, 4] {
%Arg_0.1 = f32[4, 4] parameter(0)
%Arg_1.2 = f32[1, 4] parameter(1)
@ -363,7 +362,7 @@ add {
ROOT %dynamic-update-slice.5 = f32[4, 4] dynamic-update-slice(%Arg_0.1, %Arg_1.2, %Arg_2.3, %Arg_3.4)
}
|
// CHECK-LABEL: func @test_dynamic_update_slice_2(%arg0: tensor<4xf32>, %arg1: tensor<2xf32>, %arg2: tensor<i32>) -> tensor<4xf32>
// CHECK-LABEL: func private @test_dynamic_update_slice_2(%arg0: tensor<4xf32>, %arg1: tensor<2xf32>, %arg2: tensor<i32>) -> tensor<4xf32>
%test_dynamic_update_slice_2 (Arg_0.1: f32[4], Arg_1.2: f32[2], Arg_2.3: f32[]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[2] parameter(1)
@ -373,7 +372,7 @@ add {
ROOT %dynamic-update-slice.5 = f32[4] dynamic-update-slice(%Arg_0.1, %Arg_1.2, %Arg_2.3)
}
|
// CHECK-LABEL: func @test_exponential(%arg0: tensor<16xf32>) -> tensor<16xf32>
// CHECK-LABEL: func private @test_exponential(%arg0: tensor<16xf32>) -> tensor<16xf32>
%test_exponential (arg0.1: f32[16]) -> f32[16] {
%arg0.1 = f32[16] parameter(0)
|
@ -381,7 +380,7 @@ add {
ROOT %exp.2 = f32[16] exponential(f32[16] %arg0.1)
}
|
// CHECK-LABEL: func @test_expm1(%arg0: tensor<16xf32>) -> tensor<16xf32>
// CHECK-LABEL: func private @test_expm1(%arg0: tensor<16xf32>) -> tensor<16xf32>
%test_expm1 (arg0.1: f32[16]) -> f32[16] {
%arg0.1 = f32[16] parameter(0)
|
@ -389,14 +388,14 @@ add {
ROOT %expm1.2 = f32[16] exponential-minus-one(f32[16] %arg0.1)
}
|
// CHECK-LABEL: func @test_fft(%arg0: tensor<3x9xf32>) -> tensor<3x5xcomplex<f32>>
// CHECK-LABEL: func private @test_fft(%arg0: tensor<3x9xf32>) -> tensor<3x5xcomplex<f32>>
%test_fft {
%arg0.1 = f32[3,9]{1,0} parameter(0), parameter_replication={false}, metadata={op_name="XLA_Args"}
// CHECK: "mhlo.fft"(%arg0) {fft_length = dense<9> : tensor<1xi64>, fft_type = "RFFT"
ROOT %fft.2 = c64[3,5]{1,0} fft(%arg0.1), fft_type=RFFT, fft_length={9}, metadata={op_type="RFFT" op_name="rfft"}
}
|
// CHECK-LABEL: func @test_floor(
// CHECK-LABEL: func private @test_floor(
// CHECK-SAME: [[A0:%.+]]: tensor<16xf32>) -> tensor<16xf32>
%test_floor (arg0.1: f32[16]) -> f32[16] {
%arg0.1 = f32[16] parameter(0)
@ -405,7 +404,7 @@ add {
ROOT %floor.2 = f32[16] floor(f32[16] %arg0.1)
}
|
// CHECK-LABEL: func @test_gather(
// CHECK-LABEL: func private @test_gather(
// CHECK-SAME: [[ARG0:%.+]]: tensor<200x100x300xf32>, [[ARG1:%.+]]: tensor<10x2xi32>) -> tensor<10x300xf32>
%test_gather (arg.0: f32[200,100,300], arg.1: s32[10,2]) -> f32[10,300] {
%arg.0 = f32[200,100,300] parameter(0)
@ -427,7 +426,7 @@ add {
slice_sizes={1,1,300}
}
|
// CHECK-LABEL: func @test_get_dimension_size
// CHECK-LABEL: func private @test_get_dimension_size
// CHECK-SAME: ([[ARG:%.*]]: tensor<4x2xf32>)
%test_get_dimension_size (Arg_0.1: f32[4,2]) -> s32[] {
%Arg_0.1 = f32[4,2] parameter(0)
@ -435,7 +434,7 @@ add {
ROOT %get-dimension-size.2 = s32[] get-dimension-size(f32[4,2] %Arg_0.1), dimensions={1}
}
|
// CHECK-LABEL: func @test_imag
// CHECK-LABEL: func private @test_imag
%test_imag (Arg_0.1: c64[4]) -> f32[4] {
%Arg_0.1 = c64[4] parameter(0)
|
@ -443,7 +442,7 @@ add {
ROOT %imag.3 = f32[4] imag(c64[4] %Arg_0.1)
}
|
// CHECK-LABEL: func @test_infeed
// CHECK-LABEL: func private @test_infeed
// CHECK-SAME: ([[TOKEN:%.*]]: !mhlo.token) -> tuple<tensor<3xi32>, !mhlo.token>
%test_infeed (token0: token[]) -> (s32[3], token[]) {
%token0 = token[] parameter(0)
@ -453,19 +452,19 @@ add {
}
|
|
// CHECK-LABEL: func @test_iota_1() -> tensor<4xf32>
// CHECK-LABEL: func private @test_iota_1() -> tensor<4xf32>
%test_iota_1 () -> f32[4] {
// CHECK-NEXT: "mhlo.iota"() {iota_dimension = 0 : i64} : () -> tensor<4xf32>
ROOT %iota.0 = f32[4] iota(), iota_dimension=0
}
|
// CHECK-LABEL: func @test_iota_2() -> tensor<4x5xf32>
// CHECK-LABEL: func private @test_iota_2() -> tensor<4x5xf32>
%test_iota_2 () -> f32[4, 5] {
// CHECK-NEXT: "mhlo.iota"() {iota_dimension = 1 : i64} : () -> tensor<4x5xf32>
ROOT %iota.0 = f32[4, 5] iota(), iota_dimension=1
}
|
// CHECK-LABEL: func @test_log(%arg0: tensor<16xf32>) -> tensor<16xf32>
// CHECK-LABEL: func private @test_log(%arg0: tensor<16xf32>) -> tensor<16xf32>
%test_log (arg0.1: f32[16]) -> f32[16] {
%arg0.1 = f32[16] parameter(0)
|
@ -473,7 +472,7 @@ add {
ROOT %log.2 = f32[16] log(f32[16] %arg0.1)
}
|
// CHECK-LABEL: func @test_log1p(%arg0: tensor<16xf32>) -> tensor<16xf32>
// CHECK-LABEL: func private @test_log1p(%arg0: tensor<16xf32>) -> tensor<16xf32>
%test_log1p (arg0.1: f32[16]) -> f32[16] {
%arg0.1 = f32[16] parameter(0)
|
@ -488,7 +487,7 @@ add {
ROOT add = f32[] add(lhs, rhs)
}
|
// CHECK-LABEL: func @test_map
// CHECK-LABEL: func private @test_map
// CHECK-SAME: [[ARG_0:%.*]]: tensor<4xf32>, [[ARG_1:%.*]]: tensor<4xf32>) -> tensor<4xf32>
%test_map {
param0 = f32[4]{0} parameter(0)
@ -503,7 +502,7 @@ add {
|
|
|
// CHECK-LABEL: func @test_maximum(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
// CHECK-LABEL: func private @test_maximum(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
%test_maximum (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -512,7 +511,7 @@ add {
ROOT %maximum.3 = f32[4] maximum(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_minimum(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
// CHECK-LABEL: func private @test_minimum(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
%test_minimum (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -521,7 +520,7 @@ add {
ROOT %minimum.3 = f32[4] minimum(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_multiply(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
// CHECK-LABEL: func private @test_multiply(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
%test_multiply (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -530,7 +529,7 @@ add {
ROOT %multiply.3 = f32[4] multiply(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_negate(%arg0: tensor<16xf32>) -> tensor<16xf32>
// CHECK-LABEL: func private @test_negate(%arg0: tensor<16xf32>) -> tensor<16xf32>
%test_negate (arg0.1: f32[16]) -> f32[16] {
%arg0.1 = f32[16] parameter(0)
|
@ -538,7 +537,7 @@ add {
ROOT %negate.2 = f32[16] negate(f32[16] %arg0.1)
}
|
// CHECK-LABEL: func @test_not(%arg0: tensor<16xi1>) -> tensor<16xi1>
// CHECK-LABEL: func private @test_not(%arg0: tensor<16xi1>) -> tensor<16xi1>
%test_not (arg0.1: pred[16]) -> pred[16] {
%arg0.1 = pred[16] parameter(0)
|
@ -546,7 +545,7 @@ add {
ROOT %not.2 = pred[16] not(pred[16] %arg0.1)
}
|
// CHECK-LABEL: func @test_or
// CHECK-LABEL: func private @test_or
%test_or (Arg_0.1: pred[4], Arg_1.2: pred[4]) -> pred[4] {
%Arg_0.1 = pred[4] parameter(0)
%Arg_1.2 = pred[4] parameter(1)
@ -555,7 +554,7 @@ add {
ROOT %or.3 = pred[4] or(pred[4] %Arg_0.1, pred[4] %Arg_1.2)
}
|
// CHECK-LABEL: func @test_outfeed
// CHECK-LABEL: func private @test_outfeed
// CHECK-SAME: ([[DATA:%.*]]: tensor<3xi32>, [[TOKEN:%.*]]: !mhlo.token) -> !mhlo.token
%test_outfeed (Arg_0.1: s32[3], Arg_1.2: token[]) -> token[] {
%Arg_0.1 = s32[3] parameter(0)
@ -565,7 +564,7 @@ add {
ROOT %outfeed.3 = token[] outfeed(s32[3] %Arg_0.1, token[] %Arg_1.2), outfeed_config="foobar"
}
|
// CHECK-LABEL: func @test_pad(%arg0: tensor<4xf32>, %arg1: tensor<f32>) -> tensor<4xf32>
// CHECK-LABEL: func private @test_pad(%arg0: tensor<4xf32>, %arg1: tensor<f32>) -> tensor<4xf32>
%test_pad (Arg_0.1: f32[4], Arg_1.2: f32[]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[] parameter(1)
@ -574,7 +573,7 @@ add {
ROOT %pad.3 = f32[4] pad(%Arg_0.1, %Arg_1.2), padding=0_0_0
}
|
// CHECK-LABEL: func @test_pad_edge(%arg0: tensor<4x4x4xf32>, %arg1: tensor<f32>) -> tensor<7x11x15xf32>
// CHECK-LABEL: func private @test_pad_edge(%arg0: tensor<4x4x4xf32>, %arg1: tensor<f32>) -> tensor<7x11x15xf32>
%test_pad_edge (Arg_0.1: f32[4, 4, 4], Arg_1.2: f32[]) -> f32[7, 11, 15] {
%Arg_0.1 = f32[4, 4, 4] parameter(0)
%Arg_1.2 = f32[] parameter(1)
@ -583,7 +582,7 @@ add {
ROOT %pad.3 = f32[7, 11, 15] pad(%Arg_0.1, %Arg_1.2), padding=1_2x3_4x5_6
}
|
// CHECK-LABEL: func @test_pad_interior(%arg0: tensor<4xf32>, %arg1: tensor<f32>) -> tensor<10xf32>
// CHECK-LABEL: func private @test_pad_interior(%arg0: tensor<4xf32>, %arg1: tensor<f32>) -> tensor<10xf32>
%test_pad_interior (Arg_0.1: f32[4], Arg_1.2: f32[]) -> f32[10] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[] parameter(1)
@ -592,7 +591,7 @@ add {
ROOT %pad.3 = f32[10] pad(%Arg_0.1, %Arg_1.2), padding=0_0_2
}
|
// CHECK-LABEL: func @test_popcnt(%arg0: tensor<16xi32>) -> tensor<16xi32>
// CHECK-LABEL: func private @test_popcnt(%arg0: tensor<16xi32>) -> tensor<16xi32>
%test_popcnt (arg0.1: s32[16]) -> s32[16] {
%arg0.1 = s32[16] parameter(0)
|
@ -600,7 +599,7 @@ add {
ROOT %popcnt.2 = s32[16] popcnt(s32[16] %arg0.1)
}
|
// CHECK-LABEL: func @test_pow(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
// CHECK-LABEL: func private @test_pow(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32>
%test_pow (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -609,7 +608,7 @@ add {
ROOT %power.3 = f32[4] power(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
}
|
||||
|
||||
// CHECK-LABEL: func @test_rng_normal
|
||||
// CHECK-LABEL: func private @test_rng_normal
|
||||
// CHECK-SAME: ([[ARG0:%.*]]: tensor<f32>, [[ARG1:%.*]]: tensor<f32>) -> tensor<2x3x5xf32>
|
||||
%test_rng_normal (Arg_0.1: f32[], Arg_1.2: f32[]) -> f32[2,3,5] {
|
||||
%Arg_0.1 = f32[] parameter(0)
|
||||
@ -619,7 +618,7 @@ add {
|
||||
ROOT %rng.4 = f32[2,3,5] rng(f32[] %Arg_0.1, f32[] %Arg_1.2), distribution=rng_normal
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @test_rng_uniform
|
||||
// CHECK-LABEL: func private @test_rng_uniform
|
||||
// CHECK-SAME: ([[ARG0:%.*]]: tensor<f32>, [[ARG1:%.*]]: tensor<f32>) -> tensor<2x3x5xf32>
|
||||
%test_rng_uniform (Arg_0.1: f32[], Arg_1.2: f32[]) -> f32[2,3,5] {
|
||||
%Arg_0.1 = f32[] parameter(0)
|
||||
@ -629,7 +628,7 @@ add {
|
||||
ROOT %rng.4 = f32[2,3,5] rng(f32[] %Arg_0.1, f32[] %Arg_1.2), distribution=rng_uniform
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @test_real
|
||||
// CHECK-LABEL: func private @test_real
|
||||
%test_real (Arg_0.1: c64[4]) -> f32[4] {
|
||||
%Arg_0.1 = c64[4] parameter(0)
|
||||
|
||||
@ -660,7 +659,7 @@ add {
|
||||
ROOT %add.3 = f32[] add(f32[] %Arg_0.1, f32[] %Arg_1.2)
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @test_reduce
|
||||
// CHECK-LABEL: func private @test_reduce
|
||||
// CHECK-SAME: ([[ARG0:%.*]]: tensor<4x4xf32>, [[ARG1:%.*]]: tensor<4xf32>, [[ARG2:%.*]]: tensor<f32>) -> tuple<tuple<tensor<f32>, tensor<f32>>, tensor<f32>>
|
||||
%test_reduce (Arg_0.1: f32[4, 4], Arg_1.2: f32[4], Arg_2.3: f32[]) -> ((f32[], f32[]), f32[]) {
|
||||
%Arg_0.1 = f32[4, 4] parameter(0)
|
||||
@ -694,7 +693,7 @@ add {
|
||||
ROOT %tuple.6 = ((f32[], f32[]), f32[]) tuple(%reduce.1, %sub.5)
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @test_reduce_window
|
||||
// CHECK-LABEL: func private @test_reduce_window
|
||||
// CHECK-SAME: ([[ARG0:%.*]]: tensor<2x17x31x7xf32>, [[ARG1:%.*]]: tensor<f32>)
|
||||
%test_reduce_window (Arg_0.1: f32[2,17,31,7], Arg_1.2: f32[]) -> f32[2,5,8,7] {
|
||||
%Arg_0.1 = f32[2,17,31,7] parameter(0)
|
||||
@ -712,7 +711,7 @@ add {
|
||||
ROOT %reduce-window.1 = f32[2,5,8,7] reduce-window(f32[2,17,31,7] %Arg_0.1, f32[] %Arg_1.2), window={size=1x2x2x1 stride=1x4x4x1 pad=0_0x2_0x0_2x0_0 rhs_dilate=1x2x2x1}, to_apply=%reduce_helper.3
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @test_remainder
|
||||
// CHECK-LABEL: func private @test_remainder
|
||||
// CHECK-SAME: ([[VAL_0:%.*]]: tensor<4xf32>, [[VAL_1:%.*]]: tensor<4xf32>)
|
||||
%test_remainder (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
|
||||
%Arg_0.1 = f32[4] parameter(0)
|
||||
@ -721,7 +720,7 @@ add {
|
||||
ROOT %remainder.3 = f32[4] remainder(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @test_reverse_1d(%arg0: tensor<4xf32>) -> tensor<4xf32>
|
||||
// CHECK-LABEL: func private @test_reverse_1d(%arg0: tensor<4xf32>) -> tensor<4xf32>
|
||||
%test_reverse_1d (Arg_0.1: f32[4]) -> f32[4] {
|
||||
%Arg_0.1 = f32[4] parameter(0)
|
||||
|
||||
@ -729,7 +728,7 @@ add {
|
||||
ROOT reverse.2 = f32[4] reverse(%Arg_0.1), dimensions={0}
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @test_reverse_2d(%arg0: tensor<4x4xf32>) -> tensor<4x4xf32>
// CHECK-LABEL: func private @test_reverse_2d(%arg0: tensor<4x4xf32>) -> tensor<4x4xf32>
%test_reverse_2d (Arg_0.1: f32[4, 4]) -> f32[4, 4] {
%Arg_0.1 = f32[4, 4] parameter(0)

@ -737,7 +736,7 @@ add {
ROOT reverse.2 = f32[4, 4] reverse(%Arg_0.1), dimensions={0, 1}
}

// CHECK-LABEL: func @test_rsqrt(
// CHECK-LABEL: func private @test_rsqrt(
// CHECK-SAME: [[ARG0:%.+]]: tensor<16xf32>) -> tensor<16xf32>
%test_rsqrt (arg0.1: f32[16]) -> f32[16] {
%arg0.1 = f32[16] parameter(0)
@ -746,7 +745,7 @@ add {
ROOT %rsqrt.2 = f32[16] rsqrt(f32[16] %arg0.1)
}

// CHECK-LABEL: func @test_scalar(%arg0: tensor<f32>) -> tensor<f32>
// CHECK-LABEL: func private @test_scalar(%arg0: tensor<f32>) -> tensor<f32>
%test_scalar (Arg_0.1: f32[]) -> f32[] {
// CHECK-NEXT: return %arg0 : tensor<f32>
ROOT %Arg_0.1 = f32[] parameter(0)
@ -766,7 +765,7 @@ add {
ROOT %scatter = f32[200,100,300] scatter(f32[200,100,300] %input_tensor, s64[10,2] %scatter_indices, f32[10,300] %updates), update_window_dims={1}, inserted_window_dims={0,1}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1, to_apply=%update_computation
}

// CHECK-LABEL: func @test_scatter
// CHECK-LABEL: func private @test_scatter
// CHECK-SAME: [[ARG_0:%.*]]: tensor<200x100x300xf32>, [[ARG_1:%.*]]: tensor<10x2xi64>, [[ARG_2:%.*]]: tensor<10x300xf32>) -> tensor<200x100x300xf32>
// CHECK: "mhlo.scatter"([[ARG_0]], [[ARG_1]], [[ARG_2]]) ( {
// CHECK: ^bb0([[LHS:%.*]]: tensor<f32>, [[RHS:%.*]]: tensor<f32>):
@ -783,7 +782,7 @@ add {
// CHECK-SAME: unique_indices = false


// CHECK-LABEL: func @test_select(%arg0: tensor<2x3xi1>, %arg1: tensor<2x3xi32>, %arg2: tensor<2x3xi32>) -> tensor<2x3xi32>
// CHECK-LABEL: func private @test_select(%arg0: tensor<2x3xi1>, %arg1: tensor<2x3xi32>, %arg2: tensor<2x3xi32>) -> tensor<2x3xi32>
%test_select {
%Arg_0.1 = pred[2,3] parameter(0)
%Arg_1.2 = s32[2,3] parameter(1)
@ -806,7 +805,7 @@ add {
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}

// CHECK-LABEL: func @test_select_and_scatter
// CHECK-LABEL: func private @test_select_and_scatter
// CHECK-SAME: [[INPUT:%.*]]: tensor<4x5xf32>, [[SOURCE:%.*]]: tensor<2x2xf32>, [[INIT_VAL:%.*]]: tensor<f32>
%test_select_and_scatter {
%input = f32[4,5] parameter(0)
@ -831,7 +830,7 @@ add {
// CHECK: return [[RESULT:%.*]] : tensor<4x5xf32>


// CHECK-LABEL: func @test_set_dimension_size
// CHECK-LABEL: func private @test_set_dimension_size
// CHECK-SAME: ([[ARG:%.*]]: tensor<4x4xf32>, [[SIZE:%.*]]: tensor<i32>)
%test_set_dimension_size (Arg_0.1: f32[4,4], Arg_1.2: s32[]) -> f32[4,<=4] {
%Arg_0.1 = f32[4,4] parameter(0)
@ -840,7 +839,7 @@ add {
ROOT %set-dimension-size.2 = f32[4,<=4] set-dimension-size(f32[4,4] %Arg_0.1, s32[] %Arg_1.2), dimensions={1}
}

// CHECK-LABEL: func @test_sine(%arg0: tensor<1x16x16x3xf32>) -> tensor<1x16x16x3xf32>
// CHECK-LABEL: func private @test_sine(%arg0: tensor<1x16x16x3xf32>) -> tensor<1x16x16x3xf32>
%test_sine (arg0.1: f32[1,16,16,3]) -> f32[1,16,16,3] {
%arg0.1 = f32[1,16,16,3]{3,2,1,0} parameter(0), metadata={op_name="HLO_Args"}

@ -859,7 +858,7 @@ add {
x = f32[1024]{0} parameter(0)
ROOT sorted = f32[1024]{0} sort(x), dimensions={0}, is_stable=true, to_apply=compare
}
// CHECK-LABEL: func @test_sort
// CHECK-LABEL: func private @test_sort
// CHECK-SAME: [[ARG:%.*]]: tensor<1024xf32>) -> tensor<1024xf32>
// CHECK: "mhlo.sort"([[ARG]]) ( {
// CHECK: ^bb0([[ARG0:%.*]]: tensor<f32>, [[ARG1:%.*]]: tensor<f32>):
@ -867,7 +866,7 @@ add {
// CHECK: "mhlo.return"([[CMP]]) : (tensor<i1>) -> ()
// CHECK: }) {dimension = 0 : i64, is_stable = true} : (tensor<1024xf32>) -> tensor<1024xf32>

// CHECK-LABEL: func @test_subtract
// CHECK-LABEL: func private @test_subtract
%test_subtract (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
%Arg_0.1 = f32[4] parameter(0)
%Arg_1.2 = f32[4] parameter(1)
@ -876,7 +875,7 @@ add {
ROOT %subtract.3 = f32[4] subtract(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
}

// CHECK-LABEL: func @test_tanh(%arg0: tensor<1x16x16x3xf32>) -> tensor<1x16x16x3xf32>
// CHECK-LABEL: func private @test_tanh(%arg0: tensor<1x16x16x3xf32>) -> tensor<1x16x16x3xf32>
%test_tanh (arg0.1: f32[1,16,16,3]) -> f32[1,16,16,3] {
%arg0.1 = f32[1,16,16,3]{3,2,1,0} parameter(0), metadata={op_name="HLO_Args"}

@ -884,7 +883,7 @@ add {
ROOT %tanh.3 = f32[1,16,16,3]{3,2,1,0} tanh(f32[1,16,16,3]{3,2,1,0} %arg0.1), metadata={op_type="Tanh" op_name="embedded_inference/tanh_model/Tanh"}
}

// CHECK-LABEL: func @test_transpose(%arg0: tensor<1x2x3x4xi32>) -> tensor<2x1x4x3xi32>
// CHECK-LABEL: func private @test_transpose(%arg0: tensor<1x2x3x4xi32>) -> tensor<2x1x4x3xi32>
%test_transpose {
%Arg_0.1 = s32[1,2,3,4] parameter(0)

@ -892,7 +891,7 @@ add {
ROOT %transpose.2 = s32[2,1,4,3] transpose(s32[1,2,3,4] %Arg_0.1), dimensions={1,0,3,2}
}

// CHECK-LABEL: func @test_triangular_solve
// CHECK-LABEL: func private @test_triangular_solve
// CHECK-SAME: ([[ARG_A:%.*]]: tensor<4x4xf32>, [[ARG_B:%.*]]: tensor<4x3xf32>) -> tensor<4x3xf32>
%test_triangular_solve (Arg_0.1: f32[4,4], Arg_1.2: f32[4,3]) -> f32[4,3] {
%Arg_0.1 = f32[4,4] parameter(0)
@ -905,7 +904,7 @@ add {
ROOT %triangular-solve.3 = f32[4,3] triangular-solve(f32[4,4] %Arg_0.1, f32[4,3] %Arg_1.2), left_side=true, lower=true, transpose_a=NO_TRANSPOSE, unit_diagonal=true
}

// CHECK-LABEL: func @test_tuple(%arg0: tensor<1xi32>, %arg1: tensor<1x2xf32>) -> tuple<tensor<1xi32>, tensor<1x2xf32>>
// CHECK-LABEL: func private @test_tuple(%arg0: tensor<1xi32>, %arg1: tensor<1x2xf32>) -> tuple<tensor<1xi32>, tensor<1x2xf32>>
%test_tuple(Arg_0.1: s32[1], Arg_1.2: f32[1, 2]) -> (s32[1], f32[1,2]) {
%Arg_0.1 = s32[1] parameter(0)
%Arg_1.2 = f32[1, 2] parameter(1)
@ -918,19 +917,19 @@ add {
}

// Test while op
// CHECK-LABEL: func @cond
// CHECK-LABEL: func private @cond
%cond (arg_1: s64[]) -> pred[] {
%arg_1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
ROOT %compare.2 = pred[] compare(%arg_1, %arg_1), direction=LT, metadata={op_type="Less" op_name="Less"}
}

// CHECK-LABEL: func @loop
// CHECK-LABEL: func private @loop
%loop (arg_1: s64[]) -> s64[] {
%arg_1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
ROOT %compare.2 = s64[] add(%arg_1, %arg_1), metadata={op_type="Less" op_name="Less"}
}

// CHECK-LABEL: func @test_while(%arg0: tensor<i64>) -> tensor<i64>
// CHECK-LABEL: func private @test_while(%arg0: tensor<i64>) -> tensor<i64>
%test_while (arg0.1: s64[]) -> s64[] {
%arg0.1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
// CHECK-NEXT: "mhlo.while"(%arg0) ( {
@ -945,7 +944,7 @@ add {
ROOT %while.2 = s64[] while(%arg0.1), body=%loop, condition=%cond
}

// CHECK-LABEL: func @test_xor
// CHECK-LABEL: func private @test_xor
// CHECK-SAME: ([[VAL_0:%.*]]: tensor<4xi1>, [[VAL_1:%.*]]: tensor<4xi1>) -> tensor<4xi1>
%test_xor (Arg_0.1: pred[4], Arg_1.2: pred[4]) -> pred[4] {
%Arg_0.1 = pred[4] parameter(0)
@ -955,7 +954,7 @@ add {
ROOT %xor.3 = pred[4] xor(pred[4] %Arg_0.1, pred[4] %Arg_1.2)
}

// CHECK-LABEL: func @test_shiftleft
// CHECK-LABEL: func private @test_shiftleft
// CHECK-SAME: ([[VAL_0:%.*]]: tensor<4xi32>, [[VAL_1:%.*]]: tensor<4xi32>) -> tensor<4xi32>
%test_shiftleft (Arg_0.1: s32[4], Arg_1.2: s32[4]) -> s32[4] {
%Arg_0.1 = s32[4] parameter(0)
@ -965,7 +964,7 @@ add {
ROOT %shiftleft = s32[4] shift-left(s32[4] %Arg_0.1, s32[4] %Arg_1.2)
}

// CHECK-LABEL: func @test_shiftright_arithmetic
// CHECK-LABEL: func private @test_shiftright_arithmetic
// CHECK-SAME: ([[VAL_0:%.*]]: tensor<4xi32>, [[VAL_1:%.*]]: tensor<4xi32>) -> tensor<4xi32>
%test_shiftright_arithmetic (Arg_0.1: s32[4], Arg_1.2: s32[4]) -> s32[4] {
%Arg_0.1 = s32[4] parameter(0)
@ -975,7 +974,7 @@ add {
ROOT %shiftright.arithmetic = s32[4] shift-right-arithmetic(s32[4] %Arg_0.1, s32[4] %Arg_1.2)
}

// CHECK-LABEL: func @test_shiftright_logical
// CHECK-LABEL: func private @test_shiftright_logical
// CHECK-SAME: ([[VAL_0:%.*]]: tensor<4xi32>, [[VAL_1:%.*]]: tensor<4xi32>) -> tensor<4xi32>
%test_shiftright_logical (Arg_0.1: s32[4], Arg_1.2: s32[4]) -> s32[4] {
%Arg_0.1 = s32[4] parameter(0)
@ -985,7 +984,7 @@ add {
ROOT %shiftright.logical = s32[4] shift-right-logical(s32[4] %Arg_0.1, s32[4] %Arg_1.2)
}

// CHECK-LABEL: func @complex_type
// CHECK-LABEL: func private @complex_type
// CHECK-SAME: (%[[ARG0:.*]]: tensor<2xcomplex<f32>>, %[[ARG1:.*]]: tensor<2xcomplex<f64>>) -> tuple<tensor<2xf32>, tensor<2xf64>>
%complex_type (Arg_0.1: c64[2], Arg_1.2: c128[2]) -> (f32[2], f64[2]) {
%Arg_0.1 = c64[2] parameter(0)
@ -998,7 +997,7 @@ add {
ROOT %tuple.5 = (f32[2], f64[2]) tuple(f32[2] %abs.3, f64[2] %abs.4)
}

// CHECK-LABEL: func @unsigned_int
// CHECK-LABEL: func private @unsigned_int
// CHECK-SAME: (%[[ARG0:.*]]: tensor<4xui16>)
%unsigned_int(Arg_0.1: u16[4]) -> u16[4] {
%Arg_0.1 = u16[4] parameter(0)
@ -1007,7 +1006,7 @@ add {
ROOT %not.2 = u16[4] not(u16[4] %Arg_0.1)
}

// CHECK-LABEL: func @rngbitgen
// CHECK-LABEL: func private @rngbitgen
// CHECK-SAME: (%[[ARG0:.*]]: tensor<3xui64>)
%rngbitgen (Arg_0.1: u64[3]) -> (u64[3], u32[2,2]) {
%Arg_0.1 = u64[3] parameter(0)
@ -1015,7 +1014,7 @@ add {
ROOT %rng-bit-generator.2 = (u64[3], u32[2,2]) rng-bit-generator(u64[3] %Arg_0.1), algorithm=rng_philox
}

// CHECK-LABEL: func @cbrt
// CHECK-LABEL: func private @cbrt
// CHECK-SAME: (%[[ARG0:.*]]: tensor<3x4xf32>)
%cbrt (Arg_0.1: f32[3,4]) -> f32[3,4] {
%Arg_0.1 = f32[3,4] parameter(0)
@ -1023,7 +1022,7 @@ add {
ROOT %cbrt = f32[3,4] cbrt(f32[3,4] %Arg_0.1)
}

// CHECK-LABEL: func @bitcast
// CHECK-LABEL: func private @bitcast
// CHECK-SAME: (%[[ARG0:.*]]: tensor<3x4xf32>) -> tensor<3x4x1xf32>
%bitcast (Arg_0.1: f32[3,4]) -> f32[3,4,1] {
%Arg_0.1 = f32[3,4] parameter(0)
@ -1031,7 +1030,7 @@ add {
ROOT %bitcast = f32[3,4,1] bitcast(f32[3,4] %Arg_0.1)
}

// CHECK-LABEL: func @reduce_precision
// CHECK-LABEL: func private @reduce_precision
// CHECK-SAME: (%[[ARG0:.*]]: tensor<3x4xf32>)
%reduce_precision (Arg_0.1: f32[3,4]) -> f32[3,4] {
%Arg_0.1 = f32[3,4] parameter(0)

@ -68,7 +68,7 @@ class MLIRConcreteFunctionImportTest(test.TestCase):
        tensor_spec.TensorSpec(None, dtypes.float32))
    mlir_module = mlir.convert_function(concrete_function)
    self.assertRegex(mlir_module, r'func @.*caller.*\(')
    self.assertRegex(mlir_module, r'func @.*callee.*\(')
    self.assertRegex(mlir_module, r'func private @.*callee.*\(')

  def testImportWithControlRet(self):

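The assertion change above pins down the visibility behavior this integrate introduces: functions that are only reachable through calls (the callee) are now imported with explicit private visibility, while exported entry points (the caller) stay public. A hedged, self-contained sketch of that behavior, assuming the public tf.mlir.experimental.convert_function wrapper around the mlir.convert_function helper used in the test:

import tensorflow as tf

@tf.function
def callee(x):
  return x + 1.0

@tf.function
def caller(x):
  # caller is the exported entry point; callee is only reachable via this call.
  return callee(x) * 2.0

concrete = caller.get_concrete_function(tf.TensorSpec(None, tf.float32))
mlir_text = tf.mlir.experimental.convert_function(concrete)
assert 'func @' in mlir_text          # the exported caller stays public
assert 'func private @' in mlir_text  # the called-only callee becomes private
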
@ -686,8 +686,8 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
|
||||
)
|
||||
|
||||
# Check out LLVM and MLIR from llvm-project.
|
||||
LLVM_COMMIT = "f147f59cd377a6be68e5ca5c343eb11df8e7ee6f"
|
||||
LLVM_SHA256 = "22cb626398e60d5bcb75ce61f59ae9df56ffedc75c40525214ff890e3e27e3d2"
|
||||
LLVM_COMMIT = "1cbf8e89b54de939420d53d7a528bec6fbaf0a55"
|
||||
LLVM_SHA256 = "8ec5f5a1330f69ec7b4a0365109a7b6b543df7ca98c02b1c5e13c7de4e58f662"
|
||||
LLVM_URLS = [
|
||||
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
|
||||
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
|
||||
|
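The workspace hunk bumps the pinned LLVM revision and the tarball digest together; the two must stay consistent or the download fails its integrity check. A rough sketch, not part of the commit, of how the new LLVM_SHA256 can be recomputed when LLVM_COMMIT changes (llvm_archive_sha256 is a hypothetical helper, and it assumes network access to the GitHub archive endpoint and that GitHub serves a byte-identical tarball):

import hashlib
import urllib.request

def llvm_archive_sha256(commit):
    # Stream the archive tarball for `commit` and hash it in 1 MiB chunks.
    url = "https://github.com/llvm/llvm-project/archive/%s.tar.gz" % commit
    digest = hashlib.sha256()
    with urllib.request.urlopen(url) as resp:
        for chunk in iter(lambda: resp.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(llvm_archive_sha256("1cbf8e89b54de939420d53d7a528bec6fbaf0a55"))

The mirror URL is listed first so most fetches hit the TensorFlow mirror; the digest printed here should match the LLVM_SHA256 value above.
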
third_party/mlir/BUILD
@ -3295,6 +3295,7 @@ cc_library(
        ":StandardOpsTransformsPassIncGen",
        ":StandardToLLVM",
        ":StandardToSPIRVTransforms",
        ":TosaDialect",
        ":Transforms",
        ":TransformsPassIncGen",
        ":VectorOps",
@ -3337,6 +3338,7 @@ cc_binary(
        "@llvm-project//mlir/test:TestPass",
        "@llvm-project//mlir/test:TestReducer",
        "@llvm-project//mlir/test:TestSPIRV",
        "@llvm-project//mlir/test:TestTosaDialect",
        "@llvm-project//mlir/test:TestTransforms",
        "@llvm-project//mlir/test:TestTypeDialect",
    ],
@ -4234,6 +4236,112 @@ cc_library(
    ],
)

gentbl(
    name = "TosaDialectIncGen",
    strip_include_prefix = "include",
    tbl_outs = [
        (
            "-gen-op-decls",
            "include/mlir/Dialect/Tosa/IR/TosaOps.h.inc",
        ),
        (
            "-gen-op-defs",
            "include/mlir/Dialect/Tosa/IR/TosaOps.cpp.inc",
        ),
        (
            "-gen-struct-attr-decls",
            "include/mlir/Dialect/Tosa/IR/TosaStructs.h.inc",
        ),
        (
            "-gen-struct-attr-defs",
            "include/mlir/Dialect/Tosa/IR/TosaStructs.cpp.inc",
        ),
        (
            "-gen-dialect-decls",
            "include/mlir/Dialect/Tosa/IR/TosaOpsDialect.h.inc",
        ),
        (
            "-gen-op-doc",
            "g3doc/Dialects/Tosa/TosaOps.md",
        ),
    ],
    tblgen = ":mlir-tblgen",
    td_file = "include/mlir/Dialect/Tosa/IR/TosaOps.td",
    td_srcs = [
        ":OpBaseTdFiles",
        "include/mlir/Dialect/Tosa/IR/TosaOpBase.td",
        "include/mlir/Dialect/Tosa/IR/TosaInterfaces.td",
        "include/mlir/Dialect/Tosa/IR/TosaTypesBase.td",
        "include/mlir/Interfaces/SideEffectInterfaces.td",
        "include/mlir/Interfaces/LoopLikeInterface.td",
    ],
)

gentbl(
    name = "TosaInterfacesIncGen",
    strip_include_prefix = "include",
    tbl_outs = [
        (
            "-gen-op-interface-decls",
            "include/mlir/Dialect/Tosa/IR/TosaInterfaces.h.inc",
        ),
        (
            "-gen-op-interface-defs",
            "include/mlir/Dialect/Tosa/IR/TosaInterfaces.cpp.inc",
        ),
    ],
    tblgen = ":mlir-tblgen",
    td_file = "include/mlir/Dialect/Tosa/IR/TosaInterfaces.td",
    td_srcs = [
        ":OpBaseTdFiles",
    ],
)

gentbl(
    name = "TosaPassIncGen",
    strip_include_prefix = "include",
    tbl_outs = [
        (
            "-gen-pass-decls -name TosaOpt",
            "include/mlir/Dialect/Tosa/Transforms/Passes.h.inc",
        ),
    ],
    tblgen = ":mlir-tblgen",
    td_file = "include/mlir/Dialect/Tosa/Transforms/Passes.td",
    td_srcs = [
        ":PassBaseTdFiles",
    ],
)

cc_library(
    name = "TosaDialect",
    srcs = glob([
        "lib/Dialect/Tosa/IR/*.cpp",
        "lib/Dialect/Tosa/IR/*.h",
        "lib/Dialect/Tosa/Utils/*.cpp",
        "lib/Dialect/Tosa/Transforms/*.cpp",
    ]),
    hdrs = glob([
        "include/mlir/Dialect/Tosa/IR/*.h",
        "include/mlir/Dialect/Tosa/Utils/*.h",
        "include/mlir/Dialect/Tosa/Transforms/*.h",
    ]),
    includes = ["include"],
    deps = [
        ":Dialect",
        ":IR",
        ":LoopLikeInterface",
        ":Pass",
        ":QuantOps",
        ":SideEffectInterfaces",
        ":StandardOps",
        ":TosaDialectIncGen",
        ":TosaInterfacesIncGen",
        ":TosaPassIncGen",
        ":TransformUtils",
    ],
)

# To reference all tablegen files here when checking for updates to them.
filegroup(
    name = "TdFiles",
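With the gentbl and cc_library targets above in place, downstream BUILD rules pull in the TOSA dialect like any other vendored MLIR library. A hypothetical consumer, illustrative only (MyTosaPass and its source path are not part of this commit):

cc_library(
    name = "MyTosaPass",  # hypothetical example target
    srcs = ["lib/Transforms/MyTosaPass.cpp"],  # hypothetical source
    deps = [
        ":Pass",
        ":TosaDialect",  # the library defined above
    ],
)
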
third_party/mlir/test.BUILD
@ -286,3 +286,17 @@ cc_library(
        "@llvm-project//mlir:LLVMDialect",
    ],
)

cc_library(
    name = "TestTosaDialect",
    srcs = glob([
        "lib/Dialect/Tosa/*.cpp",
    ]),
    deps = [
        "@llvm-project//mlir:IR",
        "@llvm-project//mlir:Pass",
        "@llvm-project//mlir:StandardOps",
        "@llvm-project//mlir:TosaDialect",
        "@llvm-project//mlir:Transforms",
    ],
)