[NFC] Update TF MLIR tests to use the visibility keyword instead of the sym_visibility attribute on functions. This is in preparation for https://reviews.llvm.org/D94200.

PiperOrigin-RevId: 350786273
Change-Id: Iacac85c76c204459e381e9dfe67ce97f686ca461
Author: Rahul Joshi, 2021-01-08 10:10:24 -08:00 (committed by TensorFlower Gardener)
parent 44de669463
commit 16407535d1
11 changed files with 39 additions and 39 deletions
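Every hunk below applies the same mechanical rewrite. As a reference, here is a minimal illustration of the change (the function @example_fn and its signature are made up for this note, not taken from any of the changed tests). Before, visibility was written as an attribute:

func @example_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
  return %arg0 : tensor<*xf32>
}

After, the same function spells its visibility with the keyword:

func private @example_fn(%arg0: tensor<*xf32>) -> tensor<*xf32> {
  return %arg0 : tensor<*xf32>
}

Tests that use sym_visibility = "nested" are rewritten the same way, to func nested @...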


@@ -13,12 +13,12 @@ func @main(%arg0: tensor<f32>, %arg1: tensor<f32>) -> (tensor<f32>) {
 return %3 : tensor<f32>
 }
-func @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 %0 = "tf.Add"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
 return %0 : tensor<*xf32>
 }
-func @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 %0 = "tf.Sub"(%arg0, %arg1) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
 return %0 : tensor<*xf32>
 }
@@ -40,23 +40,23 @@ func @main(%arg0: tensor<f32>, %arg1: tensor<f32>) -> (tensor<f32>) {
 return %3 : tensor<f32>
 }
-func @addormul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @addormul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 %0 = constant dense<false> : tensor<i1>
 %1 = "tf.If"(%0, %arg1, %arg0) {else_branch = @mul, then_branch = @add, is_stateless = true} : (tensor<i1>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
 return %1 : tensor<*xf32>
 }
-func @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 %0 = "tf.Sub"(%arg0, %arg1) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
 return %0 : tensor<*xf32>
 }
-func @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 %0 = "tf.Add"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
 return %0 : tensor<*xf32>
 }
-func @mul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @mul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
 %0 = "tf.Multiply"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
 return %0 : tensor<*xf32>
 }
@@ -82,12 +82,12 @@ func @main(%arg0: tensor<3x15x14x3xf32>) -> tensor<3x15x14x8xf32>
 return %4 : tensor<3x15x14x8xf32>
 }
-func @_functionalize_if_else_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_else_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
 %cst = constant dense<false> : tensor<i1>
 return %cst : tensor<i1>
 }
-func @_functionalize_if_then_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_then_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
 %cst = constant dense<true> : tensor<i1>
 return %cst : tensor<i1>
 }
@@ -115,12 +115,12 @@ func @main(%arg0: tensor<3x15x14x3xf32>) -> tensor<3x15x14x8xf32>
 return %4 : tensor<3x15x14x8xf32>
 }
-func @_functionalize_if_else_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_else_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
 %cst = constant dense<false> : tensor<i1>
 return %cst : tensor<i1>
 }
-func @_functionalize_if_then_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_then_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
 %0 = "tf.blah"() : () -> tensor<i1>
 return %0 : tensor<i1>
 }
@@ -151,12 +151,12 @@ func @main(%arg0: tensor<3x15x14x3xf32>) -> tensor<3x15x14x8xf32>
 return %4 : tensor<3x15x14x8xf32>
 }
-func @_functionalize_if_else_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_else_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
 %cst = constant dense<false> : tensor<i1>
 return %cst : tensor<i1>
 }
-func @_functionalize_if_then_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_then_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> {
 %0 = "tf.blah"() : () -> tensor<i1>
 return %0 : tensor<i1>
 }


@@ -35,11 +35,11 @@ module {
 }
 // CHECK-NOT: _tpu_v1_compat_outlined
 module @_tpu_v1_compat_outlined {
-func @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> tensor<i1> attributes {sym_visibility = "nested"} {
+func nested @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> tensor<i1> {
 %0 = "tf.opA"(%arg0) : (tensor<i1>) -> tensor<i1>
 return %0 : tensor<i1>
 }
-func @_tpu_v1_compat_outlined_func1(%arg0: tensor<i1>, %arg1: tensor<f32>) -> (tensor<i1>, tensor<i32>) attributes {sym_visibility = "nested"} {
+func nested @_tpu_v1_compat_outlined_func1(%arg0: tensor<i1>, %arg1: tensor<f32>) -> (tensor<i1>, tensor<i32>) {
 %0 = "tf.opA"(%arg0) : (tensor<i1>) -> tensor<i1>
 %1 = "tf.opA"(%0) : (tensor<i1>) -> tensor<i1>
 %2 = "tf.SomeOp"(%arg0, %arg1) : (tensor<i1>, tensor<f32>) -> tensor<i32>


@@ -12,7 +12,7 @@ module {
 return %0#0 : tensor<i32>
 }
 module @_tpu_v1_compat_outlined {
-func @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) attributes {sym_visibility = "nested"} {
+func nested @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) {
 "tf.TPUReplicateMetadata"() {_tpu_replicate = "cluster", device = "device", num_replicas = 1 : i64, topology = "topology"} : () -> ()
 %0 = "tf.opA"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i1>) -> tensor<i32>
 %1 = "tf.While"(%0) {body = @while_body_with_cluster_attr, cond = @while_cond_with_cluster_attr, is_stateless = false, name = "A", parallel_iterations = 10 : i64} : (tensor<i32>) -> tensor<i32>


@@ -2,7 +2,7 @@
 // Test that simple TF operations can be inlined.
-func @inline_simple_callee() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @inline_simple_callee() -> tensor<2xi32> {
 %cst = "tf.Const"() { value = dense<2> : tensor<2xi32> } : () -> tensor<2xi32>
 return %cst : tensor<2xi32>
 }
@@ -17,7 +17,7 @@ func @inline_simple() -> tensor<2xi32> {
 // Test that TPUParitionedCallOp is not inlined.
-func @simple_callee() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @simple_callee() -> tensor<2xi32> {
 %cst = "tf.Const"() { value = dense<2> : tensor<2xi32> } : () -> tensor<2xi32>
 return %cst : tensor<2xi32>
 }
@@ -35,7 +35,7 @@ func @dont_inline_tpu_partitioned_call() -> tensor<2xi32> {
 // Check that TF call operations can be inlined, even when the shape of the
 // argument or result is different than the called function.
-func @inline_shape_cast_callee(%arg : tensor<*xi32>) -> tensor<*xi32> attributes {sym_visibility = "private"} {
+func private @inline_shape_cast_callee(%arg : tensor<*xi32>) -> tensor<*xi32> {
 return %arg : tensor<*xi32>
 }
@@ -51,12 +51,12 @@ func @inline_shape_cast(%arg: tensor<2xi32>) -> tensor<2xi32> {
 // Check that functions can be inlined into islands.
-func @inline_simple_callee1() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @inline_simple_callee1() -> tensor<2xi32> {
 %cst = "tf.Const"() { value = dense<2> : tensor<2xi32> } : () -> tensor<2xi32>
 return %cst : tensor<2xi32>
 }
-func @inline_into_island_multi_block_callee() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @inline_into_island_multi_block_callee() -> tensor<2xi32> {
 br ^bb1
 ^bb1:


@@ -13,7 +13,7 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
 return
 }
-func @indexed_case_branch0_40(%arg0: tensor<i32>) -> tensor<*xi32> attributes {sym_visibility = "private"} {
+func private @indexed_case_branch0_40(%arg0: tensor<i32>) -> tensor<*xi32> {
 %0 = tf_executor.graph {
 %outputs, %control = tf_executor.island wraps "tf.Const"() {device = "", value = dense<1> : tensor<i32>} : () -> tensor<i32>
 %outputs_0, %control_1 = tf_executor.island wraps "tf.AddV2"(%arg0, %outputs) {device = ""} : (tensor<i32>, tensor<i32>) -> tensor<*xi32>
@@ -22,7 +22,7 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
 return %0 : tensor<*xi32>
 }
-func @indexed_case_branch1_50(%arg0: tensor<i32>) -> tensor<*xi32> attributes {sym_visibility = "private"} {
+func private @indexed_case_branch1_50(%arg0: tensor<i32>) -> tensor<*xi32> {
 %0 = tf_executor.graph {
 %outputs, %control = tf_executor.island wraps "tf.Const"() {device = "", value = dense<2> : tensor<i32>} : () -> tensor<i32>
 %outputs_0, %control_1 = tf_executor.island wraps "tf.AddV2"(%arg0, %outputs) {device = ""} : (tensor<i32>, tensor<i32>) -> tensor<*xi32>


@@ -557,7 +557,7 @@ func @main() -> () {
 %call = "tf.PartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @callee} : () -> (tensor<*xf32>)
 return
 }
-func @callee() -> (tensor<*xf32>) attributes {sym_visibility = "private"} {
+func private @callee() -> (tensor<*xf32>) {
 %size = "tf.Const"() {value = dense<5> : tensor<i32>} : () -> tensor<i32>
 // CHECK: %[[LOCAL_VAR:.*]] = "tf.MlirLocalVarOp"() : () -> tensor<!tf.resource<tensor<5x3xf32>>>
 %ta:2 = "tf.TensorArrayV3"(%size) {dtype = f32, element_shape = #tf.shape<*>, dynamic_size = false, clear_after_read = true, identical_element_shapes = true, tensor_array_name = "ta"} : (tensor<i32>) -> (tensor<!tf.resource<tensor<*xf32>>>, tensor<f32>)
@@ -598,7 +598,7 @@ func @main() -> () {
 // CHECK-LABEL: func private @callee
 // CHECK-SAME: %[[VAR:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[GVAR:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>
-func @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> attributes {sym_visibility = "private"} {
+func private @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> {
 %index = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
 %elem = "tf._SomeOp"() : () -> tensor<3xf32>
 %flow = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>


@@ -515,7 +515,7 @@ func @main(%arg0: tensor<i1>) -> () {
 }
 // CHECK: func private @callee(%[[ARG0:.*]]: tensor<10xf32>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<1xi32>) -> (tensor<10xf32>, tensor<1xi32>)
-func @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>> attributes {sym_visibility = "private"} {
+func private @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>> {
 %elem = "tf._SomeOp"(%arg1) : (tensor<i1>) -> tensor<f32>
 // CHECK-NOT: "tf.TensorListPushBack"


@@ -64,7 +64,7 @@ module attributes {tf_saved_model.semantics} {
 return
 }
-func @f_callee(%arg0: tensor<!tf.resource<tensor<f32>>>) attributes {sym_visibility = "private"} {
+func private @f_callee(%arg0: tensor<!tf.resource<tensor<f32>>>) {
 return
 }
 }


@@ -46,7 +46,7 @@ module attributes {tf_saved_model.semantics} {
 return %arg0 : tensor<f32>
 }
-func @f() attributes {sym_visibility = "private"} {
+func private @f() {
 return
 }


@@ -3,7 +3,7 @@
 module attributes {tf_saved_model.semantics} {
 // expected-error@+1 {{unknown tf_saved_model dialect arg attribute 'tf_saved_model.not_a_real_arg_attr'}}
-func @f(%arg0: tensor<f32> {tf_saved_model.not_a_real_arg_attr = 1 : i32}) attributes {sym_visibility = "private"} {
+func private @f(%arg0: tensor<f32> {tf_saved_model.not_a_real_arg_attr = 1 : i32}) {
 return
 }
@@ -245,8 +245,8 @@ module attributes {tf_saved_model.semantics} {
 "tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
 // expected-error@+1 {{can only apply 'tf_saved_model' argument attributes to exported functions}}
-func @f(%arg0: tensor<!tf.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
--> (tensor<?xf32> {tf_saved_model.index_path = []}) attributes {sym_visibility = "private"} {
+func private @f(%arg0: tensor<!tf.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
+-> (tensor<?xf32> {tf_saved_model.index_path = []}) {
 %0 = "tf.ReadVariableOp"(%arg0) : (tensor<!tf.resource<tensor<?xf32>>>) -> tensor<?xf32>
 return %0 : tensor<?xf32>
 }
@@ -286,7 +286,7 @@ module attributes {tf_saved_model.semantics} {
 // expected-error@+1 {{the initializer function should have no output}}
 "tf_saved_model.session_initializer"() { initializers = [@init] } : () -> ()
-func @init() -> tensor<1xf32> attributes {sym_visibility = "private"} {
+func private @init() -> tensor<1xf32> {
 %0 = "tf.Const"() {value = dense<[1.0]> : tensor<1xf32> } : () -> tensor<1xf32>
 return %0 : tensor<1xf32>
 }
@@ -299,7 +299,7 @@ module attributes {tf_saved_model.semantics} {
 "tf_saved_model.session_initializer"() { initializer = @init } : () -> ()
 // expected-error@+1 {{there must be no more than one session_initializer op}}
 "tf_saved_model.session_initializer"() { initializers = [@init] } : () -> ()
-func @init() -> tensor<1xf32> attributes {sym_visibility = "private"} {
+func private @init() -> tensor<1xf32> {
 %0 = "tf.Const"() {value = dense<[1.0]> : tensor<1xf32> } : () -> tensor<1xf32>
 return %0 : tensor<1xf32>
 }
@@ -310,9 +310,9 @@ module attributes {tf_saved_model.semantics} {
 module attributes {tf_saved_model.semantics, tf_saved_model.under_construction} {
 // expected-error@+1 {{exported function @f should be public}}
-func @f(
+func private @f(
 %arg0: tensor<f32> {tf.resource_name = "resource"}
-) attributes { sym_visibility = "private", tf_saved_model.exported_names = ["foo.some_func"] } {
+) attributes {tf_saved_model.exported_names = ["foo.some_func"] } {
 return
 }
@@ -372,7 +372,7 @@ module attributes {tf_saved_model.semantics} {
 // expected-error@+1 {{the initializer function should be exported}}
 "tf_saved_model.session_initializer"() { initializers = [@init] } : () -> ()
-func @init() attributes {sym_visibility = "private"} {
+func private @init() {
 return
 }
 }


@@ -20,12 +20,12 @@ module attributes {tf_saved_model.semantics} {
 return %val : tensor<f32>
 }
-func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
 %val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_callee_callee} : (tensor<*x!tf.resource>) -> (tensor<f32>)
 return %val : tensor<f32>
 }
-func @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+func private @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
 %val = "tf.ReadVariableOp"(%arg0) : (tensor<*x!tf.resource>) -> tensor<f32>
 return %val : tensor<f32>
 }
@@ -59,7 +59,7 @@ module attributes {tf_saved_model.semantics} {
 return %val : tensor<f32>
 }
-func @f_common(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+func private @f_common(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
 %val = "tf.ReadVariableOp"(%arg0) : (tensor<*x!tf.resource>) -> tensor<f32>
 return %val : tensor<f32>
 }
@@ -85,7 +85,7 @@ module attributes {tf_saved_model.semantics} {
 return %val_2 : tensor<f32>
 }
-func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> {
 %cst_1 = constant dense<2.0> : tensor<f32>
 return %cst_1 : tensor<f32>
 }