Remove the "name" attribute when the ops are imported
When the graph is exported, the "name" attribute is created from the op location. PiperOrigin-RevId: 287195656 Change-Id: Iec65f930198b4ff9ca021f4aeace099302a92472
This commit is contained in:
parent
be7d68dfe3
commit
7330d9f21f
@ -10,7 +10,9 @@ glob_lit_tests(
|
||||
driver = "@local_config_mlir//:run_lit.sh",
|
||||
test_file_exts = [
|
||||
"pbtxt",
|
||||
"py",
|
||||
# TODO(fengliuai): reenable these tests after the fused loc is
|
||||
# supported in the diagnostic handler.
|
||||
# "py",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -38,6 +38,6 @@ versions {
|
||||
|
||||
# CHECK: func @main(%arg0: tensor<4xi32>, %arg1: tensor<4xi32>) -> tensor<*xi32>
|
||||
# CHECK: attributes {tf.entry_function = {inputs = "input0,input1", outputs = "output"}} {
|
||||
# CHECK-NEXT: %0 = "tf.BannaPotatoSaladWithColeslaw"(%arg0, %arg1) {T = i32, device = "", name = "output"} : (tensor<4xi32>, tensor<4xi32>) -> tensor<*xi32>
|
||||
# CHECK-NEXT: %0 = "tf.BannaPotatoSaladWithColeslaw"(%arg0, %arg1) {T = i32, device = ""} : (tensor<4xi32>, tensor<4xi32>) -> tensor<*xi32>
|
||||
# CHECK-NEXT: return %0 : tensor<*xi32>
|
||||
# CHECK-NEXT: }
|
||||
|
@ -1,11 +1,11 @@
|
||||
# RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-input-arrays=a,b -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-input-shapes=':' -tf-output-arrays=StatefulIf,StatelessIf -o - | FileCheck %s
|
||||
# RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-input-arrays=a,b -tf-input-data-types=DT_FLOAT,DT_FLOAT -tf-input-shapes=':' -tf-output-arrays=StatefulIf,StatelessIf -o - -mlir-print-debuginfo | FileCheck %s
|
||||
|
||||
# Verify that TensorFlow If and StatelessIf ops are mapped to the
|
||||
# composite If op in MLIR with is_stateless attribute set accordingly to
|
||||
# distinguish between them.
|
||||
|
||||
# CHECK-DAG: "tf.If"{{.*}} is_stateless = false, name = "StatefulIf"
|
||||
# CHECK-DAG: "tf.If"{{.*}} is_stateless = true, name = "StatelessIf"
|
||||
# CHECK-DAG: "tf.If"{{.*}} is_stateless = false{{.*}} loc("StatefulIf")
|
||||
# CHECK-DAG: "tf.If"{{.*}} is_stateless = true{{.*}} loc("StatelessIf")
|
||||
|
||||
node {
|
||||
name: "tf.Less"
|
||||
|
@ -1,11 +1,11 @@
|
||||
# RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-input-arrays=iter,val -tf-input-data-types=DT_INT32,DT_FLOAT -tf-input-shapes=':' -tf-output-arrays=StatefulWhile:1,StatelessWhile:1 -o - | FileCheck %s
|
||||
# RUN: tf-mlir-translate -graphdef-to-mlir %s -tf-input-arrays=iter,val -tf-input-data-types=DT_INT32,DT_FLOAT -tf-input-shapes=':' -tf-output-arrays=StatefulWhile:1,StatelessWhile:1 -o - -mlir-print-debuginfo | FileCheck %s
|
||||
|
||||
# Verify that TensorFlow While and StatelessWhile ops are mapped to the
|
||||
# composite While op in MLIR with is_stateless attribute set accordingly to
|
||||
# distinguish between them.
|
||||
|
||||
# CHECK-DAG: "tf.While"{{.*}} is_stateless = false, name = "StatefulWhile"
|
||||
# CHECK-DAG: "tf.While"{{.*}} is_stateless = true, name = "StatelessWhile"
|
||||
# CHECK-DAG: "tf.While"{{.*}} is_stateless = false{{.*}} loc("StatefulWhile")
|
||||
# CHECK-DAG: "tf.While"{{.*}} is_stateless = true{{.*}} loc("StatelessWhile")
|
||||
|
||||
node {
|
||||
name: "StatefulWhile"
|
||||
|
@ -1,7 +1,7 @@
|
||||
# RUN: tf-mlir-translate -graphdef-to-mlir %s | FileCheck %s
|
||||
|
||||
# CHECK:"tf.MlirPassthroughOp"
|
||||
# CHECK: mlir_module = "\0Afunc @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {\0A %add = \22tf.Add\22(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>\0A %ret = \22magic.op\22(%add, %add) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>\0A return %ret : tensor<10x10xf32>\0A}\0A", name = "MlirPassthroughOp"} : (tensor<10xf32>, tensor<10xf32>) -> tensor<*xf32>
|
||||
# CHECK: mlir_module = "\0Afunc @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {\0A %add = \22tf.Add\22(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>\0A %ret = \22magic.op\22(%add, %add) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>\0A return %ret : tensor<10x10xf32>\0A}\0A"} : (tensor<10xf32>, tensor<10xf32>) -> tensor<*xf32>
|
||||
|
||||
node {
|
||||
name: "x"
|
||||
|
@ -90,6 +90,6 @@ library {
|
||||
}
|
||||
|
||||
# TODO(b/142400497): What is the semantic contract for locations?
|
||||
# CHECK: "tf.Const"{{.*}}value = dense<2>{{.*}}loc(fused["n1@f1", "n2@f2"])
|
||||
# CHECK: "tf.Const"{{.*}}value = dense<2>{{.*}}loc(fused["n1@f1", "n2@f2", "fused_node_outside_function"])
|
||||
# CHECK: "tf.Const"{{.*}}value = dense<0>{{.*}}loc("node_outside_function")
|
||||
# CHECK: "tf.Const"{{.*}}value = dense<1>{{.*}}loc("node_inside_function@foo")
|
||||
|
@ -1,4 +1,4 @@
|
||||
# RUN: tf-mlir-translate -graphdef-to-mlir %s -o - | FileCheck %s
|
||||
# RUN: tf-mlir-translate -graphdef-to-mlir %s -o - -mlir-print-debuginfo | FileCheck %s
|
||||
|
||||
node {
|
||||
name: "Quantized_Constant"
|
||||
@ -28,5 +28,5 @@ versions {
|
||||
}
|
||||
|
||||
# CHECK: tf.Const
|
||||
# CHECK-SAME: name = "Quantized_Constant"
|
||||
# CHECK-SAME: value = opaque<"tf", "{{0[xX][0-9a-fA-F]*}}"> : tensor<!tf.quint8>
|
||||
# CHECK-SAME: loc("Quantized_Constant")
|
||||
|
@ -1,13 +1,13 @@
|
||||
# RUN: tf-mlir-translate -graphdef-to-splatted-mlir %s -o - | FileCheck %s --dump-input-on-failure
|
||||
# RUN: tf-mlir-translate -graphdef-to-splatted-mlir %s -o - -mlir-print-debuginfo | FileCheck %s --dump-input-on-failure
|
||||
|
||||
# CHECK: tf_executor.SwitchN
|
||||
# CHECK-SAME: of 3 : tensor<i32>
|
||||
# CHECK-SAME: T = i32
|
||||
# CHECK-SAME: name = "Case/branch_index/_3"
|
||||
# CHECK-SAME: loc("Case/branch_index/_3")
|
||||
# CHECK: tf_executor.SwitchN
|
||||
# CHECK-SAME: of 2 : tensor<f32>
|
||||
# CHECK-SAME: T = f32
|
||||
# CHECK-SAME: name = "Case/Case/input_0/_7"
|
||||
# CHECK-SAME: loc("Case/Case/input_0/_7")
|
||||
|
||||
node {
|
||||
name: "Case/branch_index"
|
||||
|
@ -1,8 +1,8 @@
|
||||
// RUN: tf-mlir-translate -mlir-to-graphdef %s -o - | FileCheck %s
|
||||
|
||||
func @main() -> (tensor<1x2xf16>, tensor<2xf16>) {
|
||||
%0:2 = "_tf.Const"() {device = "", name = "foo", dtype = "tfdtype$DT_HALF", value = dense<1.0> : tensor<1x2xf16>} : () -> (tensor<1x2xf16>, !_tf.control)
|
||||
%1:2 = "_tf.Const"() {device = "", name = "bar", dtype = "tfdtype$DT_HALF", value = dense<[1.0, 2.0]> : tensor<2xf16>} : () -> (tensor<2xf16>, !_tf.control)
|
||||
%0:2 = "_tf.Const"() {device = "", dtype = "tfdtype$DT_HALF", value = dense<1.0> : tensor<1x2xf16>} : () -> (tensor<1x2xf16>, !_tf.control) loc("foo")
|
||||
%1:2 = "_tf.Const"() {device = "", dtype = "tfdtype$DT_HALF", value = dense<[1.0, 2.0]> : tensor<2xf16>} : () -> (tensor<2xf16>, !_tf.control) loc("bar")
|
||||
return %0#0, %1#0 : tensor<1x2xf16>, tensor<2xf16>
|
||||
|
||||
// CHECK: node {
|
||||
@ -13,4 +13,4 @@ func @main() -> (tensor<1x2xf16>, tensor<2xf16>) {
|
||||
// CHECK-NEXT: op: "Const"
|
||||
// CHECK: half_val: 15360
|
||||
// CHECK: half_val: 16384
|
||||
}
|
||||
}
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
func @main() -> tensor<*x!tf.resource> attributes {tf.entry_function = {inputs = "", outputs = "func_call"}} {
|
||||
%0 = tf_executor.graph {
|
||||
%outputs, %control = tf_executor.island wraps "tf.VarHandleOp"() {container = "a", device = "/CPU:0", dtype = i64, name = "x", shape = "tfshape$", shared_name = "x"} : () -> tensor<!tf.resource<tensor<i64>>>
|
||||
%outputs, %control = tf_executor.island wraps "tf.VarHandleOp"() {container = "a", device = "/CPU:0", dtype = i64, shape = "tfshape$", shared_name = "x"} : () -> tensor<!tf.resource<tensor<i64>>> loc("x")
|
||||
%outputs_0, %control_1 = tf_executor.island wraps "tf.LegacyCall"(%outputs, %outputs) {_disable_call_shape_inference = true, f = @test_func_name0} : (tensor<!tf.resource<tensor<i64>>>, tensor<!tf.resource<tensor<i64>>>) -> tensor<*x!tf.resource>
|
||||
tf_executor.fetch %outputs_0 : tensor<*x!tf.resource>
|
||||
}
|
||||
|
@ -2,15 +2,15 @@
|
||||
|
||||
func @main(%arg0: tensor<*x!tf.resource>, %arg1: tensor<*x!tf.resource<tensor<3x3x1x32xf32>>>, %arg2: tensor<*xf32>, %arg3: tensor<2x4x6x8xi32>) -> (tensor<f32>, tensor<f32>)
|
||||
attributes {tf.entry_function = {inputs = "args_0,args_1,args_2,args_3", outputs = "rets_0_RetVal,rets_1_RetVal"}} {
|
||||
%0:2 = "_tf.Const"() {device = "", dtype = "tfdtype$DT_FLOAT", name = "const", value = dense<0.000000e+00> : tensor<f32>} : () -> (tensor<f32>, !_tf.control)
|
||||
%1:2 = "_tf.Identity"(%0#0) {T = "tfdtype$DT_FLOAT", device = "", name = "identity"} : (tensor<f32>) -> (tensor<f32>, !_tf.control)
|
||||
%2:2 = "_tf.StatefulPartitionedCall"(%0#0, %arg1) {Tin = ["tfdtype$DT_FLOAT", "tfdtype$DT_RESOURCE"], Tout = ["tfdtype$DT_FLOAT"], _gradient_op_type = "PartitionedCall-1205", config = "", config_proto = "\0A\07\0A\03GPU\10\00\0A\07\0A\03CPU\10\012\02J\008\01", device = "", executor_type = "", f = @function0, name = "statefulpartitionedcall"} : (tensor<f32>, tensor<*x!tf.resource<tensor<3x3x1x32xf32>>>) -> (tensor<f32>, !_tf.control)
|
||||
return %1#0, %2#0 : tensor<f32>, tensor<f32>
|
||||
%0 = "tf.Const"() {device = "", dtype = "tfdtype$DT_FLOAT", value = dense<0.000000e+00> : tensor<f32>} : () -> tensor<f32> loc("const")
|
||||
%1 = "tf.Identity"(%0) {T = "tfdtype$DT_FLOAT", device = ""} : (tensor<f32>) -> tensor<f32> loc("identity")
|
||||
%2 = "tf.StatefulPartitionedCall"(%0, %arg1) {Tin = ["tfdtype$DT_FLOAT", "tfdtype$DT_RESOURCE"], Tout = ["tfdtype$DT_FLOAT"], _gradient_op_type = "PartitionedCall-1205", config = "", config_proto = "\0A\07\0A\03GPU\10\00\0A\07\0A\03CPU\10\012\02J\008\01", device = "", executor_type = "", f = @function0} : (tensor<f32>, tensor<*x!tf.resource<tensor<3x3x1x32xf32>>>) -> tensor<f32> loc("statefulpartitionedcall")
|
||||
return %1, %2 : tensor<f32>, tensor<f32>
|
||||
}
|
||||
|
||||
func @function0(%arg0: tensor<*xf32>, %arg1: tensor<*x!tf.resource>) -> tensor<*xf32>
|
||||
attributes {tf.signature.is_stateful} {
|
||||
%0:2 = "_tf.Identity"(%arg0) {T = "tfdtype$DT_FLOAT", device = "", name = "Identity"} : (tensor<*xf32>) -> (tensor<*xf32>, !_tf.control)
|
||||
%0 = "tf.Identity"(%arg0) {T = "tfdtype$DT_FLOAT", device = ""} : (tensor<*xf32>) -> tensor<*xf32> loc("Identity@function0")
|
||||
return %0#0 : tensor<*xf32>
|
||||
}
|
||||
|
||||
|
@ -6,7 +6,7 @@ func @main() {
|
||||
%0 = "tf.Const"() {dtype = "tfdtype$DT_INT32", value = dense<0> : tensor<i32>} : () -> (tensor<i32>) loc("^foo")
|
||||
// CHECK: name: "fo.o"
|
||||
%1 = "tf.Const"() {dtype = "tfdtype$DT_INT32", value = dense<1> : tensor<i32>} : () -> (tensor<i32>) loc("fo{o")
|
||||
// CHECK: name: "foo.1"
|
||||
// CHECK: name: "foo"
|
||||
%2 = "tf.Const"() {dtype = "tfdtype$DT_INT32", value = dense<2> : tensor<i32>} : () -> (tensor<i32>) loc("foo@1")
|
||||
// CHECK: name: "ba.r"
|
||||
%3 = "tf.Const"() {dtype = "tfdtype$DT_INT32", value = dense<2> : tensor<i32>} : () -> (tensor<i32>) loc("ba r")
|
||||
|
@ -32,6 +32,7 @@ limitations under the License.
|
||||
#include "mlir/IR/Builders.h" // TF:local_config_mlir
|
||||
#include "mlir/IR/Function.h" // TF:local_config_mlir
|
||||
#include "mlir/IR/Identifier.h" // TF:local_config_mlir
|
||||
#include "mlir/IR/Location.h" // TF:local_config_mlir
|
||||
#include "mlir/IR/Module.h" // TF:local_config_mlir
|
||||
#include "mlir/IR/Operation.h" // TF:local_config_mlir
|
||||
#include "mlir/IR/Types.h" // TF:local_config_mlir
|
||||
@ -110,25 +111,28 @@ std::string LegalizeNodeName(llvm::StringRef name) {
|
||||
return legalized_name;
|
||||
}
|
||||
|
||||
// TODO(jpienaar): unify and move from here to be able to reuse with tflite
|
||||
std::string GetName(Operation* inst) {
|
||||
// TODO(prakalps): b/137006652 prevents us from using location info (derived
|
||||
// from experimental_debug_info) to generate node names. Until it is fixed,
|
||||
// first check for "name" attribute to get node name.
|
||||
|
||||
// Default name is Operation type.
|
||||
auto name = inst->getName().getStringRef();
|
||||
if (auto attr = inst->getAttrOfType<mlir::StringAttr>("name")) {
|
||||
name = attr.getValue();
|
||||
} else if (auto name_loc = inst->getLoc().dyn_cast<mlir::NameLoc>()) {
|
||||
name = name_loc.getName().strref();
|
||||
} else if (auto call_loc = inst->getLoc().dyn_cast<mlir::CallSiteLoc>()) {
|
||||
llvm::StringRef GetNameFromLoc(mlir::Location loc,
|
||||
llvm::StringRef default_name) {
|
||||
if (auto name_loc = loc.dyn_cast<mlir::NameLoc>()) {
|
||||
return name_loc.getName().strref().split('@').first;
|
||||
} else if (auto call_loc = loc.dyn_cast<mlir::CallSiteLoc>()) {
|
||||
// Return name if CallSiteLoc's callee has a NameLoc (as should be the case
|
||||
// if imported with DebugInfo), else use the fallback naming scheme below.
|
||||
if (auto name_loc = call_loc.getCallee().dyn_cast<mlir::NameLoc>())
|
||||
name = name_loc.getName().strref();
|
||||
return name_loc.getName().strref().split('@').first;
|
||||
} else if (auto fused_loc = loc.dyn_cast<mlir::FusedLoc>()) {
|
||||
// According to the importer, the last location of a fused location is
|
||||
// the name from the node_def and the rests are from the experimental debug
|
||||
// info.
|
||||
return GetNameFromLoc(fused_loc.getLocations().back(), default_name);
|
||||
}
|
||||
return default_name;
|
||||
}
|
||||
|
||||
// TODO(jpienaar): unify and move from here to be able to reuse with tflite
|
||||
std::string GetName(Operation* inst) {
|
||||
// Default name is Operation type.
|
||||
auto name = GetNameFromLoc(inst->getLoc(), inst->getName().getStringRef());
|
||||
return LegalizeNodeName(name);
|
||||
}
|
||||
|
||||
|
@ -1318,15 +1318,21 @@ mlir::Location ImporterBase::GetLocation(const NodeDef& node_def) {
|
||||
return create_location(node_def.name(), function_name_for_debug_info_);
|
||||
} else {
|
||||
// If the original nodes are defined, then we use them to get a list of
|
||||
// call sites, and then fuse them to a single fused location.
|
||||
llvm::SmallVector<mlir::Location, 4> node_call_sites;
|
||||
node_call_sites.reserve(original_nodes.size());
|
||||
// call sites, and then fuse them to a single fused location, with the name
|
||||
// of the node_def.
|
||||
llvm::SmallVector<mlir::Location, 4> node_locations;
|
||||
node_locations.reserve(original_nodes.size() + 1);
|
||||
|
||||
// store the names in the experimental_debug_info
|
||||
for (int i = 0, e = original_nodes.size(); i != e; ++i) {
|
||||
auto node_name = original_nodes[i];
|
||||
auto func_name = (i < original_funcs.size()) ? original_funcs[i] : "";
|
||||
node_call_sites.push_back(create_location(node_name, func_name));
|
||||
node_locations.push_back(create_location(node_name, func_name));
|
||||
}
|
||||
return mlir::FusedLoc::get(node_call_sites, context_);
|
||||
// store the name of the node_def
|
||||
node_locations.push_back(
|
||||
create_location(node_def.name(), function_name_for_debug_info_));
|
||||
return mlir::FusedLoc::get(node_locations, context_);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1566,8 +1572,6 @@ Status ImporterBase::ConvertNode(const Node& node) {
|
||||
&result.attributes));
|
||||
}
|
||||
|
||||
result.attributes.push_back(builder_.getNamedAttr(
|
||||
"name", builder_.getStringAttr(std::string(node.name()))));
|
||||
result.attributes.push_back(builder_.getNamedAttr(
|
||||
"device", builder_.getStringAttr(std::string(node_def.device()))));
|
||||
|
||||
|
@ -65,8 +65,12 @@ Status ConvertLocation(mlir::Location inst_loc,
|
||||
debug_info->add_original_node_names(name_loc.getName().c_str());
|
||||
}
|
||||
} else if (auto fused = inst_loc.dyn_cast<mlir::FusedLoc>()) {
|
||||
for (auto loc : fused.getLocations()) {
|
||||
TF_RETURN_IF_ERROR(ConvertLocation(loc, debug_info));
|
||||
auto locations = fused.getLocations();
|
||||
if (locations.size() <= 1)
|
||||
return errors::InvalidArgument("expected experimental debuf info.");
|
||||
// skip the first one, which is the name of the node_def.
|
||||
for (int i = 0; i < locations.size() - 1; ++i) {
|
||||
TF_RETURN_IF_ERROR(ConvertLocation(locations[i], debug_info));
|
||||
}
|
||||
}
|
||||
return Status::OK();
|
||||
|
Loading…
Reference in New Issue
Block a user