Replace tf2xla/dump_graph.h with core/util/dump_graph.h.

This moves the switch that enables graph dumping in the bridge from the flag
"TF_XLA_FLAGS=--tf_dump_graph_prefix=/tmp/" to the environment variable
"TF_DUMP_GRAPH_PREFIX=/tmp/". Both TensorFlow and the TF2XLA bridge can then use
the same environment variable to specify the dump path, rather than two
different flags with the same intent.

PiperOrigin-RevId: 236829860
This commit is contained in:
parent 9d914a58d0
commit 38b6e548e7
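For reference, the sketch below (not part of this commit) illustrates the usage pattern the change converges on. The signature and return value of tensorflow::DumpGraphToFile are inferred from the deleted tf2xla wrapper and the migrated call sites in the diff; DebugDumpGraph is a hypothetical helper used only for illustration. Dumping is enabled by setting TF_DUMP_GRAPH_PREFIX (e.g. TF_DUMP_GRAPH_PREFIX=/tmp/) instead of passing TF_XLA_FLAGS=--tf_dump_graph_prefix=/tmp/.

// Hypothetical caller, for illustration only; assumes the core utility behaves
// as the call sites in this commit use it.
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/dump_graph.h"  // replaces tf2xla/dump_graph.h

namespace tensorflow {

void DebugDumpGraph(const Graph& graph,
                    const FunctionLibraryDefinition* flib_def) {
  // Before: dump_graph::DumpGraphToFile("my_pass", graph, flib_def), which
  // read the --tf_dump_graph_prefix flag parsed out of TF_XLA_FLAGS.
  // After: the core helper picks its output directory from the
  // TF_DUMP_GRAPH_PREFIX environment variable.
  string path = DumpGraphToFile("my_pass", graph, flib_def);
  VLOG(1) << "Dumped graph to " << path;
}

}  // namespace tensorflow
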
@@ -168,7 +168,6 @@ cc_library(
         ":xla_tensor",
         "//tensorflow/compiler/jit/ops:xla_ops",
         "//tensorflow/compiler/tf2xla:common",
-        "//tensorflow/compiler/tf2xla:dump_graph",
         "//tensorflow/compiler/tf2xla:tf2xla_util",
         "//tensorflow/compiler/tf2xla:xla_compiler",
         "//tensorflow/compiler/tf2xla/kernels:xla_ops",
@@ -283,7 +282,6 @@ cc_library(
     hdrs = ["xla_compilation_cache.h"],
     deps = [
         "//tensorflow/compiler/tf2xla:common",
-        "//tensorflow/compiler/tf2xla:dump_graph",
         "//tensorflow/compiler/tf2xla:xla_compiler",
         "//tensorflow/compiler/xla:statusor",
         "//tensorflow/compiler/xla/client:client_library",
@@ -417,7 +415,6 @@ cc_library(
     hdrs = ["shape_inference.h"],
     deps = [
         ":shape_inference_helpers",
-        "//tensorflow/compiler/tf2xla:dump_graph",
         "//tensorflow/compiler/xla:statusor",
         "//tensorflow/core:core_cpu_internal",
         "//tensorflow/core:framework",
@@ -527,7 +524,6 @@ cc_library(
         "//tensorflow/cc:scope_internal",
         "//tensorflow/compiler/jit/graphcycles",
         "//tensorflow/compiler/jit/ops:xla_ops",
-        "//tensorflow/compiler/tf2xla:dump_graph",
         "//tensorflow/compiler/tf2xla:resource_operation_table",
         "//tensorflow/compiler/tf2xla:side_effect_util",
         "//tensorflow/compiler/tf2xla:tf2xla_util",

@@ -28,7 +28,6 @@ limitations under the License.
 #include "tensorflow/compiler/jit/flags.h"
 #include "tensorflow/compiler/jit/xla_cluster_util.h"
 #include "tensorflow/compiler/tf2xla/cc/ops/xla_jit_ops.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
 #include "tensorflow/compiler/xla/status_macros.h"
 #include "tensorflow/core/common_runtime/function.h"
@@ -42,6 +41,7 @@ limitations under the License.
 #include "tensorflow/core/lib/core/status.h"
 #include "tensorflow/core/lib/hash/hash.h"
 #include "tensorflow/core/public/version.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 namespace {
@@ -426,7 +426,7 @@ Status BuildXlaOpsPass::Run(const GraphOptimizationPassOptions& options) {
   }
 
   if (VLOG_IS_ON(1)) {
-    dump_graph::DumpGraphToFile("build_xla_ops", *graph, options.flib_def);
+    DumpGraphToFile("build_xla_ops", *graph, options.flib_def);
   }
 
   return Status::OK();

@@ -29,7 +29,6 @@ limitations under the License.
 #include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
 #include "tensorflow/compiler/jit/shape_inference_helpers.h"
 #include "tensorflow/compiler/tf2xla/const_analysis.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/xla/status_macros.h"
 #include "tensorflow/core/common_runtime/function.h"
 #include "tensorflow/core/common_runtime/optimization_registry.h"
@@ -50,6 +49,7 @@ limitations under the License.
 #include "tensorflow/core/public/session_options.h"
 #include "tensorflow/core/public/version.h"
 #include "tensorflow/core/util/device_name_utils.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 
@@ -1124,10 +1124,9 @@ Status Encapsulator::Subgraph::BuildFunctionDef(
 
   if (VLOG_IS_ON(1)) {
     VLOG(2) << "Build function def " << name;
-    dump_graph::DumpGraphToFile(absl::StrCat("encapsulate_fdef_graph_", name),
-                                *graph_, library);
-    dump_graph::DumpFunctionDefToFile(absl::StrCat("encapsulate_fdef_", name),
-                                      fdef);
+    DumpGraphToFile(absl::StrCat("encapsulate_fdef_graph_", name), *graph_,
+                    library);
+    DumpFunctionDefToFile(absl::StrCat("encapsulate_fdef_", name), fdef);
   }
 
   const FunctionDef* original_fdef = library->Find(name);
@@ -1190,11 +1189,10 @@ Status Encapsulator::Subgraph::ReplaceFunctionDef(
 
   if (VLOG_IS_ON(1)) {
     VLOG(2) << "Replace function def " << name;
-    dump_graph::DumpGraphToFile(
-        absl::StrCat("replace_encapsulate_fdef_graph_", name), *graph_,
-        library);
-    dump_graph::DumpFunctionDefToFile(
-        absl::StrCat("replace_encapsulate_fdef_", name), fdef);
+    DumpGraphToFile(absl::StrCat("replace_encapsulate_fdef_graph_", name),
+                    *graph_, library);
+    DumpFunctionDefToFile(absl::StrCat("replace_encapsulate_fdef_", name),
+                          fdef);
   }
 
   TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef));
@@ -1556,7 +1554,7 @@ Status Encapsulator::SplitIntoSubgraphs(FunctionLibraryDefinition* library) {
   if (VLOG_IS_ON(1)) {
     // Dump subgraphs.
     for (auto& entry : subgraphs_) {
-      dump_graph::DumpGraphToFile(
+      DumpGraphToFile(
           absl::StrCat("encapsulate_subgraphs_subgraph_", entry.first),
           *entry.second.GetGraph(), library);
     }
@@ -2398,8 +2396,7 @@ Status Encapsulator::GetShapeInfoForOutsideCompilationSends(
       &node_images, library));
 
   if (VLOG_IS_ON(1)) {
-    dump_graph::DumpGraphToFile("pruned_graph_for_shape_inference",
-                                *pruned_graph, library);
+    DumpGraphToFile("pruned_graph_for_shape_inference", *pruned_graph, library);
   }
 
   for (auto& subgraph_entry : subgraphs_) {
@@ -2530,8 +2527,8 @@ Status EncapsulateSubgraphsPass::Run(
     const GraphOptimizationPassOptions& options) {
   VLOG(1) << "EncapsulateSubgraphsPass::Run";
   if (VLOG_IS_ON(1)) {
-    dump_graph::DumpGraphToFile("encapsulate_subgraphs_before", **options.graph,
-                                options.flib_def);
+    DumpGraphToFile("encapsulate_subgraphs_before", **options.graph,
+                    options.flib_def);
   }
 
   std::unique_ptr<Graph> graph_out;
@@ -2641,8 +2638,8 @@ Status EncapsulateSubgraphsPass::Run(
       "EncapsulateSubgraphsPass failed");
 
   if (VLOG_IS_ON(1)) {
-    dump_graph::DumpGraphToFile("encapsulate_subgraphs_after", *graph_out,
-                                options.flib_def);
+    DumpGraphToFile("encapsulate_subgraphs_after", *graph_out,
+                    options.flib_def);
   }
 
   *options.graph = std::move(graph_out);

@@ -21,7 +21,6 @@ limitations under the License.
 #include "absl/strings/ascii.h"
 #include "absl/strings/str_cat.h"
 #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/xla/status_macros.h"
 #include "tensorflow/core/framework/node_def.pb.h"
 #include "tensorflow/core/framework/types.h"
@@ -30,6 +29,7 @@ limitations under the License.
 #include "tensorflow/core/lib/strings/proto_serialization.h"
 #include "tensorflow/core/lib/strings/str_util.h"
 #include "tensorflow/core/platform/fingerprint.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 
@@ -372,8 +372,8 @@ Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors,
 Status EncapsulateXlaComputationsPass::Run(
     const GraphOptimizationPassOptions& options) {
   VLOG(1) << "EncapsulateXlaComputations(): "
-          << dump_graph::DumpGraphToFile("encapsulate_xla_computations_before",
-                                         **options.graph, options.flib_def);
+          << DumpGraphToFile("encapsulate_xla_computations_before",
+                             **options.graph, options.flib_def);
 
   const char* additional_help =
       IsCpuGpuCompile(options.graph->get())
@@ -383,14 +383,14 @@ Status EncapsulateXlaComputationsPass::Run(
   TF_RETURN_WITH_CONTEXT_IF_ERROR(Encapsulate(options.graph, options.flib_def),
                                   additional_help);
   VLOG(1) << "EncapsulateXlaComputations() half-way: "
-          << dump_graph::DumpGraphToFile("encapsulate_xla_computations_halfway",
-                                         **options.graph, options.flib_def);
+          << DumpGraphToFile("encapsulate_xla_computations_halfway",
+                             **options.graph, options.flib_def);
 
   TF_RETURN_WITH_CONTEXT_IF_ERROR(BuildXlaLaunchOps(options.graph->get()),
                                   additional_help);
   VLOG(1) << "EncapsulateXlaComputations() finished: "
-          << dump_graph::DumpGraphToFile("encapsulate_xla_computations_after",
-                                         **options.graph, options.flib_def);
+          << DumpGraphToFile("encapsulate_xla_computations_after",
+                             **options.graph, options.flib_def);
   return Status::OK();
 }
 

@@ -19,7 +19,6 @@ limitations under the License.
 #include "absl/strings/str_cat.h"
 #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
 #include "tensorflow/compiler/jit/encapsulate_util.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/side_effect_util.h"
 #include "tensorflow/compiler/tf2xla/tf2xla_util.h"
 #include "tensorflow/core/common_runtime/function.h"
@@ -31,6 +30,7 @@ limitations under the License.
 #include "tensorflow/core/graph/algorithm.h"
 #include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/gtl/cleanup.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 
@@ -581,10 +581,9 @@ Status ConstructHostGraph(
       &host_graph, outside_compilation_attr_name));
 
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile(
-        absl::StrCat("extract_outside_compilation_host_graph_for_",
-                     xla_cluster_name),
-        host_graph, fld);
+    DumpGraphToFile(absl::StrCat("extract_outside_compilation_host_graph_for_",
+                                 xla_cluster_name),
+                    host_graph, fld);
   }
 
   FunctionDef host_graph_fdef;
@@ -789,7 +788,7 @@ Status RewriteShapeInferenceGraph(const string& shape_inference_graph_name,
       std::unordered_set<const Node*>{send_from_host});
 
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile(shape_inference_graph_name, *g, fld);
+    DumpGraphToFile(shape_inference_graph_name, *g, fld);
   }
 
   // Replace original shape inference graph.
@@ -1620,7 +1619,7 @@ Status ExtractOutsideCompilationForFunction(
   TF_RETURN_IF_ERROR(PreprocessEdgesBetweenOutsideCompilations(
       fbody->graph, outside_compilation_attr_name));
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile(
+    DumpGraphToFile(
         absl::StrCat("extract_outside_compilation_for_func_before_", func_name),
         *fbody->graph, fld);
   }
@@ -1705,7 +1704,7 @@ Status ExtractOutsideCompilationForFunction(
     TF_RETURN_IF_ERROR(fld->AddFunctionDef(updated_fdef));
   }
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile(
+    DumpGraphToFile(
         absl::StrCat("extract_outside_compilation_for_func_after_", func_name),
         *graph_out, fld);
   }
@@ -1719,7 +1718,7 @@ Status ExtractOutsideCompilation(
     const std::unordered_map<string, XlaClusterInfo>& clusters, Graph* g,
     FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld) {
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile("extract_outside_compilation_before", *g, fld);
+    DumpGraphToFile("extract_outside_compilation_before", *g, fld);
   }
 
   std::vector<string> shape_inference_graphs;
@@ -1747,7 +1746,7 @@ Status ExtractOutsideCompilation(
   }
 
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile("extract_outside_compilation_after", *g, fld);
+    DumpGraphToFile("extract_outside_compilation_after", *g, fld);
   }
   return Status::OK();
 }

@@ -23,7 +23,6 @@ namespace tensorflow {
 namespace {
 
 BuildXlaOpsPassFlags* build_ops_flags;
-DumpGraphFlags* dump_graph_flags;
 MarkForCompilationPassFlags* mark_for_compilation_flags;
 XlaDeviceFlags* device_flags;
 XlaOpsCommonFlags* ops_flags;
@@ -31,15 +30,6 @@ XlaOpsCommonFlags* ops_flags;
 std::vector<Flag>* flag_list;
 std::once_flag flags_init;
 
-void AppendDumpGraphFlagsInternal(std::vector<Flag>* flag_list) {
-  std::vector<Flag> new_flags = {
-      Flag("tf_dump_graph_prefix", &dump_graph_flags->tf_dump_graph_prefix,
-           "Path prefix to which graphs dumped during debugging should be "
-           "written."),
-  };
-  flag_list->insert(flag_list->end(), new_flags.begin(), new_flags.end());
-}
-
 void AppendMarkForCompilationPassFlagsInternal(std::vector<Flag>* flag_list) {
   std::vector<Flag> new_flags = {
       Flag("tf_xla_auto_jit", &mark_for_compilation_flags->tf_xla_auto_jit,
@@ -81,9 +71,6 @@ void AllocateAndParseFlags() {
   build_ops_flags = new BuildXlaOpsPassFlags;
   build_ops_flags->tf_xla_enable_lazy_compilation = true;
 
-  dump_graph_flags = new DumpGraphFlags;
-  dump_graph_flags->tf_dump_graph_prefix = "/tmp/";
-
   mark_for_compilation_flags = new MarkForCompilationPassFlags;
   mark_for_compilation_flags->tf_xla_auto_jit = 0;
   mark_for_compilation_flags->tf_xla_min_cluster_size = 2;
@@ -114,7 +101,6 @@ void AllocateAndParseFlags() {
       Flag("tf_xla_always_defer_compilation",
           &ops_flags->tf_xla_always_defer_compilation, ""),
   });
-  AppendDumpGraphFlagsInternal(flag_list);
   AppendMarkForCompilationPassFlagsInternal(flag_list);
   xla::ParseFlagsFromEnvAndDieIfUnknown("TF_XLA_FLAGS", *flag_list);
 }
@@ -126,11 +112,6 @@ const BuildXlaOpsPassFlags& GetBuildXlaOpsPassFlags() {
   return *build_ops_flags;
 }
 
-DumpGraphFlags* GetDumpGraphFlags() {
-  std::call_once(flags_init, &AllocateAndParseFlags);
-  return dump_graph_flags;
-}
-
 MarkForCompilationPassFlags* GetMarkForCompilationPassFlags() {
   std::call_once(flags_init, &AllocateAndParseFlags);
   return mark_for_compilation_flags;
@@ -151,9 +132,4 @@ void AppendMarkForCompilationPassFlags(std::vector<Flag>* flag_list) {
   AppendMarkForCompilationPassFlagsInternal(flag_list);
 }
 
-void AppendDumpGraphFlags(std::vector<Flag>* flag_list) {
-  std::call_once(flags_init, &AllocateAndParseFlags);
-  AppendDumpGraphFlagsInternal(flag_list);
-}
-
 }  // namespace tensorflow

@@ -83,12 +83,6 @@ struct BuildXlaOpsPassFlags {
   bool tf_xla_enable_lazy_compilation;
 };
 
-// Flags for the XLA bridge's dump_graph module.
-struct DumpGraphFlags {
-  // Path prefix to which graphs dumped during debugging should be written.
-  string tf_dump_graph_prefix;
-};
-
 // Return a pointer to the DumpGraphFlags struct;
 // repeated calls return the same pointer.
 // This should be called only after Flags::Parse() has returned.
@@ -100,7 +94,6 @@ MarkForCompilationPassFlags* GetMarkForCompilationPassFlags();
 const BuildXlaOpsPassFlags& GetBuildXlaOpsPassFlags();
 XlaDeviceFlags* GetXlaDeviceFlags();
 const XlaOpsCommonFlags& GetXlaOpsCommonFlags();
-DumpGraphFlags* GetDumpGraphFlags();
 
 // Appends the flag definitions associated with
 // MarkForCompilationPassFlags/DumpGraphFlags to `flag_list`.
@@ -108,8 +101,6 @@ DumpGraphFlags* GetDumpGraphFlags();
 // Has the side-effect of parsing TF_XLA_FLAGS if that hasn't happened yet.
 void AppendMarkForCompilationPassFlags(
     std::vector<tensorflow::Flag>* flag_list);
-void AppendDumpGraphFlags(std::vector<tensorflow::Flag>* flag_list);
-
 }  // namespace tensorflow
 
 #endif  // TENSORFLOW_COMPILER_JIT_FLAGS_H_

@@ -27,12 +27,12 @@ limitations under the License.
 #include "tensorflow/compiler/jit/flags.h"
 #include "tensorflow/compiler/jit/xla_cluster_util.h"
 #include "tensorflow/compiler/tf2xla/cc/ops/xla_ops.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/xla/status_macros.h"
 #include "tensorflow/core/common_runtime/shape_refiner.h"
 #include "tensorflow/core/graph/algorithm.h"
 #include "tensorflow/core/public/session_options.h"
 #include "tensorflow/core/util/device_name_utils.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 namespace {
@@ -375,15 +375,15 @@ Status IncreaseDynamismForAutoJitPass::Run(
     const GraphOptimizationPassOptions& options) {
   MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
   if (flags->tf_xla_clustering_debug) {
-    dump_graph::DumpGraphToFile("before_increase_dynamism_for_auto_jit_pass",
-                                **options.graph, options.flib_def);
+    DumpGraphToFile("before_increase_dynamism_for_auto_jit_pass",
+                    **options.graph, options.flib_def);
   }
 
   bool changed;
   TF_RETURN_IF_ERROR(FindAndRewriteSlices(options.graph->get(), &changed));
   if (changed && flags->tf_xla_clustering_debug) {
-    dump_graph::DumpGraphToFile("increase_dynamism_for_auto_jit_pass",
-                                **options.graph, options.flib_def);
+    DumpGraphToFile("increase_dynamism_for_auto_jit_pass", **options.graph,
+                    options.flib_def);
   }
 
   return Status::OK();

@@ -31,7 +31,6 @@ limitations under the License.
 #include "tensorflow/compiler/jit/union_find.h"
 #include "tensorflow/compiler/jit/xla_cluster_util.h"
 #include "tensorflow/compiler/tf2xla/const_analysis.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/resource_operation_table.h"
 #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
 #include "tensorflow/compiler/xla/util.h"
@@ -48,6 +47,7 @@ limitations under the License.
 #include "tensorflow/core/lib/gtl/cleanup.h"
 #include "tensorflow/core/lib/strings/stringprintf.h"
 #include "tensorflow/core/public/version.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 
@@ -1273,8 +1273,8 @@ Status MarkForCompilationPass::RunImpl(
   std::unordered_map<int, string> cluster_names;
 
   if (flags->tf_xla_clustering_debug) {
-    dump_graph::DumpGraphToFile("before_mark_for_compilation", **options.graph,
-                                options.flib_def);
+    DumpGraphToFile("before_mark_for_compilation", **options.graph,
+                    options.flib_def);
   }
 
   absl::flat_hash_map<int, std::pair<bool, string>>
@@ -1326,8 +1326,7 @@ Status MarkForCompilationPass::RunImpl(
   }
 
   if (flags->tf_xla_clustering_debug) {
-    dump_graph::DumpGraphToFile("mark_for_compilation", **options.graph,
-                                options.flib_def);
+    DumpGraphToFile("mark_for_compilation", **options.graph, options.flib_def);
 
     // We also dump out an annoated version of the TF graph where the nodes
     // names are prefixed with the cluster names. This can help visualizing the
@@ -1349,8 +1348,8 @@ Status MarkForCompilationPass::RunImpl(
       }
     }
 
-    dump_graph::DumpGraphToFile("mark_for_compilation_annotated", new_graph,
-                                options.flib_def);
+    DumpGraphToFile("mark_for_compilation_annotated", new_graph,
+                    options.flib_def);
   }
 
   VLogClusteringSummary(*graph);

@@ -16,10 +16,10 @@ limitations under the License.
 #include "tensorflow/compiler/jit/shape_inference.h"
 
 #include "tensorflow/compiler/jit/shape_inference_helpers.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/core/common_runtime/shape_refiner.h"
 #include "tensorflow/core/framework/shape_inference.h"
 #include "tensorflow/core/graph/algorithm.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 

@@ -19,7 +19,6 @@ limitations under the License.
 
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_join.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/shape_util.h"
 #include "tensorflow/compiler/tf2xla/type_util.h"
 #include "tensorflow/compiler/tf2xla/xla_context.h"
@@ -35,6 +34,7 @@ limitations under the License.
 #include "tensorflow/core/platform/env.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/public/version.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 

@@ -23,7 +23,6 @@ limitations under the License.
 #include "tensorflow/compiler/jit/xla_compile_on_demand_op.h"
 #include "tensorflow/compiler/jit/xla_device_context.h"
 #include "tensorflow/compiler/jit/xla_device_ops.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/shape_util.h"
 #include "tensorflow/compiler/tf2xla/xla_op_registry.h"
 #include "tensorflow/compiler/xla/client/client_library.h"
@@ -51,6 +50,7 @@ limitations under the License.
 #include "tensorflow/core/public/session_options.h"
 #include "tensorflow/core/public/version.h"
 #include "tensorflow/core/util/device_name_utils.h"
+#include "tensorflow/core/util/dump_graph.h"
 #include "tensorflow/core/util/ptr_util.h"
 #include "tensorflow/core/util/stream_executor_util.h"
 

@@ -84,7 +84,6 @@ cc_library(
     visibility = ["//visibility:public"],
     deps = [
         ":common",
-        ":dump_graph",
         ":functionalize_control_flow",
         ":tf2xla_proto",
         ":tf2xla_util",
@@ -199,7 +198,6 @@ cc_library(
     visibility = [":friends"],
     deps = [
         ":common",
-        ":dump_graph",
         ":host_compute_metadata_proto",
         ":sharding_util",
         ":side_effect_util",
@@ -449,23 +447,6 @@ tf_cc_test(
     ],
 )
 
-cc_library(
-    name = "dump_graph",
-    srcs = [
-        "dump_graph.cc",
-    ],
-    hdrs = [
-        "dump_graph.h",
-    ],
-    visibility = ["//visibility:public"],
-    deps = [
-        "//tensorflow/compiler/jit:flags",
-        "//tensorflow/core:framework",
-        "//tensorflow/core:graph",
-        "//tensorflow/core:protos_all_cc",
-    ],
-)
-
 cc_library(
     name = "functionalize_control_flow_util",
     srcs = [
@@ -497,7 +478,6 @@ cc_library(
         ":functionalize_control_flow_util",
         ":tf2xla_util",
         "//tensorflow/compiler/jit:union_find",
-        "//tensorflow/compiler/tf2xla:dump_graph",
         "//tensorflow/compiler/tf2xla/ops:xla_ops",
         "//tensorflow/compiler/xla:status_macros",
         "//tensorflow/core:core_cpu",
@@ -525,7 +505,6 @@ cc_library(
         ":functionalize_while",
         ":tf2xla_util",
         "//tensorflow/compiler/jit:union_find",
-        "//tensorflow/compiler/tf2xla:dump_graph",
        "//tensorflow/compiler/tf2xla/ops:xla_ops",
         "//tensorflow/compiler/xla:status_macros",
         "//tensorflow/core:core_cpu",
@@ -562,7 +541,6 @@ cc_library(
         ":functionalize_control_flow_util",
         ":tf2xla_util",
         "//tensorflow/compiler/jit:union_find",
-        "//tensorflow/compiler/tf2xla:dump_graph",
         "//tensorflow/compiler/tf2xla/ops:xla_ops",
         "//tensorflow/compiler/xla:status_macros",
         "//tensorflow/core:core_cpu",

@@ -1,44 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// Helper functions for dumping Graphs, GraphDefs, and FunctionDefs to files for
-// debugging.
-
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
-
-#include "tensorflow/compiler/jit/flags.h"
-#include "tensorflow/core/util/dump_graph.h"
-
-namespace tensorflow {
-namespace dump_graph {
-
-string DumpGraphDefToFile(const string& name, GraphDef const& graph_def) {
-  return tensorflow::DumpGraphDefToFile(
-      name, graph_def, GetDumpGraphFlags()->tf_dump_graph_prefix);
-}
-
-string DumpGraphToFile(const string& name, Graph const& graph,
-                       const FunctionLibraryDefinition* flib_def) {
-  return tensorflow::DumpGraphToFile(name, graph, flib_def,
-                                     GetDumpGraphFlags()->tf_dump_graph_prefix);
-}
-
-string DumpFunctionDefToFile(const string& name, FunctionDef const& fdef) {
-  return tensorflow::DumpFunctionDefToFile(
-      name, fdef, GetDumpGraphFlags()->tf_dump_graph_prefix);
-}
-
-}  // namespace dump_graph
-}  // namespace tensorflow

@@ -1,50 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-// Helper functions for dumping Graphs, GraphDefs, and FunctionDefs to files for
-// debugging.
-
-#ifndef TENSORFLOW_COMPILER_TF2XLA_DUMP_GRAPH_H_
-#define TENSORFLOW_COMPILER_TF2XLA_DUMP_GRAPH_H_
-
-#include "tensorflow/core/framework/function.h"
-#include "tensorflow/core/framework/graph.pb.h"
-#include "tensorflow/core/graph/graph.h"
-
-namespace tensorflow {
-namespace dump_graph {
-
-// Dumps 'graph_def' to a file, as a GraphDef text proto. Returns the file name
-// chosen.
-//
-// Automatically picks a file name. Prefixes 'name' with the value of the
-// --tf_dump_graph_prefix flag and suffixes it with ".pbtxt" to form a name.
-// If a graph has already been dumped by this process with the same name,
-// suffixes with "_n.pbtxt", where 'n' is a sequence number.
-string DumpGraphDefToFile(const string& name, GraphDef const& graph_def);
-
-// Similar to DumpGraphDefToFile, but builds the GraphDef to dump from a 'graph'
-// and an optional function library 'flib_def'. Returns the file name chosen.
-string DumpGraphToFile(const string& name, Graph const& graph,
-                       const FunctionLibraryDefinition* flib_def = nullptr);
-
-// Similar to DumpGraphDefToFile, but dumps a function as a FunctionDef text
-// proto. Returns the file name chosen.
-string DumpFunctionDefToFile(const string& name, FunctionDef const& fdef);
-
-}  // namespace dump_graph
-}  // namespace tensorflow
-
-#endif  // TENSORFLOW_COMPILER_TF2XLA_DUMP_GRAPH_H_

@@ -25,7 +25,6 @@ limitations under the License.
 #include "absl/strings/str_join.h"
 #include "absl/types/optional.h"
 #include "tensorflow/compiler/jit/union_find.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
 #include "tensorflow/compiler/tf2xla/tf2xla_util.h"
 #include "tensorflow/core/common_runtime/function.h"
@@ -37,6 +36,7 @@ limitations under the License.
 #include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/lib/hash/hash.h"
 #include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 using xla::StatusOr;
 
@@ -735,7 +735,7 @@ Status Conditional::BuildIfNode(Graph* graph,
 
   VLOG(3) << "FunctionalizeControlFlow (" << branch_name[branch_index]
           << "): "
-          << dump_graph::DumpGraphToFile(
+          << DumpGraphToFile(
                  "functionalize_cond_body_" + branch_name[branch_index],
                  *bodies_[branch_index], nullptr);
 
@@ -1516,9 +1516,8 @@ void FunctionalizeCond::DumpGraphWithCondState(const string& name) {
                            state_map_.AncestorStateToString(n)));
   }
   LOG(INFO) << "FunctionalizeControlFlow (" << name << "): "
-            << dump_graph::DumpGraphToFile(
-                   absl::StrCat("functionalize_cond_", name), *graph_,
-                   library_);
+            << DumpGraphToFile(absl::StrCat("functionalize_cond_", name),
+                               *graph_, library_);
 }
 
 void FunctionalizeCond::AddSwitchId(int switch_id) {

@@ -24,7 +24,6 @@ limitations under the License.
 #include "absl/memory/memory.h"
 #include "absl/types/optional.h"
 #include "tensorflow/compiler/jit/union_find.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/functionalize_cond.h"
 #include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
 #include "tensorflow/compiler/tf2xla/functionalize_while.h"
@@ -43,6 +42,7 @@ limitations under the License.
 #include "tensorflow/core/lib/gtl/cleanup.h"
 #include "tensorflow/core/public/session_options.h"
 #include "tensorflow/core/public/version.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 
@@ -50,8 +50,7 @@ Status FunctionalizeControlFlow(const FunctionLibraryDefinition* lookup_library,
                                 Graph* graph,
                                 FunctionLibraryDefinition* library) {
   VLOG(2) << "FunctionalizeControlFlow (initial): "
-          << dump_graph::DumpGraphToFile("functionalize_initial", *graph,
-                                         library);
+          << DumpGraphToFile("functionalize_initial", *graph, library);
 
   // Functionalize and remove while loops from graph.
   TF_RETURN_IF_ERROR(FunctionalizeWhileLoop(lookup_library, graph, library));
@@ -62,8 +61,7 @@ Status FunctionalizeControlFlow(const FunctionLibraryDefinition* lookup_library,
   TF_RETURN_IF_ERROR(FunctionalizeCond(graph, library));
 
   VLOG(2) << "FunctionalizeControlFlow (final): "
-          << dump_graph::DumpGraphToFile("functionalize_final", *graph,
-                                         library);
+          << DumpGraphToFile("functionalize_final", *graph, library);
 
   return Status::OK();
 }
@@ -200,13 +198,13 @@ Status FunctionalizeControlFlowForFunction(
 
   // Functionalize the function body.
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile(
+    DumpGraphToFile(
         absl::StrCat("functionalize_control_flow_before_fdef_", func_name),
         *g, fld);
   }
   TF_RETURN_IF_ERROR(FunctionalizeControlFlow(g, fld));
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile(
+    DumpGraphToFile(
         absl::StrCat("functionalize_control_flow_after_fdef_", func_name), *g,
         fld);
   }
@@ -234,8 +232,8 @@ Status FunctionalizeControlFlowPass::Run(
     const GraphOptimizationPassOptions& options) {
   Graph* graph = options.graph->get();
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile("functionalize_control_flow_before", *graph,
-                                options.flib_def);
+    DumpGraphToFile("functionalize_control_flow_before", *graph,
+                    options.flib_def);
   }
   std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
       new ProcessFunctionLibraryRuntime(
@@ -279,8 +277,8 @@ Status FunctionalizeControlFlowPass::Run(
   }
 
   if (VLOG_IS_ON(4)) {
-    dump_graph::DumpGraphToFile("functionalize_control_flow_after", *graph,
-                                options.flib_def);
+    DumpGraphToFile("functionalize_control_flow_after", *graph,
+                    options.flib_def);
   }
   return Status::OK();
 }

@@ -24,7 +24,6 @@ limitations under the License.
 #include "absl/memory/memory.h"
 #include "absl/types/optional.h"
 #include "tensorflow/compiler/jit/union_find.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/functionalize_cond.h"
 #include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
 #include "tensorflow/compiler/tf2xla/tf2xla_util.h"
@@ -36,6 +35,7 @@ limitations under the License.
 #include "tensorflow/core/graph/control_flow.h"
 #include "tensorflow/core/graph/node_builder.h"
 #include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 namespace {
@@ -293,8 +293,7 @@ Status FunctionalizeLoop(const FunctionLibraryDefinition* lookup_library,
                          Graph* graph, Frame* frame,
                          FunctionLibraryDefinition* library) {
   VLOG(2) << "Frame " << frame->name << " before: "
-          << dump_graph::DumpGraphToFile("functionalize_before", *graph,
-                                         library);
+          << DumpGraphToFile("functionalize_before", *graph, library);
 
   // Split loop-varying Enter nodes with multiple successors. If the same
   // Tensor is fed as input to multiple loop arguments, we may end up with a
@@ -490,8 +489,8 @@ Status FunctionalizeLoop(const FunctionLibraryDefinition* lookup_library,
   TF_RETURN_IF_ERROR(FunctionalizeCond(body_graph.get(), library));
 
   VLOG(2) << "Frame " << frame->name << " condition: "
-          << dump_graph::DumpGraphToFile("loop_condition", *cond_graph, library)
-          << " body: " << dump_graph::DumpGraphToFile("loop_body", *body_graph);
+          << DumpGraphToFile("loop_condition", *cond_graph, library)
+          << " body: " << DumpGraphToFile("loop_body", *body_graph);
 
   static std::atomic<int64> sequence_num(0LL);
   int64 id = ++sequence_num;
@@ -585,8 +584,7 @@ Status FunctionalizeLoop(const FunctionLibraryDefinition* lookup_library,
   frame->parent->nodes.insert(while_node);
 
   VLOG(2) << "Frame " << frame->name << " after: "
-          << dump_graph::DumpGraphToFile("functionalize_after", *graph,
-                                         library);
+          << DumpGraphToFile("functionalize_after", *graph, library);
 
   return Status::OK();
 }

@@ -19,7 +19,6 @@ limitations under the License.
 #include <numeric>
 #include <vector>
 #include "tensorflow/compiler/tf2xla/const_analysis.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/literal_util.h"
 #include "tensorflow/compiler/tf2xla/shape_util.h"
 #include "tensorflow/compiler/tf2xla/side_effect_util.h"
@@ -46,6 +45,7 @@ limitations under the License.
 #include "tensorflow/core/lib/hash/hash.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/public/version.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 

@@ -24,7 +24,6 @@ limitations under the License.
 
 #include "absl/strings/str_cat.h"
 #include "absl/strings/str_join.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/functionalize_control_flow.h"
 #include "tensorflow/compiler/tf2xla/shape_util.h"
 #include "tensorflow/compiler/tf2xla/tf2xla_util.h"
@@ -45,6 +44,7 @@ limitations under the License.
 #include "tensorflow/core/lib/core/errors.h"
 #include "tensorflow/core/platform/logging.h"
 #include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 
@@ -164,12 +164,10 @@ Status RewriteAndPruneGraph(
   std::unordered_set<const Node*> retval_nodes;
   TF_RETURN_IF_ERROR(
       AddRetvalNodes(graph, node_map, config.fetch(), &retval_nodes));
-  VLOG(2) << "Post rewrite: "
-          << dump_graph::DumpGraphToFile("tf2xla_post_rewrite", *graph);
+  VLOG(2) << "Post rewrite: " << DumpGraphToFile("tf2xla_post_rewrite", *graph);
   PruneForReverseReachability(graph, retval_nodes);
   FixupSourceAndSinkEdges(graph);
-  VLOG(2) << "Post prune: "
-          << dump_graph::DumpGraphToFile("tfcompile_post_prune", *graph);
+  VLOG(2) << "Post prune: " << DumpGraphToFile("tfcompile_post_prune", *graph);
   // Sanity-check, to make sure the feeds and fetches still exist post-pruning.
   std::set<string> missing_feeds, missing_fetches;
   for (const tf2xla::Feed& feed : config.feed()) {

@@ -19,7 +19,6 @@ limitations under the License.
 #include <vector>
 
 #include "absl/memory/memory.h"
-#include "tensorflow/compiler/tf2xla/dump_graph.h"
 #include "tensorflow/compiler/tf2xla/graph_compiler.h"
 #include "tensorflow/compiler/tf2xla/shape_util.h"
 #include "tensorflow/compiler/tf2xla/sharding_util.h"
@@ -48,6 +47,7 @@ limitations under the License.
 #include "tensorflow/core/lib/gtl/cleanup.h"
 #include "tensorflow/core/lib/hash/hash.h"
 #include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/util/dump_graph.h"
 
 namespace tensorflow {
 namespace {
@@ -597,7 +597,7 @@ Status XlaCompiler::CompileFunction(
 
   if (VLOG_IS_ON(2)) {
     VLOG(2) << "XlaCompiler::CompileFunction: "
-            << dump_graph::DumpGraphToFile(
+            << DumpGraphToFile(
                    absl::StrCat("xla_compile_function_", function_id), *graph);
   }
 
@@ -1031,9 +1031,8 @@ Status XlaCompiler::CompileGraph(
       graph.get(), options_.flib_def, local_flib_def_.get()));
   if (VLOG_IS_ON(2)) {
     VLOG(2) << "XlaCompiler::CompileGraph: "
-            << dump_graph::DumpGraphToFile(
-                   absl::StrCat("xla_compile_graph_", name), *graph,
-                   flib_runtime_->GetFunctionLibraryDefinition());
+            << DumpGraphToFile(absl::StrCat("xla_compile_graph_", name), *graph,
+                               flib_runtime_->GetFunctionLibraryDefinition());
   }
 
   // Report the error here if initialization failed.