diff --git a/tensorflow/core/tpu/tpu_on_demand_compiler.cc b/tensorflow/core/tpu/tpu_on_demand_compiler.cc
index 75165609191..c9191314c95 100644
--- a/tensorflow/core/tpu/tpu_on_demand_compiler.cc
+++ b/tensorflow/core/tpu/tpu_on_demand_compiler.cc
@@ -220,15 +220,14 @@ class TpuCompiler : public Compiler {
       stream_executor::StreamExecutor* executor,
       const CompileOptions& options) override {
     XLA_HloModule hlo_module;
-    XLA_HloModule result;
-    auto cleanup = xla::MakeCleanup([&hlo_module, &result]() {
+    auto cleanup = xla::MakeCleanup([&hlo_module]() {
       stream_executor::tpu::SerializedProto_Free(hlo_module.proto);
-      stream_executor::tpu::SerializedProto_Free(result.proto);
       ApiConverter::Free(&hlo_module.module_config);
     });
     hlo_module.module_config = ApiConverter::ToC(module->config());
     hlo_module.proto = stream_executor::tpu::SerializeProto(module->ToProto());
     auto allocator = ApiConverter::ToC(options.device_allocator);
+    XLA_HloModule result;
     StatusHelper status;
     ExecutorApiFn()->TpuCompiler_RunHloPassesFn(
         compiler_, &hlo_module,
@@ -240,6 +239,7 @@ class TpuCompiler : public Compiler {
     }
     HloModuleProto result_proto =
         stream_executor::tpu::DeserializeProto<HloModuleProto>(result.proto);
+    stream_executor::tpu::SerializedProto_Free(result.proto);
     return HloModule::CreateFromProto(result_proto, module->config());
   }
 
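
The change narrows the scoped cleanup to `hlo_module` only: `result` is now declared immediately before `TpuCompiler_RunHloPassesFn` populates it, and `result.proto` is freed explicitly once it has been deserialized, so the early return on a failed status no longer leaves the cleanup lambda freeing a proto that was never produced. Below is a minimal, self-contained sketch of that ownership pattern; `SerializedBlob`, `Serialize`, `Deserialize`, `FreeBlob`, `RunPasses`, and `RunHloPassesSketch` are hypothetical stand-ins, not the real `stream_executor::tpu` helpers, and the rationale described here is inferred from the diff rather than stated in it.

// Sketch only: scope-exit cleanup covers state that is always initialized,
// while the conditionally produced result is freed explicitly after use.
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <string>

struct SerializedBlob {
  char* bytes = nullptr;
  std::size_t size = 0;
};

SerializedBlob Serialize(const std::string& s) {
  SerializedBlob b;
  b.size = s.size();
  b.bytes = static_cast<char*>(std::malloc(b.size));
  std::memcpy(b.bytes, s.data(), b.size);
  return b;
}

std::string Deserialize(const SerializedBlob& b) {
  return std::string(b.bytes, b.size);
}

void FreeBlob(SerializedBlob& b) {
  std::free(b.bytes);
  b.bytes = nullptr;
  b.size = 0;
}

// Stand-in for TpuCompiler_RunHloPassesFn: may fail without touching *out.
bool RunPasses(const SerializedBlob& in, SerializedBlob* out, bool fail) {
  if (fail) return false;
  *out = Serialize(Deserialize(in) + " (optimized)");
  return true;
}

std::string RunHloPassesSketch(const std::string& module, bool fail) {
  SerializedBlob input = Serialize(module);
  // Scope-exit cleanup frees only `input`, which is always initialized here.
  struct Cleanup {
    SerializedBlob& blob;
    ~Cleanup() { FreeBlob(blob); }
  } cleanup{input};

  SerializedBlob result;  // declared just before the call that populates it
  if (!RunPasses(input, &result, fail)) {
    return "";  // early return: `result` was never filled in, nothing to free
  }
  std::string deserialized = Deserialize(result);
  FreeBlob(result);  // freed explicitly, right after deserialization
  return deserialized;
}

int main() {
  return RunHloPassesSketch("hlo", /*fail=*/false) == "hlo (optimized)" ? 0 : 1;
}

The general shape is the same as in the patched function: keep scope-exit cleanups limited to state that is unconditionally valid, and pair any conditionally produced buffer with an explicit free at the point where it is known to exist.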