STT-tensorflow/tensorflow/compiler/jit/xla_tpu_device.h
Frank Chen c5f474d1c8 [TPU] Move TPU node and system device initializers to compiler/jit
This is part of a series of changes that moves TPU-related code to better locations so that the TensorFlow build does not get confused and TPU-based TensorFlow can be built without the --define=framework_shared_object=false flag.

PiperOrigin-RevId: 352726495
Change-Id: Idc23455a8289c4a2546edad9ca59e9207a7492ce
2021-01-19 22:43:21 -08:00


/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_JIT_XLA_TPU_DEVICE_H_
#define TENSORFLOW_COMPILER_JIT_XLA_TPU_DEVICE_H_

#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/public/session_options.h"

namespace tensorflow {

// Registers the copy function used for transfers between TPU devices.
void RegisterTpuDeviceToDeviceCopy();

// Registers the per-core TPU node devices and configures their behavior via
// the given flags.
void RegisterTpuNodeDevice(
    bool tpu_autoclustering, bool tpu_xla_device_failure_closes_chips,
    bool tpu_use_substreams_for_cross_tpu_device_transfers);

// Registers the TPU_SYSTEM device used for TPU system initialization.
void RegisterTpuSystemDevice();

}  // namespace tensorflow

#endif  // TENSORFLOW_COMPILER_JIT_XLA_TPU_DEVICE_H_
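
Illustrative usage (not part of the original file): a minimal sketch of how a binary that links in the TPU support libraries might invoke these registration hooks at startup. The function declarations come from the header above; the main() wrapper, the call order, and the flag values are assumptions for illustration only.

#include "tensorflow/compiler/jit/xla_tpu_device.h"

int main(int argc, char** argv) {
  // Register the TPU_SYSTEM device, the per-core TPU node devices, and the
  // TPU-to-TPU copy function before creating any sessions or devices.
  // NOTE: the flag values below are illustrative, not TensorFlow's defaults.
  tensorflow::RegisterTpuSystemDevice();
  tensorflow::RegisterTpuNodeDevice(
      /*tpu_autoclustering=*/false,
      /*tpu_xla_device_failure_closes_chips=*/true,
      /*tpu_use_substreams_for_cross_tpu_device_transfers=*/false);
  tensorflow::RegisterTpuDeviceToDeviceCopy();

  // ... build and run TensorFlow graphs as usual ...
  return 0;
}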