STT-tensorflow/tensorflow/compiler/tf2xla/xla_tpu_backend.cc
Frank Chen bdc490c868 [TPU] Colocate tpu_compilation_device registration with other registration in compiler/tf2xla
This is part of a series of changes that moves TPU-related code to better locations so that the TensorFlow build is not confused and TPU-based TensorFlow can be built without the --define=framework_shared_object=false flag.

PiperOrigin-RevId: 353341905
Change-Id: I7cd820aab37c3d4f1a838967a87dc462491e446f
2021-01-22 16:51:35 -08:00


/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/tpu/tpu_node_device_util.h"
namespace tensorflow {
namespace {
bool RegisterTpuXlaBackend() {
REGISTER_XLA_BACKEND(DEVICE_TPU_XLA_JIT, kTpuAllTypes, TpuOpFilter);
return true;
}
static bool tpu_xla_backend_registered = RegisterTpuXlaBackend();
} // namespace
} // namespace tensorflow
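
For context, the third argument to REGISTER_XLA_BACKEND is an op filter: a predicate over kernel definitions that decides which XLA ops get registered for the backend's JIT device. Below is a minimal sketch of what such a filter might look like, assuming the bool(KernelDef*) filter signature used by the XLA op registry; the ExampleTpuOpFilter name and the excluded op are illustrative placeholders, not the actual contents of TpuOpFilter.

// Sketch only: a backend op filter under the assumed bool(KernelDef*)
// signature. The exclusion below is hypothetical, not the real TPU filter.
#include "tensorflow/core/framework/kernel_def.pb.h"

namespace tensorflow {

bool ExampleTpuOpFilter(KernelDef* kdef) {
  // Hypothetical exclusion: skip registering an op this backend cannot run.
  if (kdef->op() == "SomeUnsupportedOp") {
    return false;  // Do not register this kernel for the JIT device.
  }
  return true;  // Register everything else.
}

}  // namespace tensorflow

Such a filter would be passed to REGISTER_XLA_BACKEND in place of TpuOpFilter; the macro invokes it once per candidate kernel definition when the backend is registered.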