Introduce some common constants for TPU.

PiperOrigin-RevId: 310672767
Change-Id: I04794ac10cd6b4d03c5dc0221a17bd35ee5e650f
Frank Chen 2020-05-08 19:49:18 -07:00 committed by TensorFlower Gardener
parent ef2112e415
commit 4909933889
5 changed files with 208 additions and 0 deletions

tensorflow/core/tpu/BUILD

@@ -1,6 +1,10 @@
# Description: Utilities for TPU Operations
package(
default_visibility = [
"//tensorflow/core/tpu:__subpackages__",
"//tensorflow/stream_executor/tpu:__subpackages__",
],
licenses = ["notice"], # Apache 2.0
)
@@ -32,3 +36,18 @@ cc_library(
"//tensorflow/core/protobuf/tpu:tpu_embedding_output_layout_proto_cc",
],
)
cc_library(
name = "tpu_defs",
srcs = ["tpu_defs.cc"],
hdrs = ["tpu_defs.h"],
)
cc_library(
name = "tpu_init_mode",
srcs = ["tpu_init_mode.cc"],
hdrs = ["tpu_init_mode.h"],
deps = [
"//tensorflow/core:lib",
],
)

tensorflow/core/tpu/tpu_defs.cc

@@ -0,0 +1,28 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
const char* const DEVICE_TPU_NODE = "TPU";
const char* const TPU_FAST_MEM_ATTR = "_TPU_FAST_MEM";
const char* const DEVICE_TPU_REPLICATED_CORE = "TPU_REPLICATED_CORE";
const char* const DEVICE_TPU_SYSTEM = "TPU_SYSTEM";
const char* const DEVICE_TPU_XLA_JIT = "XLA_TPU_JIT";
const char* const TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR =
"_mirrored_variable_indices";
} // namespace tensorflow

tensorflow/core/tpu/tpu_defs.h

@@ -0,0 +1,48 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Common definitions related to TPUs.
#ifndef TENSORFLOW_CORE_TPU_TPU_DEFS_H_
#define TENSORFLOW_CORE_TPU_TPU_DEFS_H_
namespace tensorflow {
// Name of the TPU device, which corresponds to a single core.
extern const char* const DEVICE_TPU_NODE; // "TPU";
// The TPU_REPLICATED_CORE device is a virtual device corresponding to one core
// of a replicated TPU computation. Only valid within the body of a
// TPUReplicate computation.
extern const char* const DEVICE_TPU_REPLICATED_CORE;
extern const char* const DEVICE_TPU_SYSTEM; // "TPU_SYSTEM";
// Name of the XLA_TPU_JIT compilation device, which is an internal device to
// compile graphs for TPU. Not registered as a device; no operators can be
// assigned to this device by a user.
extern const char* const DEVICE_TPU_XLA_JIT; // "XLA_TPU_JIT";
// Attribute used internally to pass "is_mirrored_variable" attribute on
// TPUReplicatedInput nodes to _TPUReplicate.
extern const char* const TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR;
// Attribute used internally to annotate ops which might consume a TPU
// FastMem variable.
extern const char* const TPU_FAST_MEM_ATTR; // "_TPU_FAST_MEM"
} // namespace tensorflow
#endif // TENSORFLOW_CORE_TPU_TPU_DEFS_H_
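
For readers of this change, a minimal usage sketch (not part of the commit) of how the new constants compose into full device names; the helper names and the "/job:worker/replica:0/task:0" prefix below are illustrative assumptions, only the constants come from tpu_defs.h:

#include <string>
#include "tensorflow/core/tpu/tpu_defs.h"

namespace tensorflow {

// Illustrative helper: build a fully qualified device name for one TPU core,
// e.g. "/job:worker/replica:0/task:0/device:TPU:0" for core == 0.
std::string ExampleTpuDeviceName(int core) {
  const std::string kTaskPrefix = "/job:worker/replica:0/task:0";  // assumed
  return kTaskPrefix + "/device:" + DEVICE_TPU_NODE + ":" + std::to_string(core);
}

// Illustrative helper: the system device used for whole-topology operations.
std::string ExampleTpuSystemDeviceName() {
  return std::string("/device:") + DEVICE_TPU_SYSTEM + ":0";
}

}  // namespace tensorflow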

tensorflow/core/tpu/tpu_init_mode.cc

@@ -0,0 +1,66 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/tpu/tpu_init_mode.h"
#include <atomic>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace {
mutex init_mode_mutex(LINKER_INITIALIZED);
TPUInitMode init_mode TF_GUARDED_BY(init_mode_mutex);
} // namespace
namespace test {
void ForceSetTPUInitMode(const TPUInitMode mode) {
mutex_lock l(init_mode_mutex);
init_mode = mode;
}
} // namespace test
Status SetTPUInitMode(const TPUInitMode mode) {
if (mode == TPUInitMode::kNone) {
return errors::InvalidArgument("State cannot be set to: ",
static_cast<int>(mode));
}
{
mutex_lock l(init_mode_mutex);
if (init_mode != TPUInitMode::kNone && mode != init_mode) {
return errors::FailedPrecondition(
"TPUInit already attempted with mode: ", static_cast<int>(init_mode),
" and cannot be changed to: ", static_cast<int>(mode),
". You are most probably trying to initialize the TPU system, both "
"using the explicit API and using an initialization Op within the "
"graph; please choose one. ");
}
init_mode = mode;
}
return Status::OK();
}
TPUInitMode GetTPUInitMode() {
mutex_lock l(init_mode_mutex);
return init_mode;
}
} // namespace tensorflow
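
To make the state machine enforced above concrete, a short sketch (illustrative only, not part of the commit) of what each call returns once a mode has been recorded:

#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/tpu/tpu_init_mode.h"

namespace tensorflow {

// Illustrative walk-through of SetTPUInitMode semantics.
void ExampleInitModeUsage() {
  Status s1 = SetTPUInitMode(TPUInitMode::kGlobal);   // OK: first mode wins.
  Status s2 = SetTPUInitMode(TPUInitMode::kGlobal);   // OK: same mode again.
  Status s3 = SetTPUInitMode(TPUInitMode::kRegular);  // FailedPrecondition.
  Status s4 = SetTPUInitMode(TPUInitMode::kNone);     // InvalidArgument.
  TPUInitMode current = GetTPUInitMode();             // TPUInitMode::kGlobal.
  (void)s1; (void)s2; (void)s3; (void)s4; (void)current;
}

}  // namespace tensorflow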

tensorflow/core/tpu/tpu_init_mode.h

@@ -0,0 +1,47 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_TPU_TPU_INIT_MODE_H_
#define TENSORFLOW_CORE_TPU_TPU_INIT_MODE_H_
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
enum class TPUInitMode : int { kNone, kGlobal, kRegular };
// Sets the TPU initialization mode appropriately.
//
// Requires that mode is not kNone, and mode doesn't transition kGlobal
// <-> kRegular.
//
// IMPLEMENTATION DETAILS:
// Used internally to record the current mode and type of API used for TPU
// initialization in a global static variable.
Status SetTPUInitMode(TPUInitMode mode);
// Returns the current TPUInitMode.
TPUInitMode GetTPUInitMode();
namespace test {
// Forces the tpu init mode to be changed.
void ForceSetTPUInitMode(TPUInitMode mode);
} // namespace test
} // namespace tensorflow
#endif // TENSORFLOW_CORE_TPU_TPU_INIT_MODE_H_
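
Because the recorded mode is process-global, a test that exercises SetTPUInitMode would need test::ForceSetTPUInitMode to restore the unset state afterwards; a hedged sketch, with a hypothetical helper name:

#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/tpu/tpu_init_mode.h"

namespace tensorflow {
namespace {

// Hypothetical test helper: drive the API, then put the global back to the
// default kNone so later tests start from a fresh, unset mode.
void ExerciseAndResetInitMode() {
  Status s = SetTPUInitMode(TPUInitMode::kRegular);
  // ... assertions on `s` and GetTPUInitMode() would go here ...
  (void)s;
  test::ForceSetTPUInitMode(TPUInitMode::kNone);
}

}  // namespace
}  // namespace tensorflow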