/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/stream_executor/tpu/tpu_topology.h"
|
|
|
|
#include "tensorflow/core/tpu/tpu_api.h"
|
|
|
|
namespace tensorflow {
|
|
namespace tpu {
|
|
|
|
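
// The classes below are thin C++ wrappers around the TPU C API: each method
// forwards to the corresponding *Fn function pointer resolved through
// tpu::ExecutorApiFn().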

TpuDimensionsExternal TpuCoreLocationExternal::chip_coordinates() const {
  int x, y, z;
  tpu::ExecutorApiFn()->TpuCoreLocation_ChipCoordinatesFn(core_location_, &x,
                                                          &y, &z);
  return {x, y, z};
}

TpuDimensionsExternal TpuCoreLocationExternal::host_coordinates() const {
  int x, y, z;
  tpu::ExecutorApiFn()->TpuCoreLocation_HostCoordinatesFn(core_location_, &x,
                                                          &y, &z);
  return {x, y, z};
}

int32 TpuCoreLocationExternal::index() const {
  return tpu::ExecutorApiFn()->TpuCoreLocation_IndexFn(core_location_);
}

int32 TpuCoreLocationExternal::Id() const {
  return tpu::ExecutorApiFn()->TpuCoreLocation_IdFn(core_location_);
}

int32 TpuHostLocationExternal::Id() const {
  return tpu::ExecutorApiFn()->TpuHostLocation_IdFn(host_location_);
}
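
// Queries how many cores of the requested type the host has, fills a buffer
// of opaque SE_TpuTopology_Core* handles, and wraps each handle in a
// TpuCoreLocationExternal.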
std::vector<TpuCoreLocationExternal> TpuHostLocationExternal::Cores(
    TpuCoreTypeEnum core_type) const {
  int num_cores = tpu::ExecutorApiFn()->TpuHostLocation_NumCoresFn(
      host_location_, core_type);
  std::vector<SE_TpuTopology_Core*> core_ptrs(num_cores);
  tpu::ExecutorApiFn()->TpuHostLocation_CoresFn(host_location_, core_type,
                                                core_ptrs.data());
  std::vector<TpuCoreLocationExternal> result;
  result.reserve(num_cores);
  for (SE_TpuTopology_Core* ptr : core_ptrs) {
    result.emplace_back(ptr);
  }
  return result;
}

int32 TpuTopologyExternal::LogicalDevicesPerHost(
    TpuCoreTypeEnum core_type) const {
  return tpu::ExecutorApiFn()->TpuTopology_LogicalDevicesPerHostFn(topology_,
                                                                   core_type);
}

int32 TpuTopologyExternal::LogicalDevicesPerChip(
    TpuCoreTypeEnum core_type) const {
  return tpu::ExecutorApiFn()->TpuTopology_LogicalDevicesPerChipFn(topology_,
                                                                   core_type);
}

int32 TpuTopologyExternal::HostCount() const {
  return tpu::ExecutorApiFn()->TpuTopology_HostCountFn(topology_);
}

int32 TpuTopologyExternal::ChipsPerHost() const {
  return tpu::ExecutorApiFn()->TpuTopology_ChipsPerHostFn(topology_);
}
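
// Returns the x/y/z extents of the chip grid as reported by the underlying
// C API.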
TpuTopologyChipBoundsExternal TpuTopologyExternal::chip_bounds() const {
  return {tpu::ExecutorApiFn()->TpuTopology_ChipBounds_XFn(topology_),
          tpu::ExecutorApiFn()->TpuTopology_ChipBounds_YFn(topology_),
          tpu::ExecutorApiFn()->TpuTopology_ChipBounds_ZFn(topology_)};
}

bool TpuTopologyExternal::HasChip(int x, int y, int z) const {
  return tpu::ExecutorApiFn()->TpuTopology_HasChipFn(topology_, x, y, z);
}

TpuCoreLocationExternal TpuTopologyExternal::Core(int x, int y, int z,
                                                  TpuCoreTypeEnum core_type,
                                                  int index) const {
  return TpuCoreLocationExternal(tpu::ExecutorApiFn()->TpuTopology_CoreFn(
      topology_, x, y, z, core_type, index));
}
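
// Same count-then-fill pattern as TpuHostLocationExternal::Cores, applied to
// every core of the given type in the topology.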
std::vector<TpuCoreLocationExternal> TpuTopologyExternal::cores(
    TpuCoreTypeEnum core_type) const {
  int num_cores =
      tpu::ExecutorApiFn()->TpuTopology_NumCoresFn(topology_, core_type);
  std::vector<SE_TpuTopology_Core*> core_ptrs(num_cores);
  tpu::ExecutorApiFn()->TpuTopology_CoresFn(topology_, core_type,
                                            core_ptrs.data());
  std::vector<TpuCoreLocationExternal> result;
  result.reserve(num_cores);
  for (SE_TpuTopology_Core* ptr : core_ptrs) {
    result.emplace_back(ptr);
  }
  return result;
}

int TpuTopologyExternal::IdForHost(TpuDimensionsExternal host) const {
  return tpu::ExecutorApiFn()->TpuTopology_IdForHostFn(topology_, host.x,
                                                       host.y, host.z);
}

TpuVersionEnum TpuTopologyExternal::version() const {
  return tpu::ExecutorApiFn()->TpuTopology_VersionFn(topology_);
}

std::string TpuVersionEnumToString(TpuVersionEnum version) {
  switch (version) {
    case kUnknownTpuVersion:
      return "Unknown TPU version";
    case kTpuV2:
      return "TPU v2";
    case kTpuV3:
      return "TPU v3";
    case kTpuV4:
      return "TPU v4";
  }
}

}  // namespace tpu
}  // namespace tensorflow