Moving GpuVersion to gpu_types.h

jerryyin 2019-07-18 14:31:05 +00:00
parent 4f249075c8
commit 38d95788df
6 changed files with 42 additions and 11 deletions

View File

@@ -130,7 +130,6 @@ cc_library(
deps = [
"//tensorflow/core:framework_lite",
"//third_party/eigen3",
"@com_google_absl//absl/types:variant",
],
)

View File

@@ -47,6 +47,14 @@ cc_library(
],
)

cc_library(
name = "gpu_types",
hdrs = ["gpu_types.h"],
deps = [
"@com_google_absl//absl/types:variant",
],
)

cc_library(
name = "partition_assignment",
srcs = [
@@ -453,6 +461,7 @@ cc_library(
":cudnn_conv_runner",
":cusolver_context",
":gpu_debug_info_manager",
":gpu_types",
":hlo_execution_profiler",
":infeed_manager",
":ir_emission_utils",

View File

@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_debug_info_manager.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_types.h"
#include "tensorflow/compiler/xla/service/gpu/hlo_execution_profiler.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/llvm_ir/buffer_assignment_util.h"
@@ -36,7 +37,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/tracing.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/stream_executor/platform.h"

View File

@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/executable.h"
#include "tensorflow/compiler/xla/service/gpu/buffer_allocations.h"
#include "tensorflow/compiler/xla/service/gpu/gpu_types.h"
#include "tensorflow/compiler/xla/service/gpu/stream_assignment.h"
#include "tensorflow/compiler/xla/service/gpu/thunk.h"
#include "tensorflow/compiler/xla/service/gpu/thunk_schedule.h"
@@ -34,7 +35,6 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/stream_executor/device_memory_allocator.h"

View File

@@ -0,0 +1,31 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TYPES_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TYPES_H_

#include "absl/types/variant.h"

namespace xla {
namespace gpu {

// GpuVersion is used to abstract Gpu hardware version. On Cuda platform,
// it comprises a pair of integers denoting major and minor version.
// On ROCm platform, it comprises one integer for AMD GCN ISA version.
using GpuVersion = absl::variant<std::pair<int, int>, int>;

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TYPES_H_
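Aside, not part of the commit: since GpuVersion is an absl::variant, call sites can branch on the active alternative with the usual variant accessors (absl::holds_alternative, absl::get). The sketch below is illustrative only; the DescribeGpuVersion helper and the sample values are hypothetical, and the alias is repeated locally so the snippet compiles without the TensorFlow tree.

#include <iostream>
#include <string>
#include <utility>

#include "absl/types/variant.h"

// Mirrors the alias introduced in gpu_types.h above.
using GpuVersion = absl::variant<std::pair<int, int>, int>;

// Hypothetical helper: renders a GpuVersion for logging.
std::string DescribeGpuVersion(const GpuVersion& version) {
  if (absl::holds_alternative<std::pair<int, int>>(version)) {
    // CUDA case: {major, minor} version pair, e.g. {7, 0}.
    const auto& cc = absl::get<std::pair<int, int>>(version);
    return "CUDA compute capability " + std::to_string(cc.first) + "." +
           std::to_string(cc.second);
  }
  // ROCm case: a single AMD GCN ISA version number, e.g. 906.
  return "AMD GCN ISA version " + std::to_string(absl::get<int>(version));
}

int main() {
  std::cout << DescribeGpuVersion(GpuVersion(std::make_pair(7, 0))) << "\n";
  std::cout << DescribeGpuVersion(GpuVersion(906)) << "\n";
  return 0;
}

absl::visit is another option when both cases need to be handled in a single expression.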

View File

@@ -19,7 +19,6 @@ limitations under the License.
#include <Eigen/Core>
#include <complex>
#include "absl/types/variant.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/platform/types.h"
#include "third_party/eigen3/Eigen/Core"
@@ -44,13 +43,6 @@ using complex64 = std::complex<float>;
using complex128 = std::complex<double>;
using ::Eigen::half;
namespace gpu {
// GpuVersion is used to abstract Gpu hardware version. On Cuda platform,
// it comprises a pair of integers denoting major and minor version.
// On ROCm platform, it comprises one integer for AMD GCN ISA version.
using GpuVersion = absl::variant<std::pair<int, int>, int>;
} // namespace gpu
} // namespace xla
// Alias namespace ::stream_executor as ::xla::se.