Remove unnecessary OpKernel::is_internal() method.

This method was only called to check that Recv nodes are invoked asynchronously, raising an InternalError when they are not. Recv nodes have been invoked asynchronously continuously since the current executor was deployed, so the check never fires.
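For context, the guard being removed (quoted from the BaseGPUDevice::Compute hunk below) gated only this one case; per the op_kernel.cc hunk, is_internal() simply reported whether the op's type string starts with "_":

    // Removed guard: a synchronous Compute() on an internal "_Recv"
    // kernel was reported as an InternalError.
    if (op_kernel->is_internal() && op_kernel->type_string() == "_Recv") {
      context->SetStatus(errors::Internal(
          "Invalid synchronous 'Compute' on GPU for '_Recv' op"));
    } else {
      ComputeHelper(op_kernel, context);
    }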

PiperOrigin-RevId: 251942596
Author: Derek Murray (committed by TensorFlower Gardener)
Date:   2019-06-06 15:42:42 -07:00
parent  36569233b8
commit  4f9230e2bd
4 changed files with 8 additions and 26 deletions, all under tensorflow/core


@@ -510,6 +510,13 @@ Status BaseGPUDevice::FillContextMap(const Graph* graph,
   return Status::OK();
 }
 
+string BaseGPUDevice::ComputeOpKernelDebugString(const OpKernel& op_kernel,
+                                                 const int& stream_id) {
+  return strings::StrCat(op_kernel.name(), " op ", op_kernel.type_string(),
+                         " on GPU ", tf_gpu_id_.value(), " stream[", stream_id,
+                         "]");
+}
+
 void BaseGPUDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {
   profiler::TraceMe activity(
       [&] {
@@ -528,27 +535,7 @@ void BaseGPUDevice::Compute(OpKernel* op_kernel, OpKernelContext* context) {
   // implemented (or otherwise tries to launch a GPU kernel
   // directly), we need to establish a stacked-scoped environment
   // that directs it to execute on the proper device. Otherwise we
-  // expect the Op to use StreamExecutor directly and correctly. The
-  // way we make this discrimination is quite hacky: At the moment
-  // the only non-Eigen GPU Op is the recv-op, which is known to be
-  // asynchronous.
-  if (op_kernel->is_internal() && op_kernel->type_string() == "_Recv") {
-    context->SetStatus(errors::Internal(
-        "Invalid synchronous 'Compute' on GPU for '_Recv' op"));
-  } else {
-    ComputeHelper(op_kernel, context);
-  }
-}
-
-string BaseGPUDevice::ComputeOpKernelDebugString(const OpKernel& op_kernel,
-                                                 const int& stream_id) {
-  return strings::StrCat(op_kernel.name(), " op ", op_kernel.type_string(),
-                         " on GPU ", tf_gpu_id_.value(), " stream[", stream_id,
-                         "]");
-}
-
-void BaseGPUDevice::ComputeHelper(OpKernel* op_kernel,
-                                  OpKernelContext* context) {
+  // expect the Op to use StreamExecutor directly and correctly.
   GPUDeviceContext* gpu_device_context = device_contexts_[0];
   if (context->op_device_context() != nullptr) {
     gpu_device_context =


@@ -169,8 +169,6 @@ class BaseGPUDevice : public LocalDevice {
   void ReinitializeDevice(OpKernelContext* context, PerOpGpuDevice* device,
                           int stream_id, Allocator* allocator);
 
-  void ComputeHelper(OpKernel* op_kernel, OpKernelContext* context);
-
   string ComputeOpKernelDebugString(const OpKernel& op_kernel,
                                     const int& stream_id);


@@ -104,7 +104,6 @@ OpKernel::OpKernel(OpKernelConstruction* context,
       input_name_map_(context->num_inputs()),
       output_name_map_(context->num_outputs()),
       graph_def_version_(context->graph_def_version()),
-      is_internal_(absl::StartsWith(type_string(), "_")),
       cost_estimate_(OpKernel::kInitialCostEstimateCycles) {
   OP_REQUIRES_OK(context,
                  NameRangesForNode(*def_, *context->op_def_, &input_name_map_,


@@ -163,7 +163,6 @@ class OpKernel {
   const string& name() const;              // Same as def().name()
   const string& type_string() const;       // Same as def().op()
   const string& requested_device() const;  // Same as def().device()
-  bool is_internal() const { return is_internal_; }
 
   int num_inputs() const { return input_types_.size(); }
   DataType input_type(int i) const { return input_types_[i]; }
@@ -219,7 +218,6 @@ class OpKernel {
   NameRangeMap input_name_map_;
   NameRangeMap output_name_map_;
   const int graph_def_version_;
-  const bool is_internal_;  // True if this is an internal operation
   bool expensive_;
   std::atomic_uint_fast64_t cost_estimate_;