tensorflow/core resolutions set 1

parent 3bb28df8d4
commit d469a3afe1

Changed files under tensorflow/core:
  common_runtime/
  distributed_runtime/
    collective_param_resolver_distributed.cc
    collective_rma_distributed.cc
    eager/
    graph_mgr.cc
    master_session.cc
    rpc/
  framework/
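Every hunk below applies the same cleanup: a loop that compared a signed `int` index against a container's `size()` (an unsigned `size_t`) now hoists the size into a loop-local bound, and a few comparisons gain an explicitly typed local for the same reason. A minimal sketch of the before/after pattern, using a hypothetical `std::vector` rather than any TensorFlow code:

// sign_compare_demo.cc -- illustrative only, not part of this commit.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> to_merge = {3, 1, 4, 1, 5};

  // Before: `i < to_merge.size()` compares a signed int with an unsigned
  // size_t, which triggers -Wsign-compare.
  // for (int i = 0; i < to_merge.size(); ++i) { ... }

  // After: the size is captured once in the signed loop bound `end`, so the
  // comparison is int vs. int and size() is evaluated only once.
  for (int i = 0, end = to_merge.size(); i < end; ++i) {
    std::printf("%d ", to_merge[i]);
  }
  std::printf("\n");
  return 0;
}

The remaining hunks apply the same rewrite to `num_outputs`, `size()`-based loop bounds, and a handful of `size_t`-vs-signed comparisons in conditionals.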
@@ -832,7 +832,7 @@ bool BFCAllocator::MergeTimestampedChunks(size_t required_bytes) {
   // to to_merge. If this is a standard merge (required_bytes == 0) then
   // merge them all, otherwise merge just until a Chunk of the required size
   // is produced.
-  for (int ci = 0; ci < to_merge.size(); ++ci) {
+  for (int ci = 0, end = to_merge.size(); ci < end; ++ci) {
     void* ptr = to_merge[ci];
     // It's possible that the Chunk associated with this memory location got
     // merged and deallocated in a prior iteration so refetch the handle and
@@ -133,7 +133,7 @@ class BFCAllocator : public Allocator {
   // A ChunkHandle is an index into the chunks_ vector in BFCAllocator
   // kInvalidChunkHandle means an invalid chunk
   typedef size_t ChunkHandle;
-  static constexpr int kInvalidChunkHandle = -1;
+  static constexpr ChunkHandle kInvalidChunkHandle = -1;

   typedef int BinNum;
   static constexpr int kInvalidBinNum = -1;
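Note on the hunk above: since `ChunkHandle` is `size_t`, initializing the sentinel from `-1` wraps to `SIZE_MAX`, and typing the constant as `ChunkHandle` keeps comparisons against real handles unsigned-vs-unsigned. A standalone illustration with hypothetical stand-ins, not the BFCAllocator code itself:

// chunk_handle_demo.cc -- illustrative only.
#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef size_t ChunkHandle;
static constexpr ChunkHandle kInvalidChunkHandle = -1;  // wraps to SIZE_MAX

int main() {
  ChunkHandle h = kInvalidChunkHandle;
  // Both operands are size_t, so the comparison is unsigned vs. unsigned.
  std::printf("invalid handle? %d\n", h == kInvalidChunkHandle ? 1 : 0);
  std::printf("sentinel == SIZE_MAX? %d\n",
              kInvalidChunkHandle == SIZE_MAX ? 1 : 0);
  return 0;
}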
@@ -411,7 +411,7 @@ Status GetOrCreateKernelAndDevice(
   // When LazyCopyFunctionRemoteInputs is disabled, all inputs need to be on
   // local devices, since we execute a remote function through worker service,
   // which doesn't accept remote inputs.
-  for (int i = 0; i < op->Inputs().size(); i++) {
+  for (int i = 0, end = op->Inputs().size(); i < end; i++) {
     TensorHandle* input = op->Inputs()[i];
     if (!ctx.LazyCopyFunctionRemoteInputs() &&
         input->Type() == TensorHandle::REMOTE) {
@@ -624,7 +624,7 @@ Status EagerLocalExecute(EagerOperation* op, TensorHandle** retvals,
   Status s;
   if (executor.Async()) {
     const DataTypeVector& output_dtypes = kernel->output_dtypes();
-    for (int i = 0; i < num_outputs; ++i) {
+    for (int i = 0, end = num_outputs; i < end; ++i) {
       retvals[i] = TensorHandle::CreateEmptyLocalHandle(
           /* d= */ ctx.CanonicalDevice(kernel->OutputDevice(i)),
           /* op_device= */ kernel->device(),
@@ -645,7 +645,7 @@ Status EagerLocalExecute(EagerOperation* op, TensorHandle** retvals,
     // performance.
     s = executor.AddOrExecute(std::move(node));
   } else {
-    for (int i = 0; i < num_outputs; ++i) {
+    for (int i = 0, end = num_outputs; i < end; ++i) {
       retvals[i] = nullptr;
     }
     ExecuteNode node(&ctx, op->Inputs(), op->remote_func_params(), kernel,
@@ -660,7 +660,7 @@ Status EagerLocalExecute(EagerOperation* op, TensorHandle** retvals,
   // Since the operation failed, we need to Unref any outputs if they were
   // allocated.
   if (!s.ok()) {
-    for (int i = 0; i < num_outputs; ++i) {
+    for (int i = 0, end = num_outputs; i < end; ++i) {
       if (retvals[i] != nullptr) {
         retvals[i]->Unref();
       }
@@ -770,7 +770,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
                               profiler::TraceMeLevel::kInfo);
   const bool eagerly_copy_function_remote_inputs =
       !ctx.LazyCopyFunctionRemoteInputs() || !op->is_function();
-  for (int i = 0; i < op->Inputs().size(); i++) {
+  for (int i = 0, end = op->Inputs().size(); i < end; i++) {
     tensorflow::TensorHandle* input = op->Inputs()[i];
     tensorflow::Device* input_device = absl::get<Device*>(input->device());
     tensorflow::Device* input_device_or_cpu =
@@ -837,7 +837,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
   DataTypeVector output_dtypes;
   TF_RETURN_IF_ERROR(GetOutputDTypes(op, &output_dtypes));

-  const size_t num_outputs = static_cast<int>(output_dtypes.size());
+  const int64 num_outputs = output_dtypes.size();
   if (num_outputs != *num_retvals) {
     return errors::InvalidArgument(
         "num_retvals does not match expected output dtypes");
@@ -845,7 +845,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
   *num_retvals = num_outputs;

   const tensorflow::uint64 id = remote_op->id();
-  for (int i = 0; i < num_outputs; ++i) {
+  for (int i = 0, end = num_outputs; i < end; ++i) {
     // TODO(nareshmodi): Change the callback to instead add the decref to a
     // list of pending decrefs that we can send as a batch with the next
     // execute.
@@ -898,7 +898,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
   // Since the operation failed, we need to Unref any outputs that were
   // allocated.
   if (!s.ok()) {
-    for (int i = 0; i < num_outputs; ++i) {
+    for (int i = 0, end = num_outputs; i < end; ++i) {
       retvals[i]->Unref();
     }
   }
@@ -910,7 +910,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
 Status GetKernelOutputs(std::vector<Tensor>* outputs, int num_outputs,
                         TensorHandle** retvals, EagerContext* ctx,
                         KernelAndDevice* kernel) {
-  for (int i = 0; i < num_outputs; ++i) {
+  for (int i = 0, end = num_outputs; i < end; ++i) {
     if (retvals[i] == nullptr) {
       retvals[i] = TensorHandle::CreateLocalHandle(
           std::move((*outputs)[i]),
@@ -1305,7 +1305,7 @@ void EagerLocalExecuteAsync(EagerOperation* op, TensorHandle** retvals,
     graph_collector = ctx.GetGraphCollector();
   }

-  for (int i = 0; i < num_outputs; ++i) {
+  for (int i = 0, end = num_outputs; i < end; ++i) {
     retvals[i] = nullptr;
   }

@@ -1317,7 +1317,7 @@ void EagerLocalExecuteAsync(EagerOperation* op, TensorHandle** retvals,
   // Since the operation failed, we need to Unref any outputs if they were
   // allocated.
   if (!s.ok()) {
-    for (int i = 0; i < num_outputs; ++i) {
+    for (int i = 0, end = num_outputs; i < end; ++i) {
       if (retvals[i] != nullptr) {
         retvals[i]->Unref();
       }
@@ -130,7 +130,7 @@ static Node* AddSymGrad(Graph* g, Node* n, gtl::ArraySlice<NodeOut> grads) {
   // The gradient node's outputs have the same types as the node 'n's
   // inputs, except for resources.
   DataTypeVector out_types = n->input_types();
-  for (int i = 0; i < out_types.size(); ++i) {
+  for (int i = 0, end = out_types.size(); i < end; ++i) {
     if (out_types[i] == DT_RESOURCE) {
       // TODO(apassos): figure out how to get the right dtype
       out_types[i] = DT_FLOAT;
@@ -221,7 +221,7 @@ SymbolicGradientBuilder::SymbolicGradientBuilder(
   x_grad_node_outputs_->clear();
   x_grad_node_outputs_->resize(x_node_outputs_.size());
   stop_nodes_.reserve(x_node_outputs_.size());
-  for (int i = 0; i < x_node_outputs_.size(); ++i) {
+  for (int i = 0, end = x_node_outputs_.size(); i < end; ++i) {
     stop_nodes_.insert(x_node_outputs_[i].node->id());
   }
 }
@@ -397,7 +397,7 @@ Status SymbolicGradientBuilder::Compute() {
     }
   }

-  for (int i = 0; i < x_node_outputs_.size(); ++i) {
+  for (int i = 0, end = x_node_outputs_.size(); i < end; ++i) {
     (*x_grad_node_outputs_)[i] = SumGradients(x_node_outputs_[i]);
   }

@@ -341,7 +341,8 @@ void CollectiveParamResolverDistributed::UpdateInstanceCache(
       }
       if (ir->known_count < cp->group.group_size) {
         ir->known_count = cp->group.group_size;
-        if (ir->known.size() != cp->group.group_size) {
+        const int ir_known_size = ir->known.size();
+        if (ir_known_size != cp->group.group_size) {
           ir->status = errors::Internal(
               "UpdateInstanceCache:: CompleteInstanceResponse for instance ",
               cp->instance.instance_key, " has known.size()=", ir->known.size(),
@@ -349,7 +350,7 @@ void CollectiveParamResolverDistributed::UpdateInstanceCache(
           status = ir->status;
           break;
         }
-        for (int i = 0; i < ir->known.size(); ++i) {
+        for (int i = 0; i < ir_known_size; ++i) {
           ir->known[i] = true;
         }
       }
@@ -109,7 +109,8 @@ void CollectiveRemoteAccessDistributed::RecvFromPeer(
         for (const auto& chunk : extra.tensor_content()) {
           num_bytes += chunk.size();
         }
-        if (num_bytes != to_tensor->TotalBytes()) {
+        const int64 to_tensor_TotalBytes = to_tensor->TotalBytes();
+        if (num_bytes != to_tensor_TotalBytes) {
           done(errors::Internal("RecvBufResponse returned ", num_bytes,
                                 " bytes where to_tensor expected ",
                                 to_tensor->TotalBytes()));
@@ -148,9 +148,8 @@ class EagerServiceImpl {

     bool IsStale() {
       mutex_lock l(last_accessed_mu_);
-      return (destroy_after_micros_ > 0 &&
-              (env_->env->NowMicros() - last_accessed_micros_) >
-                  destroy_after_micros_);
+      const int64 time_passed = env_->env->NowMicros() - last_accessed_micros_;
+      return (destroy_after_micros_ > 0 && time_passed > destroy_after_micros_);
     }

    private:
@@ -28,7 +28,7 @@ void RemoteMgr::AddOperationOutputs(
     const gtl::ArraySlice<tensorflow::TensorHandle*> handles,
     int64 operation_id) {
   mutex_lock l(remote_tensor_handle_mu_);
-  for (int i = 0; i < handles.size(); i++) {
+  for (int i = 0, end = handles.size(); i < end; i++) {
     // TODO(nareshmodi): Correctly handle operation_id not being unique.
     remote_tensor_handle_map_.emplace(
         RemoteTensorHandleInternal(operation_id, i), handles[i]);
@@ -403,7 +403,7 @@ void GraphMgr::RecvOutputsAsync(const int64 step_id, NamedTensors* out,
       [done, rendezvous, received_keys, out, keys](const Status s) {
         rendezvous->Unref();
         size_t output_size = 0;
-        for (int i = 0; i < keys.size(); ++i) {
+        for (int i = 0, end = keys.size(); i < end; ++i) {
           (*out)[keys[i]] = (*received_keys)[i];
           output_size += (*out)[keys[i]].AllocatedBytes();
         }
@@ -836,7 +836,7 @@ Status MasterSession::ReffedClientGraph::RunPartitions(
           << execution_count;
   // Maps the names of fed tensors to their index in `req`.
   std::unordered_map<StringPiece, size_t, StringPieceHasher> feeds(3);
-  for (size_t i = 0; i < callable_opts_.feed_size(); ++i) {
+  for (size_t i = 0, end = callable_opts_.feed_size(); i < end; ++i) {
     if (!feeds.insert({callable_opts_.feed(i), i}).second) {
       // MakeCallable will fail if there are two feeds with the same name.
       return errors::Internal("Duplicated feeds in callable: ",
@@ -1564,7 +1564,7 @@ uint64 MasterSession::NewStepId(int64 graph_key) {
   } else {
     uint64 step_id = env_->collective_executor_mgr->NextStepId(graph_key);
     int32 retry_count = 0;
-    while (step_id == CollectiveExecutor::kInvalidId) {
+    while (static_cast<int64>(step_id) == CollectiveExecutor::kInvalidId) {
       Notification note;
       Status status;
       env_->collective_executor_mgr->RefreshStepIdSequenceAsync(
@@ -231,7 +231,7 @@ class GrpcEagerClientCache : public EagerClientCache {
   explicit GrpcEagerClientCache(
       std::shared_ptr<tensorflow::GrpcChannelCache> cache)
       : next_round_robin_assignment_(0), cache_(cache), threads_(4) {
-    for (int i = 0; i < threads_.size(); i++) {
+    for (int i = 0, end = threads_.size(); i < end; i++) {
       threads_[i].reset(new GrpcEagerClientThread());
     }
   }
@@ -176,8 +176,8 @@ class GrpcRemoteMaster : public MasterInterface {
               ? deadline_with_backoff_micros
               : expired_time_micros;
       Env::Default()->SleepForMicroseconds(backoff_until - now_micros);
-      if (Env::Default()->NowMicros() > expired_time_micros &&
-          timeout_in_ms > 0) {
+      const int64 default_now_micros = Env::Default()->NowMicros();
+      if (default_now_micros > expired_time_micros && timeout_in_ms > 0) {
         // If timeout_in_ms is set, exit the retry loop on timeout.
         return errors::DeadlineExceeded(ctx.debug_error_string());
       }
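The retry-loop change above reads the clock once into a signed local before the deadline comparison; `NowMicros()` returns an unsigned 64-bit value, so storing it in `const int64 default_now_micros` keeps the comparison signed-vs-signed (assuming `expired_time_micros` is signed, as the surrounding code suggests) and avoids a second clock read. A rough standalone sketch of the same idea, using `std::chrono` in place of `tensorflow::Env` and hypothetical values:

// deadline_demo.cc -- illustrative only.
#include <chrono>
#include <cstdint>
#include <cstdio>

// Returns the current time in microseconds as a signed 64-bit value,
// mirroring the role of `const int64 default_now_micros = Env::Default()->NowMicros();`.
static int64_t NowMicrosSigned() {
  return std::chrono::duration_cast<std::chrono::microseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}

int main() {
  const int64_t timeout_in_ms = 50;  // hypothetical timeout
  const int64_t expired_time_micros = NowMicrosSigned() + timeout_in_ms * 1000;

  // Read the clock once, then reuse the value for the deadline check.
  const int64_t default_now_micros = NowMicrosSigned();
  if (default_now_micros > expired_time_micros && timeout_in_ms > 0) {
    std::printf("deadline exceeded\n");
  } else {
    std::printf("still within the deadline\n");
  }
  return 0;
}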
@@ -229,7 +229,7 @@ Status GrpcSession::RunHelper(
   // Build an index from fetch tensor name to first index in
   // output_tensor_names.
   std::unordered_map<string, int> output_name_to_offset;
-  for (int i = 0; i < output_tensor_names.size(); ++i) {
+  for (int i = 0, end = output_tensor_names.size(); i < end; ++i) {
     const string& name = output_tensor_names[i];
     if (output_name_to_offset.insert(std::make_pair(name, i)).second) {
       req->add_fetch(name);
@@ -267,7 +267,7 @@ Status GrpcSession::RunHelper(
   // In the unlikely event that output_tensor_names contains duplicates, fill in
   // the duplicate values.
   if (output_name_to_offset.size() != output_tensor_names.size()) {
-    for (int i = 0; i < output_tensor_names.size(); ++i) {
+    for (int i = 0, end = output_tensor_names.size(); i < end; ++i) {
       const string& name = output_tensor_names[i];
       int offset = output_name_to_offset[name];
       if (offset != i) {
@@ -203,7 +203,7 @@ void ExchangeQueue::CheckInvariants() {
     return;
   }

-  for (int i = 1; i < exchanges_.size(); ++i) {
+  for (int i = 1, end = exchanges_.size(); i < end; ++i) {
     const Exchange& e0 = exchanges_[i - 1];
     const Exchange& e1 = exchanges_[i];
     // The first exchange in the pair is the one that arrived later and is
@@ -167,7 +167,8 @@ Status EinsumShape(shape_inference::InferenceContext* c) {
     return errors::InvalidArgument("Expected either 1 or 2 inputs but got: ",
                                    c->num_inputs());
   }
-  if (c->num_inputs() != input_labels.size()) {
+  const int input_labels_size = input_labels.size();
+  if (c->num_inputs() != input_labels_size) {
     return errors::InvalidArgument("Expected ", input_labels.size(),
                                    " inputs for equation ", equation,
                                    " but got: ", c->num_inputs());
@@ -177,7 +178,7 @@ Status EinsumShape(shape_inference::InferenceContext* c) {
   // the broadcast shapes that map to ellipsis.
   absl::flat_hash_map<char, DimensionHandle> label_to_dimension;
   gtl::InlinedVector<ShapeHandle, 2> input_bcast_shapes(c->num_inputs());
-  for (int i = 0; i < c->num_inputs(); ++i) {
+  for (int i = 0, end = c->num_inputs(); i < end; ++i) {
     bool has_ellipsis = false;
     TF_RETURN_IF_ERROR(ValidateEinsumEllipsis(input_labels[i], &has_ellipsis));
     ShapeHandle input_shape = c->input(i);
@@ -202,7 +203,7 @@ Status EinsumShape(shape_inference::InferenceContext* c) {
       input_bcast_shapes[i] = c->Scalar();
     // Run through the input labels; populate label_to_dimension mapping and
     // compute the broadcast shapes corresponding to the ellipsis (if present).
-    for (int label_idx = 0; label_idx < input_labels[i].size(); ++label_idx) {
+    for (int label_idx = 0, end = input_labels[i].size(); label_idx < end; ++label_idx) {
       const char label = input_labels[i][label_idx];
       // Calculate the input axis that the current label is referring to. After
       // the ellipsis, the axis may be found by using negative indices; i.e the
@@ -281,7 +282,7 @@ Status EinsumShape(shape_inference::InferenceContext* c) {

   // Create the output shape from output labels and label_to_dimension mapping.
   std::vector<DimensionHandle> output_dims;
-  for (int label_idx = 0; label_idx < output_labels.size(); ++label_idx) {
+  for (int label_idx = 0, end = output_labels.size(); label_idx < end; ++label_idx) {
     const char label = output_labels[label_idx];
     // Append the output_bcast_shape when the ellipsis is encountered.
     if (label == '.') {
@@ -473,7 +474,8 @@ Status DatasetIteratorShape(shape_inference::InferenceContext* c) {
   TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
   std::vector<PartialTensorShape> output_shapes;
   TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
-  if (output_shapes.size() != c->num_outputs()) {
+  const int output_shapes_size = output_shapes.size();
+  if (output_shapes_size != c->num_outputs()) {
     return errors::InvalidArgument(
         "`output_shapes` must be the same length as `output_types` (",
         output_shapes.size(), " vs. ", c->num_outputs());
@@ -503,7 +505,7 @@ Status MakeShapeFromFormat(TensorFormat format, DimensionOrConstant N,
     dims_actual[GetTensorInnerWidthDimIndex(num_dims, format)] =
         context->MakeDim(4);
   }
-  for (int spatial_dim = 0; spatial_dim < spatial.size(); spatial_dim++) {
+  for (int spatial_dim = 0, end = spatial.size(); spatial_dim < end; spatial_dim++) {
     dims_actual[GetTensorSpatialDimIndex(num_dims, format, spatial_dim)] =
         context->MakeDim(spatial[spatial_dim]);
   }
@@ -520,7 +522,7 @@ Status DimensionsFromShape(ShapeHandle shape, TensorFormat format,
   // Batch.
   *batch_dim = context->Dim(shape, GetTensorBatchDimIndex(rank, format));
   // Spatial.
-  for (int spatial_dim_index = 0; spatial_dim_index < spatial_dims.size();
+  for (int spatial_dim_index = 0, end = spatial_dims.size(); spatial_dim_index < end;
        ++spatial_dim_index) {
     spatial_dims[spatial_dim_index] = context->Dim(
         shape, GetTensorSpatialDimIndex(rank, format, spatial_dim_index));
@@ -546,7 +548,7 @@ Status ShapeFromDimensions(DimensionHandle batch_dim,
   // Batch.
   out_dims[tensorflow::GetTensorBatchDimIndex(rank, format)] = batch_dim;
   // Spatial.
-  for (int spatial_dim_index = 0; spatial_dim_index < spatial_dims.size();
+  for (int spatial_dim_index = 0, end = spatial_dims.size(); spatial_dim_index < end;
        ++spatial_dim_index) {
     out_dims[tensorflow::GetTensorSpatialDimIndex(
         rank, format, spatial_dim_index)] = spatial_dims[spatial_dim_index];
@@ -2338,7 +2340,7 @@ Status ExplicitShapes(InferenceContext* c) {
   if (shapes.empty()) {
     return errors::Internal("shapes attribute is empty");
   }
-  for (int i = 0; i < shapes.size(); ++i) {
+  for (int i = 0, end = shapes.size(); i < end; ++i) {
     ShapeHandle output_shape;
     TF_RETURN_IF_ERROR(
         c->MakeShapeFromPartialTensorShape(shapes[i], &output_shape));
@@ -661,7 +661,7 @@ Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op) {
            "' vs. '", new_in_sig, "'");
   VALIDATE(old_in_ref.size() == new_in_ref.size(),  // Should not happen
            "Unexpected change in input ref lists.");
-  for (int i = 0, iter_limit = old_in_ref.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = old_in_ref.size(); i < end; ++i) {
     // Allowed to remove "ref" from an input (or leave it unchanged).
     VALIDATE(old_in_ref[i] || !new_in_ref[i], "Input ", i,
              " changed from non-ref to ref");
@@ -677,7 +677,7 @@ Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op) {
            old_out_sig, "' vs. '", new_out_sig, "'");
   VALIDATE(old_out_ref.size() == new_out_ref.size(),  // Should not happen
            "Unexpected change in output ref lists");
-  for (int i = 0, iter_limit = old_out_ref.size(); i < iter_limit; ++i) {
+  for (int i = 0, end = old_out_ref.size(); i < end; ++i) {
     // Allowed to add "ref" to an output (or leave it unchanged).
     VALIDATE(!old_out_ref[i] || new_out_ref[i], "Output ", i,
              " changed from ref to non-ref");
@@ -211,6 +211,7 @@ void InferenceContext::PostInputInit(
     }
     input_handle_shapes_and_types_ = std::move(input_handle_data);
   }

-  if (inputs_.size() != num_inputs_from_node_def) {
+  const int inputs_size = inputs_.size();
+  if (inputs_size != num_inputs_from_node_def) {
     construction_status_ = errors::InvalidArgument(