Avoid capturing unused variables in lambda functions

PiperOrigin-RevId: 188747641
Benoit Steiner 2018-03-12 11:04:59 -07:00 committed by TensorFlower Gardener
parent 1d6a57edc0
commit 62fa49ff5d
10 changed files with 23 additions and 23 deletions
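
The fix applied across these files is mechanical: drop every capture that the lambda body never touches. As a rough illustration of why it matters (a made-up example, not code from this commit), an unused capture still occupies space in the closure, can silently keep `this` or another object alive past the call site, and is reported by Clang's -Wunused-lambda-capture warning:

    #include <cstdio>

    class Counter {
     public:
      // Hypothetical example: the callback only reads `step`, so capturing
      // `this` as well (the commented-out form) would be an unused capture
      // that Clang flags under -Wunused-lambda-capture and that keeps a
      // pointer to the Counter inside the closure for no reason.
      auto MakeLogger(int step) {
        // auto log = [this, step] { std::printf("step %d\n", step); };
        auto log = [step] { std::printf("step %d\n", step); };
        return log;
      }
    };

    int main() {
      Counter c;
      c.MakeLogger(7)();  // prints "step 7"
      return 0;
    }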

@@ -72,9 +72,9 @@ Status AddForwardLoopCounter(WhileContext* while_ctx, const Scope& scope,
   };
   // Body function that adds one to input.
-  BodyGraphBuilderFn body_fn = [while_ctx](const Scope& scope,
+  BodyGraphBuilderFn body_fn = [](const Scope& scope,
                                   const std::vector<Output>& inputs,
                                   std::vector<Output>* outputs) {
     DCHECK_EQ(inputs.size(), 1);
     outputs->emplace_back(ops::Add(scope, inputs[0], 1));
     return scope.status();

@@ -101,8 +101,8 @@ struct ImageConnectedComponentsFunctor<CPUDevice, T> {
     int cost = (union_find.block_height() + union_find.block_width()) * 20;
     Shard(worker_threads->num_threads, worker_threads->workers,
           num_images * num_blocks_vertically * num_blocks_horizontally, cost,
-          [&union_find, num_images, num_blocks_vertically,
-           num_blocks_horizontally](int64 start_block, int64 limit_block) {
+          [&union_find, num_blocks_vertically, num_blocks_horizontally](
+              int64 start_block, int64 limit_block) {
             for (int64 i = start_block; i < limit_block; i++) {
               int64 block_x = i % num_blocks_horizontally;
               int64 block_y =

@@ -92,7 +92,7 @@ static Status ProcessMemoryTypes(
 Status ValidateMemoryTypes(const DeviceType& device_type, const Graph* g) {
   return ProcessMemoryTypes(
-      device_type, g, [g](const Edge* e, MemoryType sm, MemoryType dm) {
+      device_type, g, [](const Edge* e, MemoryType sm, MemoryType dm) {
        if (sm == dm) {
          return Status::OK();
        }
@@ -155,7 +155,7 @@ Status EnsureMemoryTypes(const DeviceType& device_type,
   };
   std::vector<Item> edges;
   TF_RETURN_IF_ERROR(ProcessMemoryTypes(
-      device_type, g, [g, &edges](const Edge* e, MemoryType sm, MemoryType dm) {
+      device_type, g, [&edges](const Edge* e, MemoryType sm, MemoryType dm) {
        if (sm == dm) {
          return Status::OK();
        }

@@ -438,7 +438,7 @@ void GraphMgr::ExecuteAsync(const string& handle, const int64 step_id,
   StartParallelExecutors(handle, step_id, item, rendezvous, collector,
                          cost_graph, cancellation_manager,
-                         [this, item, rendezvous, done](const Status& s) {
+                         [item, rendezvous, done](const Status& s) {
                            done(s);
                            rendezvous->Unref();
                            item->Unref();

@@ -215,7 +215,7 @@ void Worker::DoPartialRunGraph(CallOptions* opts,
   GraphMgr::NamedTensors in;
   GraphMgr::NamedTensors* out = new GraphMgr::NamedTensors;
   Status s = PrepareRunGraph(request, &in, out);
-  auto finish = [this, done, out, opts](const Status& s) {
+  auto finish = [done, out, opts](const Status& s) {
     opts->ClearCancelCallback();
     delete out;
     done(s);
@@ -247,7 +247,7 @@ void Worker::DoPartialRunGraph(CallOptions* opts,
     session->graph_mgr->ExecuteAsync(
         graph_handle, step_id, session.get(), request->exec_opts(),
         nullptr /* collector */, nullptr /* response */, cm, in,
-        [this, token, step_id, session, cm](Status s) {
+        [this, token, step_id, session](Status s) {
          {
            mutex_lock l(mu_);
            cancellation_manager_->DeregisterCallback(token);

@@ -867,7 +867,7 @@ class IteratorGetNextOp : public AsyncOpKernel {
     // inter-op thread pool thread, so we issue the call from the
     // owned thread pool.
     thread_pool_->Schedule(std::bind(
-        [this, ctx, iterator](DoneCallback done) {
+        [ctx, iterator](DoneCallback done) {
           std::vector<Tensor> components;
           bool end_of_sequence = false;

@@ -127,7 +127,7 @@ class Mutex : public ResourceBase {
       }
     }
     thread_pool_->Schedule(std::bind(
-        [this, c, cm, cancelled,
+        [this, cm, cancelled,
          token](std::function<void(const Status& s, SharedLockReleaser&& lock)>
                     fn_) {
           bool local_locked;
@@ -173,7 +173,7 @@ class MutexLockOp : public AsyncOpKernel {
     OP_REQUIRES_OK_ASYNC(
         c,
         LookupOrCreateResource<Mutex>(c, HandleFromInput(c, 0), &mutex,
-                                      [this, c](Mutex** ptr) {
+                                      [c](Mutex** ptr) {
                                         *ptr = new Mutex(
                                             c, HandleFromInput(c, 0).name());
                                         return Status::OK();
@@ -186,10 +186,10 @@ class MutexLockOp : public AsyncOpKernel {
     mutex->AcquireAsync(
         c, std::bind(
-               [this, c, variant, mutex](DoneCallback done_,
+               [c, variant, mutex](DoneCallback done_,
                                          // End of bound arguments.
                                          const Status& s,
                                          Mutex::SharedLockReleaser&& lock) {
                  VLOG(2) << "Finished locking mutex " << mutex
                          << " with lock: " << lock.shared_lock.get()
                          << " status: " << s.ToString();

@@ -351,7 +351,7 @@ class AssignVariableOp<Device, Variant> : public OpKernel {
     Var* variable = nullptr;
     OP_REQUIRES_OK(context, LookupOrCreateResource<Var>(
                                 context, HandleFromInput(context, 0), &variable,
-                                [this, context](Var** ptr) {
+                                [](Var** ptr) {
                                   // Created on host.
                                   *ptr = new Var(DT_VARIANT);
                                   return Status::OK();

@@ -327,7 +327,7 @@ class SparseCrossOp : public OpKernel {
     typename CrossTraits<HASHED_OUTPUT, InternalType>::Updater updater(
         output_start_indices, indices_out, values_out);
-    auto do_work = [this, &columns, crosser, updater](int64 begin, int64 end) {
+    auto do_work = [&columns, crosser, updater](int64 begin, int64 end) {
      for (int b = begin; b < end; b++) {
        ProductIterator<InternalType> product_iterator(columns, b);
        int64 cross_count = 0;

@@ -208,10 +208,10 @@ class SplitVOpCPUImpl {
         input_element_count >= std::max(num_threads, num_split) * 4096 &&
         input_element_count < num_split * 180 * 1024);
-    auto range_output_func = [&indices, context, &input_shape, prefix_dim_size,
-                              split_dim, &split_sizes_vec, &split_start_points,
-                              suffix_dim_size, use_parallelism_between_outputs,
-                              &input_reshaped, &make_sizes,
+    auto range_output_func = [&indices, context, &input_shape, split_dim,
+                              &split_sizes_vec, &split_start_points,
+                              use_parallelism_between_outputs, &input_reshaped,
+                              &make_sizes,
                               &reshape_result](int64 start, int64 limit) {
       for (int64 i = start; i < limit; ++i) {
         TensorShape output_shape(input_shape);