Explicitly capture variables in lambdas.

MSVC does not like implicit capture.

PiperOrigin-RevId: 289168006
Change-Id: Ic5aa1b75677c4cf10af3177c578e2898e97a98d0
This commit is contained in:
Gunhan Gulsoy 2020-01-10 14:39:19 -08:00 committed by TensorFlower Gardener
parent 8fce32ec67
commit 8e2d38bbfd
5 changed files with 11 additions and 10 deletions

View File

@@ -611,7 +611,7 @@ TEST_F(FunctionWithRemoteInputsTest, KernelAndDeviceFuncTest) {
flr, eager_pflr_.get(), std::move(input_dev_ptrs), {}, /*runner=*/nullptr,
/*collective_executor=*/nullptr, local_device, fdef_.signature().name(),
[ctx](const int64 step_id) { return ctx->CreateRendezvous(step_id); },
-[]() { return op_id; }));
+[=]() { return op_id; }));
// Instantiate MatMulFunction on remote_device.
const NodeDef node_def = MatMulFunctionNodeDef();

View File

@@ -422,7 +422,7 @@ TEST_F(CropAndResizeOpTest, TestWithSharding) {
// ... (altogether 999 lines)
// 0, 1, 2, ..., 998
AddInput<float>(TensorShape({1, kLength, kLength, 1}),
-[](int i) -> float { return i % kLength; });
+[=](int i) -> float { return i % kLength; });
AddInputFromArray<float>(TensorShape({2, 4}),
{0, 0, 0.5, 0.5, 0.5, 0.5, 1, 1});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
@@ -436,7 +436,7 @@ TEST_F(CropAndResizeOpTest, TestWithSharding) {
// ... (altogether 500 lines)
// 0, 1, 2, ..., 499
Tensor result1(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1}));
-test::FillFn<float>(&result1, [](int i) -> float { return i % kHalf; });
+test::FillFn<float>(&result1, [=](int i) -> float { return i % kHalf; });
// Result 2:
// 499, 500, 501, ..., 998
@@ -444,7 +444,7 @@ TEST_F(CropAndResizeOpTest, TestWithSharding) {
// 499, 500, 501, ..., 998
Tensor result2(allocator(), DT_FLOAT, TensorShape({1, kHalf, kHalf, 1}));
test::FillFn<float>(&result2,
-[](int i) -> float { return i % kHalf + kHalf - 1; });
+[=](int i) -> float { return i % kHalf + kHalf - 1; });
// Expected result is the concat of the two tensors.
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, kHalf, kHalf, 1}));

View File

@@ -33,7 +33,8 @@ TEST(UnboundedThreadPool, ConcurrentThreadCreation) {
const int kNumThreadsToCreate = 10;
std::atomic<int> i(0);
for (int j = 0; j < kNumThreadsToCreate; ++j) {
-threads.push_back(thread_factory->StartThread("", [&i, thread_factory]() {
+threads.push_back(thread_factory->StartThread("", [=, &i,
+                                                   &thread_factory]() {
std::vector<std::unique_ptr<Thread>> nested_threads;
for (int k = 0; k < kNumThreadsToCreate; ++k) {
nested_threads.push_back(

View File

@@ -98,13 +98,13 @@ TEST_F(MirrorPadOpTest, TestMirrorPadReflectLargeInput) {
// ... (altogether 1000 lines)
// 0, 1, 2, ..., 999
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
-[](int i) -> float { return i % kInput; });
+[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
-test::FillFn<float>(&expected, [](int i) -> float {
+test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i;
@@ -132,13 +132,13 @@ TEST_F(MirrorPadOpTest, TestMirrorPadSymmetricLargeInput) {
// ... (altogether 1000 lines)
// 0, 1, 2, ..., 999
AddInput<float>(TensorShape({1, kInput, kInput, 1}),
-[](int i) -> float { return i % kInput; });
+[=](int i) -> float { return i % kInput; });
AddInputFromArray<int32>(TensorShape({4, 2}),
{0, 0, kPad, kPad, kPad, kPad, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, kOutput, kOutput, 1}));
-test::FillFn<float>(&expected, [](int i) -> float {
+test::FillFn<float>(&expected, [=](int i) -> float {
i = i % kOutput;
if (0 <= i && i < kPad)
return kPad - i - 1;

View File

@@ -365,7 +365,7 @@ TEST(JpegMemTest, Jpeg2) {
const std::unique_ptr<uint8[]> imgdata2(new uint8[flags.stride * in_h]);
CHECK(imgdata2.get() == Uncompress(cpdata2.c_str(), cpdata2.length(), flags,
nullptr /* nwarn */,
-[&imgdata2](int w, int h, int c) {
+[=, &imgdata2](int w, int h, int c) {
CHECK_EQ(w, in_w);
CHECK_EQ(h, in_h);
CHECK_EQ(c, 3);