From 2e2975d794383e253ee54bb7d228fa98b37f6dbc Mon Sep 17 00:00:00 2001
From: Mahmoud Abuzaina
Date: Fri, 18 Dec 2020 09:37:00 -0800
Subject: [PATCH] Fixing conv_ops_benchmark_test

---
 .../core/kernels/conv_ops_benchmark_test.cc | 29 ++++++++++++-------
 1 file changed, 19 insertions(+), 10 deletions(-)

diff --git a/tensorflow/core/kernels/conv_ops_benchmark_test.cc b/tensorflow/core/kernels/conv_ops_benchmark_test.cc
index 8408c8b7ec5..68f663212bd 100644
--- a/tensorflow/core/kernels/conv_ops_benchmark_test.cc
+++ b/tensorflow/core/kernels/conv_ops_benchmark_test.cc
@@ -325,7 +325,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2D, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2D, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBias(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, \
@@ -336,7 +337,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBiasAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, \
@@ -349,7 +351,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBias(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, \
@@ -361,7 +364,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBiasAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, \
@@ -374,7 +378,8 @@ static Graph* FusedConv2DWithBatchNorm(
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
   BENCHMARK( \
-      BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC));
+      BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBatchNorm(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, \
@@ -385,7 +390,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBatchNormAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, \
@@ -399,7 +405,8 @@ static Graph* FusedConv2DWithBatchNorm(
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
   BENCHMARK( \
-      BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, FC));
+      BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBatchNorm(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, \
@@ -411,7 +418,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBatchNormAndRelu(N, H, W, C, FW, FH, FC, type, \
                                            LABEL) \
@@ -425,7 +433,7 @@ static Graph* FusedConv2DWithBatchNorm(
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
   BENCHMARK(BM_NAME(BM_FusedConv2DWithBatchNormAndRelu, type, N, H, W, C, FW, \
-                    FH, FC));
+                    FH, FC))->Arg(/*unused arg*/ 1);
 
 // -------------------------------------------------------------------------- //
 // Pixel CNN convolutions.
@@ -584,7 +592,8 @@ BM_FusedConv2DWithBiasAndRelu(32, 32, 32, 128, 3, 3, 1024, gpu, "3x3 /b 32");
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, "", Conv2D); \
   } \
-  BENCHMARK(BM_LONG_NAME(BM_Conv2D, type, T, FORMAT, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_LONG_NAME(BM_Conv2D, type, T, FORMAT, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #if GOOGLE_CUDA
 using fp32 = float;
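
Note on the pattern used throughout this patch: every BENCHMARK registration gains a trailing ->Arg(/*unused arg*/ 1), and the value itself is never read by the benchmark bodies (hence the "unused arg" comments). In the Google Benchmark API, which the ::testing::benchmark::State signatures in this file build on, an argument registered via ->Arg(N) is surfaced to the benchmark body as state.range(0). The standalone sketch below illustrates only that ->Arg()/state.range(0) pairing; it assumes plain Google Benchmark rather than TensorFlow's test::Benchmark harness, and BM_Example and its loop body are hypothetical names, not part of the patch.

  // Sketch, assuming the open-source Google Benchmark library is available.
  #include <cstdint>

  #include "benchmark/benchmark.h"

  static void BM_Example(::benchmark::State& state) {
    // The value registered via ->Arg() below is visible as state.range(0).
    const int64_t arg = state.range(0);
    for (auto _ : state) {
      ::benchmark::DoNotOptimize(arg);  // Stand-in for the measured work.
    }
  }
  // Same registration shape as in the patch: the argument exists only to
  // satisfy the registration and is otherwise ignored.
  BENCHMARK(BM_Example)->Arg(/*unused arg*/ 1);

  BENCHMARK_MAIN();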