Merge pull request #45856 from Intel-tensorflow:mabuzain/fixing-conv-ops-benchmark-test

PiperOrigin-RevId: 348502388
Change-Id: I4bd1b0d1bed9cbacd5e2a2257b998171ed9cbf5c
This commit is contained in:
TensorFlower Gardener 2020-12-21 11:51:46 -08:00
commit 6ee022991d

View File

@@ -325,7 +325,8 @@ static Graph* FusedConv2DWithBatchNorm(
.Run(state); \
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_Conv2D, type, N, H, W, C, FW, FH, FC));
BENCHMARK(BM_NAME(BM_Conv2D, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_Conv2DWithBias(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, \
@@ -336,32 +337,35 @@ static Graph* FusedConv2DWithBatchNorm(
.Run(state); \
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, FC));
BENCHMARK(BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_Conv2DWithBiasAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, \
// Defines and registers a benchmark for an (unfused) Conv2D + BiasAdd + Relu
// graph with batch N, spatial dims HxW, C input channels, and an FWxFH filter
// producing FC output channels. The graph is built by
// Conv2DWithBiasAndActivation<float>(..., "Relu") and run via test::Benchmark
// with /*old_benchmark_api=*/false (the new ::testing::benchmark::State API).
// The trailing ->Arg(1) passes a dummy, unused argument — presumably so the
// registered benchmark name/reporting stays uniform across these macros under
// the new API; TODO confirm against the benchmark framework's requirements.
#define BM_Conv2DWithBiasAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, \
FC)(::testing::benchmark::State & state) { \
test::Benchmark( \
#type, \
Conv2DWithBiasAndActivation<float>(N, H, W, C, FW, FH, FC, "Relu") \
.graph, \
/*old_benchmark_api=*/false) \
.Run(state); \
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_FusedConv2DWithBias(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, \
FC)(::testing::benchmark::State & state) { \
test::Benchmark( \
#type, \
Conv2DWithBiasAndActivation<float>(N, H, W, C, FW, FH, FC, "Relu") \
.graph, \
FusedConv2DWithBias<float>(N, H, W, C, FW, FH, FC, {"BiasAdd"}), \
/*old_benchmark_api=*/false) \
.Run(state); \
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC));
#define BM_FusedConv2DWithBias(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, \
FC)(::testing::benchmark::State & state) { \
test::Benchmark( \
#type, \
FusedConv2DWithBias<float>(N, H, W, C, FW, FH, FC, {"BiasAdd"}), \
/*old_benchmark_api=*/false) \
.Run(state); \
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, FC));
BENCHMARK(BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_FusedConv2DWithBiasAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, \
@@ -374,7 +378,8 @@ static Graph* FusedConv2DWithBatchNorm(
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK( \
BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC));
BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_Conv2DWithBatchNorm(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, \
@@ -385,7 +390,8 @@ static Graph* FusedConv2DWithBatchNorm(
.Run(state); \
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC));
BENCHMARK(BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_Conv2DWithBatchNormAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, \
@@ -399,7 +405,8 @@ static Graph* FusedConv2DWithBatchNorm(
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK( \
BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, FC));
BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_FusedConv2DWithBatchNorm(N, H, W, C, FW, FH, FC, type, LABEL) \
static void BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, \
@@ -411,7 +418,9 @@ static Graph* FusedConv2DWithBatchNorm(
.Run(state); \
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC));
BENCHMARK( \
BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#define BM_FusedConv2DWithBatchNormAndRelu(N, H, W, C, FW, FH, FC, type, \
LABEL) \
@@ -425,7 +434,8 @@ static Graph* FusedConv2DWithBatchNorm(
BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
} \
BENCHMARK(BM_NAME(BM_FusedConv2DWithBatchNormAndRelu, type, N, H, W, C, FW, \
FH, FC));
FH, FC)) \
->Arg(/*unused arg*/ 1);
// -------------------------------------------------------------------------- //
// Pixel CNN convolutions.
@@ -584,7 +594,8 @@ BM_FusedConv2DWithBiasAndRelu(32, 32, 32, 128, 3, 3, 1024, gpu, "3x3 /b 32");
.Run(state); \
BM_SET_INFO(N, H, W, C, type, "", Conv2D); \
} \
BENCHMARK(BM_LONG_NAME(BM_Conv2D, type, T, FORMAT, N, H, W, C, FW, FH, FC));
BENCHMARK(BM_LONG_NAME(BM_Conv2D, type, T, FORMAT, N, H, W, C, FW, FH, FC)) \
->Arg(/*unused arg*/ 1);
#if GOOGLE_CUDA
using fp32 = float;