Merge pull request #45856 from Intel-tensorflow:mabuzain/fixing-conv-ops-benchmark-test
PiperOrigin-RevId: 348502388
Change-Id: I4bd1b0d1bed9cbacd5e2a2257b998171ed9cbf5c
This commit is contained in commit 6ee022991d.
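Every hunk below makes the same mechanical change: the BENCHMARK(...) registration inside each benchmark macro is continued with a trailing backslash and gains a chained ->Arg(/*unused arg*/ 1) call, so every registered benchmark carries a single (ignored) argument. As a rough illustration of that registration pattern, here is a minimal, self-contained sketch written against the standalone Google Benchmark library; the benchmark name and body are made up for illustration, while the real benchmarks in this file build and run TensorFlow graphs through test::Benchmark.

#include <benchmark/benchmark.h>

// Stand-in benchmark body: the real BM_Conv2D benchmarks construct a
// TensorFlow graph and run it via test::Benchmark(...).Run(state).
static void BM_Conv2DLikeKernel(benchmark::State& state) {
  for (auto _ : state) {
    // Do a trivial amount of work per iteration so the loop isn't empty.
    volatile long acc = 0;
    for (int i = 0; i < 1000; ++i) acc += i;
  }
}

// Same shape as the change in the diff: the registration is chained with
// ->Arg(/*unused arg*/ 1). The argument is never read by the body above;
// it only parameterizes the registration.
BENCHMARK(BM_Conv2DLikeKernel)->Arg(/*unused arg*/ 1);

BENCHMARK_MAIN();

With Google Benchmark, the argument shows up in the reported benchmark name (e.g. BM_Conv2DLikeKernel/1), which is a quick way to confirm the registration took effect.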
@@ -325,7 +325,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2D, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2D, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBias(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, \
@@ -336,7 +337,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2DWithBias, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBiasAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, \
@@ -349,7 +351,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBias(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, \
@@ -361,7 +364,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_FusedConv2DWithBias, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBiasAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, \
@@ -374,7 +378,8 @@ static Graph* FusedConv2DWithBatchNorm(
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
   BENCHMARK( \
-      BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC));
+      BM_NAME(BM_FusedConv2DWithBiasAndRelu, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBatchNorm(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, \
@@ -385,7 +390,8 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_NAME(BM_Conv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_Conv2DWithBatchNormAndRelu(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, \
@@ -399,7 +405,8 @@ static Graph* FusedConv2DWithBatchNorm(
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
   BENCHMARK( \
-      BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, FC));
+      BM_NAME(BM_Conv2DWithBatchNormAndRelu, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBatchNorm(N, H, W, C, FW, FH, FC, type, LABEL) \
   static void BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, \
@@ -411,7 +418,9 @@ static Graph* FusedConv2DWithBatchNorm(
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
-  BENCHMARK(BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC));
+  BENCHMARK( \
+      BM_NAME(BM_FusedConv2DWithBatchNorm, type, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #define BM_FusedConv2DWithBatchNormAndRelu(N, H, W, C, FW, FH, FC, type, \
                                            LABEL) \
@@ -425,7 +434,8 @@ static Graph* FusedConv2DWithBatchNorm(
     BM_SET_INFO(N, H, W, C, type, LABEL, Conv2D); \
   } \
   BENCHMARK(BM_NAME(BM_FusedConv2DWithBatchNormAndRelu, type, N, H, W, C, FW, \
-                    FH, FC));
+                    FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 // -------------------------------------------------------------------------- //
 // Pixel CNN convolutions.
@@ -584,7 +594,8 @@ BM_FusedConv2DWithBiasAndRelu(32, 32, 32, 128, 3, 3, 1024, gpu, "3x3 /b 32");
         .Run(state); \
     BM_SET_INFO(N, H, W, C, type, "", Conv2D); \
   } \
-  BENCHMARK(BM_LONG_NAME(BM_Conv2D, type, T, FORMAT, N, H, W, C, FW, FH, FC));
+  BENCHMARK(BM_LONG_NAME(BM_Conv2D, type, T, FORMAT, N, H, W, C, FW, FH, FC)) \
+      ->Arg(/*unused arg*/ 1);
 
 #if GOOGLE_CUDA
 using fp32 = float;