Update benchmarks to use newer API

PiperOrigin-RevId: 344156330
Change-Id: I2bf26b77cd93534658f274f29f1bf37d4c53ed81
A. Unique TensorFlower 2020-11-24 16:55:36 -08:00 committed by TensorFlower Gardener
parent 5aff086d72
commit 29bb0deb26
9 changed files with 103 additions and 91 deletions
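
The change migrates each benchmark from the legacy signature, `void BM_Foo(int iters)` with manual `testing::StartTiming()` / `testing::StopTiming()` / `testing::ItemsProcessed()` calls, to the `::testing::benchmark::State&` API: graph benchmarks now pass `/*old_benchmark_api=*/false` to `test::Benchmark(...)`, drive the run with `.Run(state)`, and report throughput with `state.SetItemsProcessed(state.iterations() * ...)`. A minimal before/after sketch of the pattern follows; `BM_Example`, `BM_Example_Old`, `MakeExampleGraph`, and `kItemsPerIter` are hypothetical names used only for illustration and are not part of this commit.

// Old API (sketch): the framework passes an iteration count, and the benchmark
// brackets untimed setup with StopTiming()/StartTiming() and reports work by hand.
static void BM_Example_Old(int iters) {
  testing::StopTiming();
  Graph* g = MakeExampleGraph();  // hypothetical graph builder, untimed setup
  testing::StartTiming();
  test::Benchmark("cpu", g).Run(iters);
  testing::ItemsProcessed(static_cast<int64>(iters) * kItemsPerIter);
}
BENCHMARK(BM_Example_Old);

// New API (sketch): the benchmark takes ::testing::benchmark::State&, opts out
// of the old API when constructing test::Benchmark, runs with Run(state), and
// reports work through the state object after the run.
static void BM_Example(::testing::benchmark::State& state) {
  Graph* g = MakeExampleGraph();  // setup happens before Run(state), so it is not timed
  test::Benchmark("cpu", g, /*old_benchmark_api=*/false).Run(state);
  state.SetItemsProcessed(static_cast<int64>(state.iterations()) * kItemsPerIter);
}
BENCHMARK(BM_Example);

Loop-style benchmarks (see the ScopedAnnotation file below) follow the same idea, replacing `for (int i = 0; i < iters; i++)` with `for (auto s : state)` and reading arguments through `state.range(0)` instead of a second int parameter.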

View File

@@ -50,12 +50,10 @@ constexpr char longTagParam[] = "LONGTAG____________________________";
 constexpr float largeValueParam = 2352352.2623433;
 #define BM_ScalarSummaryDev(device, dims, name, tag, value) \
-  void BM_ScalarSummary##name##device(int iters) { \
-    testing::StopTiming(); \
+  void BM_ScalarSummary##name##device(::testing::benchmark::State& state) { \
     TensorShape tensorshape(DIMARGS dims); \
     auto g = BM_ScalarSummaryOp(tensorshape, #tag, value); \
-    testing::StartTiming(); \
-    test::Benchmark("cpu", g).Run(iters); \
+    test::Benchmark("cpu", g, /*old_benchmark_api=*/false).Run(state); \
   } \
   BENCHMARK(BM_ScalarSummary##name##device);

View File

@@ -21,7 +21,7 @@ limitations under the License.
 namespace tensorflow {
-static Graph* BM_AdjustContrast(int batches, int width, int height) {
+static Graph* AdjustContrast(int batches, int width, int height) {
   Graph* g = new Graph(OpRegistry::Global());
   Tensor in(DT_FLOAT, TensorShape({batches, width, height, 3}));
   in.flat<float>().setRandom();
@@ -37,9 +37,12 @@ static Graph* BM_AdjustContrast(int batches, int width, int height) {
 }
 #define BM_AdjustContrastDev(DEVICE, B, W, H) \
-  static void BM_AdjustContrast_##DEVICE##_##B##_##W##_##H(int iters) { \
-    testing::ItemsProcessed(iters* B* W* H * 3); \
-    test::Benchmark(#DEVICE, BM_AdjustContrast(B, W, H)).Run(iters); \
+  static void BM_AdjustContrast_##DEVICE##_##B##_##W##_##H( \
+      ::testing::benchmark::State& state) { \
+    test::Benchmark(#DEVICE, AdjustContrast(B, W, H), \
+                    /*old_benchmark_api*/ false) \
+        .Run(state); \
+    state.SetItemsProcessed(state.iterations() * B * W * H * 3); \
   } \
   BENCHMARK(BM_AdjustContrast_##DEVICE##_##B##_##W##_##H)

View File

@@ -21,7 +21,7 @@ limitations under the License.
 namespace tensorflow {
-static Graph* BM_CropAndResize(int batches, int width, int height, int depth,
-                               int crop_height, int crop_width) {
+static Graph* CropAndResize(int batches, int width, int height, int depth,
+                            int crop_height, int crop_width) {
   Graph* g = new Graph(OpRegistry::Global());
   Tensor in(DT_FLOAT, TensorShape({batches, height, width, depth}));
@@ -53,9 +53,11 @@ static Graph* BM_CropAndResize(int batches, int width, int height, int depth,
 #define BM_CropAndResizeDev(DEVICE, B, W, H, D, CH, CW) \
   static void BM_CropAndResize_##DEVICE##_##B##_##W##_##H##_##D##_##CH##_##CW( \
-      int iters) { \
-    testing::ItemsProcessed(iters* B* W* H* D); \
-    test::Benchmark(#DEVICE, BM_CropAndResize(B, W, H, D, CH, CW)).Run(iters); \
+      ::testing::benchmark::State& state) { \
+    test::Benchmark(#DEVICE, CropAndResize(B, W, H, D, CH, CW), \
+                    /*old_benchmark_api*/ false) \
+        .Run(state); \
+    state.SetItemsProcessed(state.iterations() * B * W * H * D); \
   } \
   BENCHMARK(BM_CropAndResize_##DEVICE##_##B##_##W##_##H##_##D##_##CH##_##CW);

View File

@@ -21,8 +21,8 @@ limitations under the License.
 namespace tensorflow {
-static Graph* BM_MirrorPad(int batches, int height, int width, int depth,
-                           int pad, const char* mode) {
+static Graph* MirrorPad(int batches, int height, int width, int depth, int pad,
+                        const char* mode) {
   Graph* g = new Graph(OpRegistry::Global());
   Tensor in(DT_FLOAT, TensorShape({batches, height, width, depth}));
   in.flat<float>().setRandom();
@@ -41,9 +41,12 @@ static Graph* BM_MirrorPad(int batches, int height, int width, int depth,
 #define BM_MirrorPadDev(DEVICE, B, W, H, D, P, MODE) \
   static void BM_MirrorPad_##DEVICE##_##B##_##W##_##H##_##D##_##P##_##MODE( \
-      int iters) { \
-    testing::ItemsProcessed(iters* B*(W + 2 * P) * (H + 2 * P) * D / 32); \
-    test::Benchmark(#DEVICE, BM_MirrorPad(B, W, H, D, P, #MODE)).Run(iters); \
+      ::testing::benchmark::State& state) { \
+    test::Benchmark(#DEVICE, MirrorPad(B, W, H, D, P, #MODE), \
+                    /*old_benchmark_api*/ false) \
+        .Run(state); \
+    state.SetItemsProcessed(state.iterations() * B * (W + 2 * P) * \
+                            (H + 2 * P) * D / 32); \
   } \
   BENCHMARK(BM_MirrorPad_##DEVICE##_##B##_##W##_##H##_##D##_##P##_##MODE);

View File

@@ -21,8 +21,8 @@ limitations under the License.
 namespace tensorflow {
-static Graph* BM_CombinedNonMaxSuppression(int batches, int box_num,
-                                           int class_num, int q) {
+static Graph* CombinedNonMaxSuppression(int batches, int box_num, int class_num,
+                                        int q) {
   Graph* g = new Graph(OpRegistry::Global());
   Tensor boxes(DT_FLOAT, TensorShape({batches, box_num, q, 4}));
   boxes.flat<float>().setRandom();
@@ -49,10 +49,12 @@ static Graph* BM_CombinedNonMaxSuppression(int batches, int box_num,
 }
 #define BM_CombinedNonMaxSuppressionDev(DEVICE, B, BN, CN, Q) \
-  static void BM_CombinedNMS_##DEVICE##_##B##_##BN##_##CN##_##Q(int iters) { \
-    testing::ItemsProcessed(iters* B); \
-    test::Benchmark(#DEVICE, BM_CombinedNonMaxSuppression(B, BN, CN, Q)) \
-        .Run(iters); \
+  static void BM_CombinedNMS_##DEVICE##_##B##_##BN##_##CN##_##Q( \
+      ::testing::benchmark::State& state) { \
+    test::Benchmark(#DEVICE, CombinedNonMaxSuppression(B, BN, CN, Q), \
+                    /*old_benchmark_api*/ false) \
+        .Run(state); \
+    state.SetItemsProcessed(state.iterations() * B); \
   } \
   BENCHMARK(BM_CombinedNMS_##DEVICE##_##B##_##BN##_##CN##_##Q);

View File

@@ -268,10 +268,13 @@ static Graph* ResizeBicubic(int batch_size, int size, int channels,
 }
 #define BM_ResizeBicubicDev(BATCH, SIZE, CHANNELS) \
-  static void BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS(int iters) { \
-    testing::ItemsProcessed(static_cast<int64>(iters) * BATCH * SIZE * SIZE * \
-                            CHANNELS); \
-    test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS)).Run(iters); \
+  static void BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS( \
+      ::testing::benchmark::State& state) { \
+    test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS), \
+                    /*old_benchmark_api*/ false) \
+        .Run(state); \
+    state.SetItemsProcessed(static_cast<int64>(state.iterations()) * BATCH * \
+                            SIZE * SIZE * CHANNELS); \
   } \
   BENCHMARK(BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS);
@@ -290,11 +293,12 @@ BM_ResizeBicubicDev(32, 1024, 3);
 #define BM_ResizeBicubicExpand(BATCH, SIZE, CHANNELS) \
   static void BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS( \
-      int iters) { \
-    testing::ItemsProcessed(static_cast<int64>(iters) * BATCH * SIZE * SIZE * \
-                            CHANNELS * 8 * 8); \
-    test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS, 8, 8)) \
-        .Run(iters); \
+      ::testing::benchmark::State& state) { \
+    test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS, 8, 8), \
+                    /*old_benchmark_api*/ false) \
+        .Run(state); \
+    state.SetItemsProcessed(static_cast<int64>(state.iterations()) * BATCH * \
+                            SIZE * SIZE * CHANNELS * 8 * 8); \
   } \
   BENCHMARK(BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS);

View File

@@ -21,7 +21,7 @@ limitations under the License.
 namespace tensorflow {
-static Graph* BM_Resize(const char* algorithm, int batches, int width,
-                        int height) {
+static Graph* Resize(const char* algorithm, int batches, int width,
+                     int height) {
   Graph* g = new Graph(OpRegistry::Global());
   Tensor in(DT_FLOAT, TensorShape({batches, width, height, 3}));
@@ -42,9 +42,12 @@ static Graph* BM_Resize(const char* algorithm, int batches, int width,
 }
 #define BM_ResizeDev(DEVICE, ALGORITHM, B, W, H) \
-  static void BM_Resize_##ALGORITHM##_##DEVICE##_##B##_##W##_##H(int iters) { \
-    testing::ItemsProcessed(iters* B* W* H * 3); \
-    test::Benchmark(#DEVICE, BM_Resize(#ALGORITHM, B, W, H)).Run(iters); \
+  static void BM_Resize_##ALGORITHM##_##DEVICE##_##B##_##W##_##H( \
+      ::testing::benchmark::State& state) { \
+    test::Benchmark(#DEVICE, Resize(#ALGORITHM, B, W, H), \
+                    /*old_benchmark_api*/ false) \
+        .Run(state); \
+    state.SetItemsProcessed(state.iterations() * B * W * H * 3); \
   } \
   BENCHMARK(BM_Resize_##ALGORITHM##_##DEVICE##_##B##_##W##_##H)

View File

@@ -34,8 +34,7 @@ limitations under the License.
 namespace tensorflow {
 namespace {
-static void BM_ExpandDims(int iters) {
-  testing::StopTiming();
+static void BM_ExpandDims(::testing::benchmark::State& state) {
   Graph* g = new Graph(OpRegistry::Global());
   Tensor input(DT_INT32, TensorShape({1, 1, 1, 1}));
@@ -53,15 +52,12 @@ static void BM_ExpandDims(int iters) {
                   .Finalize(g, &node));
   FixupSourceAndSinkEdges(g);
-  testing::StartTiming();
   test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
-                  "SINGLE_THREADED_EXECUTOR")
-      .Run(iters);
-  testing::UseRealTime();
+                  "SINGLE_THREADED_EXECUTOR", /*old_benchmark_api*/ false)
+      .Run(state);
 }
-BENCHMARK(BM_ExpandDims);
+BENCHMARK(BM_ExpandDims)->UseRealTime();
 }  // namespace
 }  // namespace tensorflow

View File

@@ -53,78 +53,79 @@ std::string GenerateRandomString(int length) {
   return std::string(length, 'a');
 }
-void BM_ScopedAnnotationDisabled(int iters, int annotation_size) {
-  testing::StopTiming();
+void BM_ScopedAnnotationDisabled(::testing::benchmark::State& state) {
+  const int annotation_size = state.range(0);
   std::string annotation = GenerateRandomString(annotation_size);
-  testing::StartTiming();
-  for (int i = 0; i < iters; i++) {
+  for (auto s : state) {
     ScopedAnnotation trace(annotation);
   }
-  testing::StopTiming();
 }
 BENCHMARK(BM_ScopedAnnotationDisabled)->Arg(8)->Arg(32)->Arg(128);
-void BM_ScopedAnnotationEnabled(int iters, int annotation_size) {
-  testing::StopTiming();
+void BM_ScopedAnnotationEnabled(::testing::benchmark::State& state) {
+  const int annotation_size = state.range(0);
   std::string annotation = GenerateRandomString(annotation_size);
   AnnotationStack::Enable(true);
-  testing::StartTiming();
-  for (int i = 0; i < iters; i++) {
+  for (auto s : state) {
     ScopedAnnotation trace(annotation);
   }
-  testing::StopTiming();
   AnnotationStack::Enable(false);
 }
 BENCHMARK(BM_ScopedAnnotationEnabled)->Arg(8)->Arg(32)->Arg(128);
-void BM_ScopedAnnotationEnabled_Nested(int iters, int annotation_size) {
-  testing::StopTiming();
+void BM_ScopedAnnotationEnabled_Nested(::testing::benchmark::State& state) {
+  const int annotation_size = state.range(0);
   std::string annotation = GenerateRandomString(annotation_size);
   AnnotationStack::Enable(true);
-  testing::StartTiming();
-  for (int i = 0; i < iters; i++) {
+  for (auto s : state) {
     ScopedAnnotation trace(annotation);
     { ScopedAnnotation trace(annotation); }
   }
-  testing::StopTiming();
   AnnotationStack::Enable(false);
 }
 BENCHMARK(BM_ScopedAnnotationEnabled_Nested)->Arg(8)->Arg(32)->Arg(128);
-void BM_ScopedAnnotationEnabled_Adhoc(int iters, int annotation_size) {
-  testing::StopTiming();
+void BM_ScopedAnnotationEnabled_Adhoc(::testing::benchmark::State& state) {
   AnnotationStack::Enable(true);
-  testing::StartTiming();
-  for (int i = 0; i < iters; i++) {
+  int i = 0;
+  for (auto s : state) {
     // generate the annotation on the fly.
     ScopedAnnotation trace(absl::StrCat(i, "-", i * i));
+    ++i;
   }
-  testing::StopTiming();
   AnnotationStack::Enable(false);
 }
-BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc)->Arg(8)->Arg(32)->Arg(128);
+BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc);
-void BM_ScopedAnnotationDisabled_Lambda(int iters, int annotation_size) {
-  for (int i = 0; i < iters; i++) {
+void BM_ScopedAnnotationDisabled_Lambda(::testing::benchmark::State& state) {
+  int i = 0;
+  for (auto s : state) {
     ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
+    ++i;
   }
 }
-BENCHMARK(BM_ScopedAnnotationDisabled_Lambda)->Arg(8)->Arg(32)->Arg(128);
+BENCHMARK(BM_ScopedAnnotationDisabled_Lambda);
-void BM_ScopedAnnotationEnabled_Adhoc_Lambda(int iters, int annotation_size) {
+void BM_ScopedAnnotationEnabled_Adhoc_Lambda(
+    ::testing::benchmark::State& state) {
   AnnotationStack::Enable(true);
-  for (int i = 0; i < iters; i++) {
+  int i = 0;
+  for (auto s : state) {
     ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
+    ++i;
   }
   AnnotationStack::Enable(false);
 }
-BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda)->Arg(8)->Arg(32)->Arg(128);
+BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda);
 }  // namespace
 }  // namespace profiler