Update benchmarks to use newer API

PiperOrigin-RevId: 344156330
Change-Id: I2bf26b77cd93534658f274f29f1bf37d4c53ed81
This commit is contained in:
A. Unique TensorFlower 2020-11-24 16:55:36 -08:00 committed by TensorFlower Gardener
parent 5aff086d72
commit 29bb0deb26
9 changed files with 103 additions and 91 deletions

View File

@@ -50,12 +50,10 @@ constexpr char longTagParam[] = "LONGTAG____________________________";
constexpr float largeValueParam = 2352352.2623433;
#define BM_ScalarSummaryDev(device, dims, name, tag, value) \
void BM_ScalarSummary##name##device(int iters) { \
testing::StopTiming(); \
void BM_ScalarSummary##name##device(::testing::benchmark::State& state) { \
TensorShape tensorshape(DIMARGS dims); \
auto g = BM_ScalarSummaryOp(tensorshape, #tag, value); \
testing::StartTiming(); \
test::Benchmark("cpu", g).Run(iters); \
test::Benchmark("cpu", g, /*old_benchmark_api=*/false).Run(state); \
} \
BENCHMARK(BM_ScalarSummary##name##device);

View File

@@ -21,7 +21,7 @@ limitations under the License.
namespace tensorflow {
static Graph* BM_AdjustContrast(int batches, int width, int height) {
static Graph* AdjustContrast(int batches, int width, int height) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in(DT_FLOAT, TensorShape({batches, width, height, 3}));
in.flat<float>().setRandom();
@@ -37,9 +37,12 @@ static Graph* BM_AdjustContrast(int batches, int width, int height) {
}
#define BM_AdjustContrastDev(DEVICE, B, W, H) \
static void BM_AdjustContrast_##DEVICE##_##B##_##W##_##H(int iters) { \
testing::ItemsProcessed(iters* B* W* H * 3); \
test::Benchmark(#DEVICE, BM_AdjustContrast(B, W, H)).Run(iters); \
static void BM_AdjustContrast_##DEVICE##_##B##_##W##_##H( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, AdjustContrast(B, W, H), \
/*old_benchmark_api*/ false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B * W * H * 3); \
} \
BENCHMARK(BM_AdjustContrast_##DEVICE##_##B##_##W##_##H)

View File

@@ -21,7 +21,7 @@ limitations under the License.
namespace tensorflow {
static Graph* BM_CropAndResize(int batches, int width, int height, int depth,
static Graph* CropAndResize(int batches, int width, int height, int depth,
int crop_height, int crop_width) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in(DT_FLOAT, TensorShape({batches, height, width, depth}));
@@ -53,9 +53,11 @@ static Graph* BM_CropAndResize(int batches, int width, int height, int depth,
#define BM_CropAndResizeDev(DEVICE, B, W, H, D, CH, CW) \
static void BM_CropAndResize_##DEVICE##_##B##_##W##_##H##_##D##_##CH##_##CW( \
int iters) { \
testing::ItemsProcessed(iters* B* W* H* D); \
test::Benchmark(#DEVICE, BM_CropAndResize(B, W, H, D, CH, CW)).Run(iters); \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, CropAndResize(B, W, H, D, CH, CW), \
/*old_benchmark_api*/ false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B * W * H * D); \
} \
BENCHMARK(BM_CropAndResize_##DEVICE##_##B##_##W##_##H##_##D##_##CH##_##CW);

View File

@@ -21,8 +21,8 @@ limitations under the License.
namespace tensorflow {
static Graph* BM_MirrorPad(int batches, int height, int width, int depth,
int pad, const char* mode) {
static Graph* MirrorPad(int batches, int height, int width, int depth, int pad,
const char* mode) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in(DT_FLOAT, TensorShape({batches, height, width, depth}));
in.flat<float>().setRandom();
@@ -41,9 +41,12 @@ static Graph* BM_MirrorPad(int batches, int height, int width, int depth,
#define BM_MirrorPadDev(DEVICE, B, W, H, D, P, MODE) \
static void BM_MirrorPad_##DEVICE##_##B##_##W##_##H##_##D##_##P##_##MODE( \
int iters) { \
testing::ItemsProcessed(iters* B*(W + 2 * P) * (H + 2 * P) * D / 32); \
test::Benchmark(#DEVICE, BM_MirrorPad(B, W, H, D, P, #MODE)).Run(iters); \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, MirrorPad(B, W, H, D, P, #MODE), \
/*old_benchmark_api*/ false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B * (W + 2 * P) * \
(H + 2 * P) * D / 32); \
} \
BENCHMARK(BM_MirrorPad_##DEVICE##_##B##_##W##_##H##_##D##_##P##_##MODE);

View File

@@ -21,8 +21,8 @@ limitations under the License.
namespace tensorflow {
static Graph* BM_CombinedNonMaxSuppression(int batches, int box_num,
int class_num, int q) {
static Graph* CombinedNonMaxSuppression(int batches, int box_num, int class_num,
int q) {
Graph* g = new Graph(OpRegistry::Global());
Tensor boxes(DT_FLOAT, TensorShape({batches, box_num, q, 4}));
boxes.flat<float>().setRandom();
@@ -49,10 +49,12 @@ static Graph* BM_CombinedNonMaxSuppression(int batches, int box_num,
}
#define BM_CombinedNonMaxSuppressionDev(DEVICE, B, BN, CN, Q) \
static void BM_CombinedNMS_##DEVICE##_##B##_##BN##_##CN##_##Q(int iters) { \
testing::ItemsProcessed(iters* B); \
test::Benchmark(#DEVICE, BM_CombinedNonMaxSuppression(B, BN, CN, Q)) \
.Run(iters); \
static void BM_CombinedNMS_##DEVICE##_##B##_##BN##_##CN##_##Q( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, CombinedNonMaxSuppression(B, BN, CN, Q), \
/*old_benchmark_api*/ false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B); \
} \
BENCHMARK(BM_CombinedNMS_##DEVICE##_##B##_##BN##_##CN##_##Q);

View File

@@ -268,10 +268,13 @@ static Graph* ResizeBicubic(int batch_size, int size, int channels,
}
#define BM_ResizeBicubicDev(BATCH, SIZE, CHANNELS) \
static void BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS(int iters) { \
testing::ItemsProcessed(static_cast<int64>(iters) * BATCH * SIZE * SIZE * \
CHANNELS); \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS)).Run(iters); \
static void BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS( \
::testing::benchmark::State& state) { \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS), \
/*old_benchmark_api*/ false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64>(state.iterations()) * BATCH * \
SIZE * SIZE * CHANNELS); \
} \
BENCHMARK(BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS);
@@ -290,11 +293,12 @@ BM_ResizeBicubicDev(32, 1024, 3);
#define BM_ResizeBicubicExpand(BATCH, SIZE, CHANNELS) \
static void BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS( \
int iters) { \
testing::ItemsProcessed(static_cast<int64>(iters) * BATCH * SIZE * SIZE * \
CHANNELS * 8 * 8); \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS, 8, 8)) \
.Run(iters); \
::testing::benchmark::State& state) { \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS, 8, 8), \
/*old_benchmark_api*/ false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64>(state.iterations()) * BATCH * \
SIZE * SIZE * CHANNELS * 8 * 8); \
} \
BENCHMARK(BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS);

View File

@@ -21,7 +21,7 @@ limitations under the License.
namespace tensorflow {
static Graph* BM_Resize(const char* algorithm, int batches, int width,
static Graph* Resize(const char* algorithm, int batches, int width,
int height) {
Graph* g = new Graph(OpRegistry::Global());
Tensor in(DT_FLOAT, TensorShape({batches, width, height, 3}));
@@ -42,9 +42,12 @@ static Graph* BM_Resize(const char* algorithm, int batches, int width,
}
#define BM_ResizeDev(DEVICE, ALGORITHM, B, W, H) \
static void BM_Resize_##ALGORITHM##_##DEVICE##_##B##_##W##_##H(int iters) { \
testing::ItemsProcessed(iters* B* W* H * 3); \
test::Benchmark(#DEVICE, BM_Resize(#ALGORITHM, B, W, H)).Run(iters); \
static void BM_Resize_##ALGORITHM##_##DEVICE##_##B##_##W##_##H( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Resize(#ALGORITHM, B, W, H), \
/*old_benchmark_api*/ false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * B * W * H * 3); \
} \
BENCHMARK(BM_Resize_##ALGORITHM##_##DEVICE##_##B##_##W##_##H)

View File

@@ -34,8 +34,7 @@ limitations under the License.
namespace tensorflow {
namespace {
static void BM_ExpandDims(int iters) {
testing::StopTiming();
static void BM_ExpandDims(::testing::benchmark::State& state) {
Graph* g = new Graph(OpRegistry::Global());
Tensor input(DT_INT32, TensorShape({1, 1, 1, 1}));
@@ -53,15 +52,12 @@ static void BM_ExpandDims(int iters) {
.Finalize(g, &node));
FixupSourceAndSinkEdges(g);
testing::StartTiming();
test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
"SINGLE_THREADED_EXECUTOR")
.Run(iters);
testing::UseRealTime();
"SINGLE_THREADED_EXECUTOR", /*old_benchmark_api*/ false)
.Run(state);
}
BENCHMARK(BM_ExpandDims);
BENCHMARK(BM_ExpandDims)->UseRealTime();
} // namespace
} // namespace tensorflow

View File

@@ -53,78 +53,79 @@ std::string GenerateRandomString(int length) {
return std::string(length, 'a');
}
void BM_ScopedAnnotationDisabled(int iters, int annotation_size) {
testing::StopTiming();
void BM_ScopedAnnotationDisabled(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
testing::StartTiming();
for (int i = 0; i < iters; i++) {
for (auto s : state) {
ScopedAnnotation trace(annotation);
}
testing::StopTiming();
}
BENCHMARK(BM_ScopedAnnotationDisabled)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled(int iters, int annotation_size) {
testing::StopTiming();
void BM_ScopedAnnotationEnabled(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
AnnotationStack::Enable(true);
testing::StartTiming();
for (int i = 0; i < iters; i++) {
for (auto s : state) {
ScopedAnnotation trace(annotation);
}
testing::StopTiming();
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled_Nested(int iters, int annotation_size) {
testing::StopTiming();
void BM_ScopedAnnotationEnabled_Nested(::testing::benchmark::State& state) {
const int annotation_size = state.range(0);
std::string annotation = GenerateRandomString(annotation_size);
AnnotationStack::Enable(true);
testing::StartTiming();
for (int i = 0; i < iters; i++) {
for (auto s : state) {
ScopedAnnotation trace(annotation);
{ ScopedAnnotation trace(annotation); }
}
testing::StopTiming();
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Nested)->Arg(8)->Arg(32)->Arg(128);
void BM_ScopedAnnotationEnabled_Adhoc(int iters, int annotation_size) {
testing::StopTiming();
void BM_ScopedAnnotationEnabled_Adhoc(::testing::benchmark::State& state) {
AnnotationStack::Enable(true);
testing::StartTiming();
for (int i = 0; i < iters; i++) {
int i = 0;
for (auto s : state) {
// generate the annotation on the fly.
ScopedAnnotation trace(absl::StrCat(i, "-", i * i));
++i;
}
testing::StopTiming();
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc)->Arg(8)->Arg(32)->Arg(128);
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc);
void BM_ScopedAnnotationDisabled_Lambda(int iters, int annotation_size) {
for (int i = 0; i < iters; i++) {
void BM_ScopedAnnotationDisabled_Lambda(::testing::benchmark::State& state) {
int i = 0;
for (auto s : state) {
ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
++i;
}
}
BENCHMARK(BM_ScopedAnnotationDisabled_Lambda)->Arg(8)->Arg(32)->Arg(128);
BENCHMARK(BM_ScopedAnnotationDisabled_Lambda);
void BM_ScopedAnnotationEnabled_Adhoc_Lambda(int iters, int annotation_size) {
void BM_ScopedAnnotationEnabled_Adhoc_Lambda(
::testing::benchmark::State& state) {
AnnotationStack::Enable(true);
for (int i = 0; i < iters; i++) {
int i = 0;
for (auto s : state) {
ScopedAnnotation trace([&]() { return absl::StrCat(i, "-", i * i); });
++i;
}
AnnotationStack::Enable(false);
}
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda)->Arg(8)->Arg(32)->Arg(128);
BENCHMARK(BM_ScopedAnnotationEnabled_Adhoc_Lambda);
} // namespace
} // namespace profiler