Add BenchmarkType to TestResults proto and fix logging of C++ microbenchmarks.
C++ microbenchmark results are now normalized by iters (like Python benchmarks). Change: 149045367
commit 54c0ff4a73, parent 356e599ce5 (tensorflow)
@@ -41,8 +41,8 @@ Status TestReporter::Benchmark(int64 iters, double cpu_time, double wall_time,
                                double throughput) {
   if (closed_) return Status::OK();
   benchmark_entry_.set_iters(iters);
-  benchmark_entry_.set_cpu_time(cpu_time);
-  benchmark_entry_.set_wall_time(wall_time);
+  benchmark_entry_.set_cpu_time(cpu_time / iters);
+  benchmark_entry_.set_wall_time(wall_time / iters);
   benchmark_entry_.set_throughput(throughput);
   return Status::OK();
 }

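This hunk (presumably in the C++ TestReporter) is the normalization fix the commit message refers to: the reporter previously logged total CPU and wall time accumulated across all iterations, while the Python harness logged per-iteration times. A minimal sketch of the arithmetic, in plain Python with invented numbers:

# Sketch of the per-iteration normalization the reporter now applies.
def normalize(iters, total_cpu_time, total_wall_time):
    # Totals accumulated over `iters` runs become per-iteration times.
    return total_cpu_time / iters, total_wall_time / iters

# A benchmark that burned 2.0s CPU / 2.5s wall over 1000 iterations now logs
# cpu_time=0.002 and wall_time=0.0025, matching the Python benchmark logs.
print(normalize(1000, 2.0, 2.5))
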
@@ -174,4 +174,13 @@ message TestResults {
 
   // Benchmark target identifier.
   string name = 9;
+
+  // The type of benchmark.
+  enum BenchmarkType {
+    UNKNOWN = 0;  // Fallback for protos written before Type was introduced.
+    CPP_MICROBENCHMARK = 1;
+    PYTHON_BENCHMARK = 2;
+    ANDROID_BENCHMARK = 3;
+  }
+  BenchmarkType benchmark_type = 10;
 };

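A short sketch of what the new field looks like from Python, assuming the generated module is importable as tensorflow.core.util.test_log_pb2 (the same module the tools below reference); the target name is hypothetical:

from tensorflow.core.util import test_log_pb2

results = test_log_pb2.TestResults()
results.name = "//path/to:some_benchmark"  # hypothetical identifier
results.benchmark_type = test_log_pb2.TestResults.CPP_MICROBENCHMARK

# A proto serialized before this field existed parses back with the field at
# its default value 0, which is why UNKNOWN is pinned to 0 as the fallback.
assert test_log_pb2.TestResults().benchmark_type == test_log_pb2.TestResults.UNKNOWN
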
@@ -8,7 +8,8 @@ def tf_cc_logged_benchmark(
     target=None,
     benchmarks="..",
     tags=[],
-    test_log_output_prefix=""):
+    test_log_output_prefix="",
+    benchmark_type="cpp_microbenchmark"):
   if not name:
     fail("Must provide a name")
   if not target:

@@ -31,7 +32,8 @@ def tf_cc_logged_benchmark(
   args = [
       "--name=//%s:%s" % (PACKAGE_NAME, name),
       "--test_name=" + target,
-      "--test_args=--benchmarks=%s" % benchmarks
+      "--test_args=--benchmarks=%s" % benchmarks,
+      "--benchmark_type=%s" % benchmark_type,
   ],
   data = [
       target,

@@ -56,4 +58,5 @@ def tf_py_logged_benchmark(
       target=target,
       benchmarks=benchmarks,
       tags=tags,
-      test_log_output_prefix=test_log_output_prefix)
+      test_log_output_prefix=test_log_output_prefix,
+      benchmark_type="python_benchmark")

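For context, a hypothetical BUILD usage of the two macros; the target names are invented, and the load path assumes the macros live in tensorflow/tools/test/performance.bzl:

load("//tensorflow/tools/test:performance.bzl",
     "tf_cc_logged_benchmark", "tf_py_logged_benchmark")

# benchmark_type defaults to "cpp_microbenchmark", so existing C++ callers
# need no change to pick up the new field.
tf_cc_logged_benchmark(
    name = "cast_op_logged_benchmark",              # hypothetical
    target = "//tensorflow/core/kernels:cast_op_test",
)

# The Python wrapper pins benchmark_type to "python_benchmark" itself.
tf_py_logged_benchmark(
    name = "rnn_logged_benchmark",                  # hypothetical
    target = "//tensorflow/python/kernel_tests:rnn_test",
)
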
@@ -68,8 +68,10 @@ def main(unused_args):
   name = FLAGS.name
   test_name = FLAGS.test_name
   test_args = FLAGS.test_args
-  test_results, _ = run_and_gather_logs_lib.run_and_gather_logs(name, test_name,
-                                                                test_args)
+  benchmark_type = FLAGS.benchmark_type
+  test_results, _ = run_and_gather_logs_lib.run_and_gather_logs(
+      name, test_name=test_name, test_args=test_args,
+      benchmark_type=benchmark_type)
 
   # Additional bits we receive from bazel
   test_results.build_configuration.CopyFrom(gather_build_configuration())

@@ -102,6 +104,11 @@ if __name__ == "__main__":
       "--name", type=str, default="", help="Benchmark target identifier.")
   parser.add_argument(
       "--test_name", type=str, default="", help="Test target to run.")
+  parser.add_argument(
+      "--benchmark_type",
+      type=str,
+      default="",
+      help="BenchmarkType enum string (benchmark type).")
   parser.add_argument(
       "--test_args",
       type=str,

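A small self-contained sketch of how the new flag parses; the value mirrors the default that tf_cc_logged_benchmark now passes via --benchmark_type:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--benchmark_type", type=str, default="")

# Mirrors the generated invocation (value is the macro's default).
flags, _ = parser.parse_known_args(["--benchmark_type=cpp_microbenchmark"])
assert flags.benchmark_type == "cpp_microbenchmark"
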
@@ -46,15 +46,16 @@ def get_git_commit_sha():
   return os.getenv("GIT_COMMIT")
 
 
-def process_test_logs(name, test_name, test_args, start_time, run_time,
-                      log_files):
+def process_test_logs(name, test_name, test_args, benchmark_type,
+                      start_time, run_time, log_files):
   """Gather test information and put it in a TestResults proto.
 
   Args:
     name: Benchmark target identifier.
-    test_name: A unique bazel target, e.g. "//path/to:test"
-    test_args: A string containing all arguments to run the target with.
+    test_name: A unique bazel target, e.g. "//path/to:test"
+    test_args: A string containing all arguments to run the target with.
+    benchmark_type: A string representing the BenchmarkType enum; the
+      benchmark type for this target.
     start_time: Test starting time (epoch)
     run_time: Wall time that the test ran for
     log_files: Paths to the log files

@@ -68,6 +69,8 @@ def process_test_logs(name, test_name, test_args, start_time, run_time,
   results.target = test_name
   results.start_time = start_time
   results.run_time = run_time
+  results.benchmark_type = test_log_pb2.TestResults.BenchmarkType.Value(
+      benchmark_type.upper())
 
   # Gather source code information
   git_sha = get_git_commit_sha()

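The string-to-enum conversion above is the standard protobuf enum lookup. A sketch, assuming the test_log_pb2 module path used elsewhere in these tools:

from tensorflow.core.util import test_log_pb2

def to_benchmark_type(flag_value):
    # What the commit does: upper-case the flag and look it up in the enum,
    # so "cpp_microbenchmark" -> CPP_MICROBENCHMARK (1).
    return test_log_pb2.TestResults.BenchmarkType.Value(flag_value.upper())

assert to_benchmark_type("cpp_microbenchmark") == test_log_pb2.TestResults.CPP_MICROBENCHMARK
# An unrecognized string raises ValueError rather than mapping to UNKNOWN,
# so a typo in --benchmark_type fails loudly at log-processing time.
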
@@ -90,13 +93,15 @@ def process_benchmarks(log_files):
   return benchmarks
 
 
-def run_and_gather_logs(name, test_name, test_args):
+def run_and_gather_logs(name, test_name, test_args, benchmark_type):
   """Run the bazel test given by test_name. Gather and return the logs.
 
   Args:
     name: Benchmark target identifier.
     test_name: A unique bazel target, e.g. "//path/to:test"
     test_args: A string containing all arguments to run the target with.
+    benchmark_type: A string representing the BenchmarkType enum; the
+      benchmark type for this target.
 
   Returns:
     A tuple (test_results, mangled_test_name), where

@@ -145,8 +150,9 @@ def run_and_gather_logs(name, test_name, test_args):
 
   return (process_test_logs(
       name,
-      test_name,
-      test_args,
+      test_name=test_name,
+      test_args=test_args,
+      benchmark_type=benchmark_type,
      start_time=int(start_time),
      run_time=run_time,
      log_files=log_files), mangled_test_name)

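Downstream, the new field removes ambiguity about units when reading logged results back: per-iteration times for tagged protos, UNKNOWN for older logs. A hedged sketch of parsing one serialized TestResults (the log path is hypothetical):

from tensorflow.core.util import test_log_pb2

results = test_log_pb2.TestResults()
with open("/tmp/benchmark_log.pb", "rb") as f:  # hypothetical path
    results.ParseFromString(f.read())

kind = test_log_pb2.TestResults.BenchmarkType.Name(results.benchmark_type)
# Pre-commit logs parse as UNKNOWN; new C++ logs report per-iteration times.
print(kind, results.name)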