Move Keras eager microbenchmarks to keras/benchmark.

Adds overhead benchmarks for __call__ on the base Layer and on the Layer subclasses in layers/core.

PiperOrigin-RevId: 315581358
Change-Id: Icb76f6d9e3d1829386c22a454c91105c20b28280
Thomas O'Malley authored on 2020-06-09 16:14:26 -07:00; committed by TensorFlower Gardener
parent 5f069e0eae
commit 3dcf574c5b
3 changed files with 138 additions and 42 deletions
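Each benchmark in this commit times a tight loop around layer(x) and reports the mean per-call latency. A condensed, illustrative sketch of the pattern (this helper is not part of the commit):

import time

def measure_call_overhead(layer, x, num_iters=10000):
  """Illustrative only: mean microseconds per layer(x) call."""
  layer(x)  # Warm-up; also triggers `build` for layers with weights.
  start = time.time()
  for _ in range(num_iters):
    layer(x)
  return (time.time() - start) * 1e6 / num_iters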

tensorflow/python/eager/benchmarks_test.py

@@ -52,8 +52,6 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
@@ -1420,46 +1418,6 @@ class MicroBenchmarks(benchmarks_test_base.MicroBenchmarksBase):
    self._run(fn, 10000)

  # TODO(b/157587712): Move to keras when benchmarks are setup.
  def benchmark_tf_keras_layer_call_overhead(self):

    class OnlyOverheadLayer(base_layer.Layer):

      def call(self, x):
        return x

    layer = OnlyOverheadLayer()
    x = ops.convert_to_tensor([[1.]])

    def fn():
      layer(x)

    self._run(fn, 10000)

  # TODO(b/157587712): Move to keras when benchmarks are setup.
  def benchmark_tf_keras_dense_overhead(self):
    layer = core_layers.Dense(1)
    x = ops.convert_to_tensor([[1.]])
    layer(x)  # Warm-up call that triggers `build`.

    def fn():
      layer(x)

    self._run(fn, 10000)

  # TODO(b/157587712): Move to keras when benchmarks are setup.
  def benchmark_tf_keras_flatten_overhead(self):
    layer = core_layers.Flatten()
    x = ops.convert_to_tensor([[[1.]]])
    layer(x)  # Warm-up call that triggers `build`.

    def fn():
      layer(x)

    self._run(fn, 10000)


if __name__ == "__main__":
  test.main()

tensorflow/python/keras/benchmark/BUILD (file mode changed from normal to executable)

@@ -1,6 +1,8 @@
# Description:
#   Implementation of Keras benchmarks.

load("//tensorflow:tensorflow.bzl", "cuda_py_test")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],  # Apache 2.0
@@ -31,3 +33,13 @@ py_test(
        "//third_party/py/numpy",
    ],
)

cuda_py_test(
    name = "eager_microbenchmarks_test",
    size = "medium",
    srcs = ["eager_microbenchmarks_test.py"],
    python_version = "PY3",
    deps = [
        "//tensorflow/python/keras",
    ],
)

tensorflow/python/keras/benchmark/eager_microbenchmarks_test.py (new file)

@@ -0,0 +1,126 @@
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Microbenchmarks for Keras components in eager mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.layers import core as core_layers
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def _run_benchmark(func, num_iters, execution_mode=None):
  ctx = context.context()
  with context.execution_mode(execution_mode):
    # Call func once to warm up (e.g. trace functions, build layers).
    func()
    if execution_mode == context.ASYNC:
      ctx.executor.wait()
    start = time.time()
    for _ in range(num_iters):
      func()
    if execution_mode == context.ASYNC:
      ctx.executor.wait()
    end = time.time()

  return end - start
class MicroBenchmarksBase(test.Benchmark):
  """Run and report benchmark results."""

  def run_report(self, run_benchmark, func, num_iters, execution_mode=None):
    """Run and report benchmark results."""
    total_time = run_benchmark(func, num_iters, execution_mode)
    mean_us = total_time * 1e6 / num_iters
    extras = {
        "examples_per_sec": float("{0:.3f}".format(num_iters / total_time)),
        "us_per_example": float("{0:.3f}".format(total_time * 1e6 / num_iters))
    }
    benchmark_name = self._get_benchmark_name()
    # Note: wall_time is reported as mean microseconds per call.
    self.report_benchmark(
        iters=num_iters, wall_time=mean_us, extras=extras, name=benchmark_name)

  def _get_benchmark_name(self):
    """Mostly copied from benchmark.py _get_name()."""
    stack = tf_inspect.stack()
    name = None
    for frame in stack[::-1]:
      f_locals = frame[0].f_locals
      f_self = f_locals.get("self", None)
      if isinstance(f_self, test.Benchmark):
        name = frame[3]  # Get the method name.
        # Some methods are wrapped by a disable_tfrt decorator; in that case a
        # function named 'decorated' wraps the real method, so we peek one
        # frame deeper into the stack to get the real name.
        if name == "decorated":
          continue
        else:
          break
    if name is None:
      raise ValueError("Unable to determine calling Benchmark function.")
    if context.is_tfrt_enabled():
      name = name + "_tfrt"
    return name

  def _run(self, func, num_iters, execution_mode=None):
    self.run_report(_run_benchmark, func, num_iters, execution_mode)
  def benchmark_tf_keras_layer_call_overhead(self):

    class OnlyOverheadLayer(base_layer.Layer):

      def call(self, x):
        return x

    layer = OnlyOverheadLayer()
    x = ops.convert_to_tensor([[1.]])

    def fn():
      layer(x)

    self._run(fn, 10000)

  def benchmark_tf_keras_dense_overhead(self):
    layer = core_layers.Dense(1)
    x = ops.convert_to_tensor([[1.]])

    def fn():
      layer(x)

    self._run(fn, 10000)

  def benchmark_tf_keras_flatten_overhead(self):
    layer = core_layers.Flatten()
    x = ops.convert_to_tensor([[[1.]]])

    def fn():
      layer(x)

    self._run(fn, 10000)


if __name__ == "__main__":
  ops.enable_eager_execution()
  test.main()
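As a usage sketch (assumptions: the module import path below, direct instantiation outside the usual --benchmarks flag machinery, and an eager/TF2 runtime):

from tensorflow.python.keras.benchmark import eager_microbenchmarks_test as mb

bench = mb.MicroBenchmarksBase()
# Reports iters and wall_time (mean microseconds per call), plus the
# examples_per_sec and us_per_example extras.
bench.benchmark_tf_keras_dense_overhead()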