Switch Keras microbenchmarks to use the TF API directly instead of direct internal imports.

This also renames the Keras benchmarks directory to `benchmarks` (instead of `benchmark`) for consistency with other benchmarks folders in TF, and adds an overhead benchmark for `.predict`. That benchmark runs only a few iterations because `predict` appears to have fairly high overhead currently.

PiperOrigin-RevId: 318580303
Change-Id: Ibe99ee56974ead3783fcefd9072bb87896f743fe
commit 07dd408634 (parent a9a82a1f0d)
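As context for the new `.predict` overhead benchmark below, here is a minimal standalone sketch (not part of this commit) of the same measurement idea written against the public TF API. The layer name, iteration count, and plain wall-clock harness are illustrative assumptions, not the `test.Benchmark` machinery the test itself uses:

import time

import tensorflow as tf


class IdentityLayer(tf.keras.layers.Layer):  # hypothetical stand-in for OnlyOverheadLayer

  def call(self, x):
    return x


model = tf.keras.Sequential([IdentityLayer()])
x = tf.convert_to_tensor([[1.]])

model.predict(x)  # discount the first, more expensive call (builds the model)

# predict() currently carries fairly high per-call overhead, so a small
# iteration count already gives a stable wall-clock estimate.
iters = 20
start = time.time()
for _ in range(iters):
  model.predict(x)
print("avg .predict overhead: %f s" % ((time.time() - start) / iters))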
@@ -29,7 +29,7 @@ py_test(
     srcs = ["keras_cpu_benchmark_test.py"],
     python_version = "PY3",
     deps = [
-        "//tensorflow/python/keras",
+        "//tensorflow:tensorflow_py",
         "//third_party/py/numpy",
     ],
 )
@@ -40,7 +40,7 @@ cuda_py_test(
     srcs = ["eager_microbenchmarks_test.py"],
     python_version = "PY3",
     deps = [
-        "//tensorflow/python/keras",
+        "//tensorflow:tensorflow_py",
     ],
 )
 
@@ -61,12 +61,6 @@ cuda_py_test(
     srcs = ["model_components_benchmarks_test.py"],
     python_version = "PY3",
     deps = [
-        "//tensorflow/python:random_ops",
-        "//tensorflow/python:training_lib",
-        "//tensorflow/python/data/ops:dataset_ops",
-        "//tensorflow/python/eager:backprop",
-        "//tensorflow/python/eager:context",
-        "//tensorflow/python/eager:profiler",
-        "//tensorflow/python/eager:test",
+        "//tensorflow:tensorflow_py",
     ],
 )
@@ -19,15 +19,9 @@ from __future__ import print_function
 
 import time
 
+import tensorflow as tf
+
 from tensorflow.python.eager import context
-from tensorflow.python.framework import ops
-from tensorflow.python.keras.engine import base_layer
-from tensorflow.python.keras.layers import advanced_activations
-from tensorflow.python.keras.layers import convolutional
-from tensorflow.python.keras.layers import core
-from tensorflow.python.keras.layers import embeddings
-from tensorflow.python.keras.layers import normalization
-from tensorflow.python.ops import array_ops
 from tensorflow.python.platform import test
 from tensorflow.python.util import tf_inspect
@@ -92,24 +86,39 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_call_overhead(self):
 
-    class OnlyOverheadLayer(base_layer.Layer):
+    class OnlyOverheadLayer(tf.keras.layers.Layer):
 
       def call(self, x):
         return x
 
     layer = OnlyOverheadLayer()
-    x = ops.convert_to_tensor([[1.]])
+    x = tf.convert_to_tensor([[1.]])
 
     def fn():
-      layer(x)
+      layer(x)  # pylint: disable=not-callable
 
     self._run(fn, 10000)
 
+  def benchmark_model_predict_tensorlike_overhead(self):
+
+    class OnlyOverheadLayer(tf.keras.layers.Layer):
+
+      def call(self, x):
+        return x
+
+    model = tf.keras.Sequential([OnlyOverheadLayer()])
+    x = tf.convert_to_tensor([[1.]])
+
+    def fn():
+      model.predict(x)
+
+    self._run(fn, 20)
+
   # Naming convention: benchmark_layers_{module_name}_{class}_overhead.
   def benchmark_layers_advanced_activations_leaky_relu_overhead(self):
 
-    layer = advanced_activations.LeakyReLU()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.LeakyReLU()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x)
@@ -118,8 +127,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_advanced_activations_prelu_overhead(self):
 
-    layer = advanced_activations.PReLU()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.PReLU()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x)
@@ -128,8 +137,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_advanced_activations_elu_overhead(self):
 
-    layer = advanced_activations.ELU()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.ELU()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x)
@@ -138,8 +147,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_advanced_activations_thresholded_relu_overhead(self):
 
-    layer = advanced_activations.ThresholdedReLU()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.ThresholdedReLU()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x)
@@ -148,8 +157,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_advanced_activations_softmax_overhead(self):
 
-    layer = advanced_activations.Softmax()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.Softmax()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x)
@@ -158,8 +167,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_advanced_activations_relu_overhead(self):
 
-    layer = advanced_activations.ReLU()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.ReLU()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x)
@@ -168,8 +177,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_core_masking_overhead(self):
 
-    layer = core.Masking()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.Masking()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x)
@@ -178,8 +187,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_core_dropout_overhead(self):
 
-    layer = core.Dropout(0.5)
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.Dropout(0.5)
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x, training=True)
@@ -188,8 +197,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_core_flatten_overhead(self):
 
-    layer = core.Flatten()
-    x = ops.convert_to_tensor([[[1.]]])
+    layer = tf.keras.layers.Flatten()
+    x = tf.convert_to_tensor([[[1.]]])
 
     def fn():
       layer(x)
@@ -198,8 +207,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_core_dense_overhead(self):
 
-    layer = core.Dense(1)
-    x = ops.convert_to_tensor([[1.]])
+    layer = tf.keras.layers.Dense(1)
+    x = tf.convert_to_tensor([[1.]])
 
     def fn():
       layer(x)
@@ -208,8 +217,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_convolutional_conv1d_overhead(self):
 
-    layer = convolutional.Conv1D(1, (1,))
-    x = array_ops.ones((1, 1, 1))
+    layer = tf.keras.layers.Conv1D(1, (1,))
+    x = tf.ones((1, 1, 1))
 
     def fn():
       layer(x)
@@ -218,8 +227,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_convolutional_conv2d_overhead(self):
 
-    layer = convolutional.Conv2D(1, (1, 1))
-    x = array_ops.ones((1, 1, 1, 1))
+    layer = tf.keras.layers.Conv2D(1, (1, 1))
+    x = tf.ones((1, 1, 1, 1))
 
     def fn():
       layer(x)
@@ -228,8 +237,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_convolutional_conv3d_overhead(self):
 
-    layer = convolutional.Conv3D(1, (1, 1, 1))
-    x = array_ops.ones((1, 1, 1, 1, 1))
+    layer = tf.keras.layers.Conv3D(1, (1, 1, 1))
+    x = tf.ones((1, 1, 1, 1, 1))
 
     def fn():
       layer(x)
@@ -238,8 +247,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_embeddings_embedding_overhead(self):
 
-    layer = embeddings.Embedding(1, 1)
-    x = array_ops.zeros((1, 1), dtype="int32")
+    layer = tf.keras.layers.Embedding(1, 1)
+    x = tf.zeros((1, 1), dtype="int32")
 
     def fn():
       layer(x)
@@ -248,8 +257,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_batch_norm_fused_inf(self):
 
-    layer = normalization.BatchNormalization(fused=True)
-    x = array_ops.ones((1, 1, 1, 1))
+    layer = tf.keras.layers.BatchNormalization(fused=True)
+    x = tf.ones((1, 1, 1, 1))
 
     def fn():
       layer(x)
@@ -258,8 +267,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_batch_norm_fused_train(self):
 
-    layer = normalization.BatchNormalization(fused=True)
-    x = array_ops.ones((1, 1, 1, 1))
+    layer = tf.keras.layers.BatchNormalization(fused=True)
+    x = tf.ones((1, 1, 1, 1))
 
     def fn():
       layer(x, training=True)
@@ -268,8 +277,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_batch_norm_nonfused_inf(self):
 
-    layer = normalization.BatchNormalization(fused=False)
-    x = array_ops.ones((1, 1, 1, 1))
+    layer = tf.keras.layers.BatchNormalization(fused=False)
+    x = tf.ones((1, 1, 1, 1))
 
     def fn():
       layer(x)
@@ -278,8 +287,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_batch_norm_nonfused_train(self):
 
-    layer = normalization.BatchNormalization(fused=False)
-    x = array_ops.ones((1, 1, 1, 1))
+    layer = tf.keras.layers.BatchNormalization(fused=False)
+    x = tf.ones((1, 1, 1, 1))
 
     def fn():
       layer(x, training=True)
@@ -288,8 +297,8 @@ class MicroBenchmarksBase(test.Benchmark):
 
   def benchmark_layers_normalization_layer_normalization_overhead(self):
 
-    layer = normalization.LayerNormalization()
-    x = array_ops.ones((1, 1))
+    layer = tf.keras.layers.LayerNormalization()
+    x = tf.ones((1, 1))
 
     def fn():
       layer(x, training=True)
@@ -298,5 +307,5 @@ class MicroBenchmarksBase(test.Benchmark):
 
 
 if __name__ == "__main__":
-  ops.enable_eager_execution()
+  assert tf.executing_eagerly()
   test.main()
@@ -23,7 +23,8 @@ import timeit
 import numpy as np
 import six
 
-from tensorflow.python import keras
+import tensorflow as tf
+
 from tensorflow.python.platform import benchmark
 from tensorflow.python.platform import test
 
@@ -40,7 +41,7 @@ _LSTM_X = np.random.randint(0, 1999, size=(2500, 100))
 _LSTM_Y = np.random.random((2500, 1))
 
 
-class TimerCallback(keras.callbacks.Callback):
+class TimerCallback(tf.keras.callbacks.Callback):
 
   def __init__(self):
     self.times = []
@@ -110,35 +111,35 @@ class KerasModelCPUBenchmark(
         extras=results)
 
   def _mnist_mlp(self):
-    model = keras.Sequential()
-    model.add(keras.layers.Dense(512, activation='relu', input_shape=(784,)))
-    model.add(keras.layers.Dropout(0.2))
-    model.add(keras.layers.Dense(512, activation='relu'))
-    model.add(keras.layers.Dropout(0.2))
-    model.add(keras.layers.Dense(10, activation='softmax'))
+    model = tf.keras.Sequential()
+    model.add(tf.keras.layers.Dense(512, activation='relu', input_shape=(784,)))
+    model.add(tf.keras.layers.Dropout(0.2))
+    model.add(tf.keras.layers.Dense(512, activation='relu'))
+    model.add(tf.keras.layers.Dropout(0.2))
+    model.add(tf.keras.layers.Dense(10, activation='softmax'))
 
     return model
 
   def _mnist_convnet(self):
-    model = keras.Sequential()
+    model = tf.keras.Sequential()
     model.add(
-        keras.layers.Conv2D(
+        tf.keras.layers.Conv2D(
             32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
-    model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
-    model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
-    model.add(keras.layers.Dropout(0.25))
-    model.add(keras.layers.Flatten())
-    model.add(keras.layers.Dense(128, activation='relu'))
-    model.add(keras.layers.Dropout(0.5))
-    model.add(keras.layers.Dense(10, activation='softmax'))
+    model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
+    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
+    model.add(tf.keras.layers.Dropout(0.25))
+    model.add(tf.keras.layers.Flatten())
+    model.add(tf.keras.layers.Dense(128, activation='relu'))
+    model.add(tf.keras.layers.Dropout(0.5))
+    model.add(tf.keras.layers.Dense(10, activation='softmax'))
 
     return model
 
   def _imdb_lstm(self):
-    model = keras.Sequential()
-    model.add(keras.layers.Embedding(20000, 128))
-    model.add(keras.layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
-    model.add(keras.layers.Dense(1, activation='sigmoid'))
+    model = tf.keras.Sequential()
+    model.add(tf.keras.layers.Embedding(20000, 128))
+    model.add(tf.keras.layers.LSTM(128, dropout=0.2, recurrent_dropout=0.2))
+    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
 
     return model
@@ -22,29 +22,26 @@ import time
 import numpy as np
 from six.moves import xrange  # pylint: disable=redefined-builtin
 
-from tensorflow.python import keras
-from tensorflow.python.data.ops import dataset_ops
-from tensorflow.python.eager import backprop  # pylint: disable=unused-import
+import tensorflow as tf
+
 from tensorflow.python.eager import context
 from tensorflow.python.eager import profiler
-from tensorflow.python.eager import test
-from tensorflow.python.ops import random_ops
-from tensorflow.python.training import gradient_descent
+from tensorflow.python.platform import test
 
 
-class SubclassedKerasModel(keras.Model):
+class SubclassedKerasModel(tf.keras.Model):
 
   def __init__(self, initializer="ones"):
     super(SubclassedKerasModel, self).__init__()
-    self.layer_a = keras.layers.Dense(
+    self.layer_a = tf.keras.layers.Dense(
         64, kernel_initializer=initializer, bias_initializer="zeros")
-    self.layer_b = keras.layers.Dense(
+    self.layer_b = tf.keras.layers.Dense(
         128, kernel_initializer=initializer, bias_initializer="zeros")
-    self.layer_c = keras.layers.Dense(
+    self.layer_c = tf.keras.layers.Dense(
         256, kernel_initializer=initializer, bias_initializer="zeros")
-    self.layer_d = keras.layers.Dense(
+    self.layer_d = tf.keras.layers.Dense(
         256, kernel_initializer=initializer, bias_initializer="zeros")
-    self.layer_e = keras.layers.Dense(
+    self.layer_e = tf.keras.layers.Dense(
         10, kernel_initializer=initializer, bias_initializer="zeros")
 
   def call(self, x):
@@ -56,32 +53,32 @@ class SubclassedKerasModel(keras.Model):
 
 
 def make_keras_model(initializer="ones"):
-  model_input = keras.Input(shape=(10,))
-  x = keras.layers.Dense(
+  model_input = tf.keras.Input(shape=(10,))
+  x = tf.keras.layers.Dense(
       64, kernel_initializer=initializer, bias_initializer="zeros")(model_input)
-  x = keras.layers.Dense(
+  x = tf.keras.layers.Dense(
       128, kernel_initializer=initializer, bias_initializer="zeros")(x)
-  x = keras.layers.Dense(
+  x = tf.keras.layers.Dense(
      256, kernel_initializer=initializer, bias_initializer="zeros")(x)
-  x = keras.layers.Dense(
+  x = tf.keras.layers.Dense(
      256, kernel_initializer=initializer, bias_initializer="zeros")(x)
-  x = keras.layers.Dense(
+  x = tf.keras.layers.Dense(
      10, kernel_initializer=initializer, bias_initializer="zeros")(x)
-  return keras.Model(inputs=model_input, outputs=x)
+  return tf.keras.Model(inputs=model_input, outputs=x)
 
 
 def make_sequential_keras_model(initializer="ones"):
-  model = keras.models.Sequential()
-  model.add(keras.layers.Dense(
+  model = tf.keras.models.Sequential()
+  model.add(tf.keras.layers.Dense(
       64, kernel_initializer=initializer, bias_initializer="zeros",
       input_shape=(10,)))
-  model.add(keras.layers.Dense(
+  model.add(tf.keras.layers.Dense(
       128, kernel_initializer=initializer, bias_initializer="zeros"))
-  model.add(keras.layers.Dense(
+  model.add(tf.keras.layers.Dense(
       256, kernel_initializer=initializer, bias_initializer="zeros"))
-  model.add(keras.layers.Dense(
+  model.add(tf.keras.layers.Dense(
       256, kernel_initializer=initializer, bias_initializer="zeros"))
-  model.add(keras.layers.Dense(
+  model.add(tf.keras.layers.Dense(
       10, kernel_initializer=initializer, bias_initializer="zeros"))
   return model
@@ -120,9 +117,9 @@ class KerasComponentsBenchmarks(test.Benchmark):
 
   def benchmark_keras_model_subclassed(self):
     model = SubclassedKerasModel()
-    data = random_ops.random_uniform((10, 10))
+    data = tf.random.uniform((10, 10))
 
-    func = lambda: model(data)
+    func = lambda: model(data)  # pylint: disable=not-callable
     # First call is more expensive (creates variables etc.), discount that.
     func()
 
@@ -135,16 +132,16 @@ class KerasComponentsBenchmarks(test.Benchmark):
 
   def benchmark_keras_model_functional(self):
     model = make_keras_model()
-    data = random_ops.random_uniform((10, 10))
-    func = lambda: model(data)
+    data = tf.random.uniform((10, 10))
+    func = lambda: model(data)  # pylint: disable=not-callable
     # Symmetry with benchmark_keras_model_subclassed
     func()
-    assert np.equal(func(), SubclassedKerasModel()(data)).all()
+    assert np.equal(func(), SubclassedKerasModel()(data)).all()  # pylint: disable=not-callable
     self._run(func, 30000)
 
   def benchmark_keras_model_sequential(self):
     model = make_sequential_keras_model()
-    data = random_ops.random_uniform((10, 10))
+    data = tf.random.uniform((10, 10))
     func = lambda: model(data)
     # Symmetry with benchmark_keras_model_functional
     func()
@@ -152,11 +149,11 @@ class KerasComponentsBenchmarks(test.Benchmark):
     self._run(func, 30000)
 
   def _benchmark_keras_model_fit(self, model, run_eagerly=False):
-    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
-    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
-    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
+    data = tf.random.uniform((10, 10), minval=-1, maxval=1)
+    labels = tf.random.uniform((10, 10), minval=-1, maxval=1)
+    dataset = tf.data.Dataset.from_tensors((data, labels)).repeat()
     model.compile(
-        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
+        "sgd",
         loss="mse", run_eagerly=run_eagerly)
     func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
     # First call is more expensive (creates variables etc.), discount that.
@@ -165,11 +162,11 @@ class KerasComponentsBenchmarks(test.Benchmark):
     self._run(func, 1)
 
   def _benchmark_keras_model_evaluate(self, model, run_eagerly=False):
-    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
-    labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
-    dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
+    data = tf.random.uniform((10, 10), minval=-1, maxval=1)
+    labels = tf.random.uniform((10, 10), minval=-1, maxval=1)
+    dataset = tf.data.Dataset.from_tensors((data, labels)).repeat()
     model.compile(
-        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
+        "sgd",
         loss="mse", run_eagerly=run_eagerly)
     func = lambda: model.evaluate(dataset, steps=1000, verbose=0)
     # First call is more expensive (creates variables etc.), discount that.
@@ -178,10 +175,10 @@ class KerasComponentsBenchmarks(test.Benchmark):
     self._run(func, 1)
 
   def _benchmark_keras_model_predict(self, model, run_eagerly=False):
-    data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
-    dataset = dataset_ops.Dataset.from_tensors(data).repeat()
+    data = tf.random.uniform((10, 10), minval=-1, maxval=1)
+    dataset = tf.data.Dataset.from_tensors(data).repeat()
     model.compile(
-        gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
+        "sgd",
         loss="mse", run_eagerly=run_eagerly)
     func = lambda: model.predict(dataset, steps=1000, verbose=0)
     # First call is more expensive (creates variables etc.), discount that.