Adds eager microbenchmarks for the Keras .fit
method
PiperOrigin-RevId: 218269923
This commit is contained in:
parent
e31663c010
commit
236725c044
@ -33,6 +33,7 @@ from six.moves import xrange # pylint: disable=redefined-builtin
|
|||||||
|
|
||||||
from tensorflow.python import keras
|
from tensorflow.python import keras
|
||||||
from tensorflow.python import pywrap_tensorflow
|
from tensorflow.python import pywrap_tensorflow
|
||||||
|
from tensorflow.python.data.ops import dataset_ops
|
||||||
from tensorflow.python.eager import backprop # pylint: disable=unused-import
|
from tensorflow.python.eager import backprop # pylint: disable=unused-import
|
||||||
from tensorflow.python.eager import context
|
from tensorflow.python.eager import context
|
||||||
from tensorflow.python.eager import core
|
from tensorflow.python.eager import core
|
||||||
@ -48,6 +49,7 @@ from tensorflow.python.ops import gen_math_ops
|
|||||||
from tensorflow.python.ops import math_ops
|
from tensorflow.python.ops import math_ops
|
||||||
from tensorflow.python.ops import random_ops
|
from tensorflow.python.ops import random_ops
|
||||||
from tensorflow.python.ops import resource_variable_ops
|
from tensorflow.python.ops import resource_variable_ops
|
||||||
|
from tensorflow.python.training import gradient_descent
|
||||||
|
|
||||||
CPU = "/device:CPU:0"
|
CPU = "/device:CPU:0"
|
||||||
GPU = "/device:GPU:0"
|
GPU = "/device:GPU:0"
|
||||||
@ -76,18 +78,18 @@ def c_tfe_py_fastpath_execute(a,
|
|||||||
|
|
||||||
class SubclassedKerasModel(keras.Model):
|
class SubclassedKerasModel(keras.Model):
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self, initializer="ones"):
|
||||||
super(SubclassedKerasModel, self).__init__()
|
super(SubclassedKerasModel, self).__init__()
|
||||||
self.layer_a = keras.layers.Dense(
|
self.layer_a = keras.layers.Dense(
|
||||||
64, kernel_initializer="ones", bias_initializer="zeros")
|
64, kernel_initializer=initializer, bias_initializer="zeros")
|
||||||
self.layer_b = keras.layers.Dense(
|
self.layer_b = keras.layers.Dense(
|
||||||
128, kernel_initializer="ones", bias_initializer="zeros")
|
128, kernel_initializer=initializer, bias_initializer="zeros")
|
||||||
self.layer_c = keras.layers.Dense(
|
self.layer_c = keras.layers.Dense(
|
||||||
256, kernel_initializer="ones", bias_initializer="zeros")
|
256, kernel_initializer=initializer, bias_initializer="zeros")
|
||||||
self.layer_d = keras.layers.Dense(
|
self.layer_d = keras.layers.Dense(
|
||||||
256, kernel_initializer="ones", bias_initializer="zeros")
|
256, kernel_initializer=initializer, bias_initializer="zeros")
|
||||||
self.layer_e = keras.layers.Dense(
|
self.layer_e = keras.layers.Dense(
|
||||||
10, kernel_initializer="ones", bias_initializer="zeros")
|
10, kernel_initializer=initializer, bias_initializer="zeros")
|
||||||
|
|
||||||
def call(self, x):
|
def call(self, x):
|
||||||
x = self.layer_a(x)
|
x = self.layer_a(x)
|
||||||
@ -97,34 +99,34 @@ class SubclassedKerasModel(keras.Model):
|
|||||||
return self.layer_e(x)
|
return self.layer_e(x)
|
||||||
|
|
||||||
|
|
||||||
def make_keras_model(initializer="ones"):
  """Builds the functional-API version of the benchmark model.

  Mirrors SubclassedKerasModel: a 64-128-256-256-10 dense stack over a
  10-dimensional input.

  Args:
    initializer: Kernel initializer for every Dense layer ("ones" by default
      for deterministic forward-pass benchmarks; "glorot_uniform" for fit
      benchmarks).

  Returns:
    A compiled-later `keras.Model` mapping a (batch, 10) input to a
    (batch, 10) output.
  """
  model_input = keras.Input(shape=(10,))
  x = keras.layers.Dense(
      64, kernel_initializer=initializer, bias_initializer="zeros")(model_input)
  x = keras.layers.Dense(
      128, kernel_initializer=initializer, bias_initializer="zeros")(x)
  x = keras.layers.Dense(
      256, kernel_initializer=initializer, bias_initializer="zeros")(x)
  x = keras.layers.Dense(
      256, kernel_initializer=initializer, bias_initializer="zeros")(x)
  x = keras.layers.Dense(
      10, kernel_initializer=initializer, bias_initializer="zeros")(x)
  return keras.Model(inputs=model_input, outputs=x)
||||||
|
|
||||||
|
|
||||||
def make_sequential_keras_model(initializer="ones"):
  """Builds the Sequential-API version of the benchmark model.

  Mirrors SubclassedKerasModel / make_keras_model: a 64-128-256-256-10 dense
  stack over a 10-dimensional input.

  Args:
    initializer: Kernel initializer for every Dense layer ("ones" by default
      for deterministic forward-pass benchmarks; "glorot_uniform" for fit
      benchmarks).

  Returns:
    A `keras.models.Sequential` model mapping a (batch, 10) input to a
    (batch, 10) output.
  """
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(
      64, kernel_initializer=initializer, bias_initializer="zeros",
      input_shape=(10,)))
  model.add(keras.layers.Dense(
      128, kernel_initializer=initializer, bias_initializer="zeros"))
  model.add(keras.layers.Dense(
      256, kernel_initializer=initializer, bias_initializer="zeros"))
  model.add(keras.layers.Dense(
      256, kernel_initializer=initializer, bias_initializer="zeros"))
  model.add(keras.layers.Dense(
      10, kernel_initializer=initializer, bias_initializer="zeros"))
  return model
||||||
|
|
||||||
|
|
||||||
@ -718,6 +720,48 @@ class MicroBenchmarks(test.Benchmark):
|
|||||||
assert np.equal(func(), make_keras_model()(data)).all()
|
assert np.equal(func(), make_keras_model()(data)).all()
|
||||||
self._run(func, 30000)
|
self._run(func, 30000)
|
||||||
|
|
||||||
|
def benchmark_keras_model_subclassed_fit(self):
|
||||||
|
model = SubclassedKerasModel(initializer="glorot_uniform")
|
||||||
|
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
|
||||||
|
labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
|
||||||
|
dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
|
||||||
|
model.compile(
|
||||||
|
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
|
||||||
|
loss="mse")
|
||||||
|
func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
|
||||||
|
# First call is more expensive (creates variables etc.), discount that.
|
||||||
|
model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
|
||||||
|
|
||||||
|
self._run(func, 1)
|
||||||
|
|
||||||
|
def benchmark_keras_model_functional_fit(self):
|
||||||
|
model = make_keras_model(initializer="glorot_uniform")
|
||||||
|
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
|
||||||
|
labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
|
||||||
|
dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
|
||||||
|
model.compile(
|
||||||
|
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
|
||||||
|
loss="mse")
|
||||||
|
func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
|
||||||
|
# First call is more expensive (creates variables etc.), discount that.
|
||||||
|
model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
|
||||||
|
|
||||||
|
self._run(func, 1)
|
||||||
|
|
||||||
|
def benchmark_keras_model_sequential_fit(self):
|
||||||
|
model = make_sequential_keras_model(initializer="glorot_uniform")
|
||||||
|
data = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
|
||||||
|
labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1)
|
||||||
|
dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat()
|
||||||
|
model.compile(
|
||||||
|
gradient_descent.GradientDescentOptimizer(learning_rate=0.001),
|
||||||
|
loss="mse")
|
||||||
|
func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0)
|
||||||
|
# First call is more expensive (creates variables etc.), discount that.
|
||||||
|
model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0)
|
||||||
|
|
||||||
|
self._run(func, 1)
|
||||||
|
|
||||||
def benchmarkScan(self):
|
def benchmarkScan(self):
|
||||||
elems = math_ops.range(1600)
|
elems = math_ops.range(1600)
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user