Switch tests of the v1 FTRL optimizer (which is graph-only) to use ops.Graph().as_default() explicitly instead of relying on the run_deprecated_v1 decorator (which we're trying to avoid).
PiperOrigin-RevId: 315765675 Change-Id: Id9d9f4371713fc7636860b46312434f8ee1407d9
parent 7622b2e07f
commit 90ebd39ac4
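For context, the sketch below illustrates the pattern this change applies. It is not the literal file contents: the class name is a placeholder, and the expected values are lifted from the first test in the diff, restricted to float32. Instead of decorating a graph-only test with @test_util.run_deprecated_v1, the test body is wrapped in an explicit ops.Graph().as_default() scope so it never runs under eager execution.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl


class FtrlPatternTest(test.TestCase):
  """Illustrative only: shows the decorator-free, explicit-graph pattern."""

  def testFtrlWithoutRegularization(self):
    # Previously a method like this carried @test_util.run_deprecated_v1.
    # The explicit graph scope below replaces that decorator: the v1
    # optimizers are graph-only, so the body must not run eagerly.
    with ops.Graph().as_default():
      with self.cached_session():
        var0 = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
        var1 = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
        grads0 = constant_op.constant([0.1, 0.2], dtype=dtypes.float32)
        grads1 = constant_op.constant([0.01, 0.02], dtype=dtypes.float32)
        opt = ftrl.FtrlOptimizer(
            3.0,
            initial_accumulator_value=0.1,
            l1_regularization_strength=0.0,
            l2_regularization_strength=0.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()

        self.assertAllClose([0.0, 0.0], self.evaluate(var0))
        self.assertAllClose([0.0, 0.0], self.evaluate(var1))

        # Run 3 steps FTRL; expected values taken from the test in the diff.
        for _ in range(3):
          update.run()
        self.assertAllCloseAccordingToType(
            [-2.60260963, -4.29698515], self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            [-0.28432083, -0.56694895], self.evaluate(var1))


if __name__ == "__main__":
  test.main()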
@@ -23,7 +23,6 @@ import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
@@ -37,158 +36,163 @@ from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(test.TestCase):

def doTestFtrlwithoutRegularization(self, use_resource=False):
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0],
dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0],
dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()

for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()

# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), v0_val, half_rtol=1e-2)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), v1_val)

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), v0_val, half_rtol=1e-2)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), v1_val)

@test_util.run_deprecated_v1
def testFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=False)

@test_util.run_deprecated_v1
def testResourceFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=True)

@test_util.run_deprecated_v1
def testFtrlwithoutRegularization2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), v1_val)

@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1]],
self.evaluate(var0),
atol=0.01)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]],
dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1]],
self.evaluate(var0),
atol=0.01)

@test_util.run_deprecated_v1
def testFtrlWithL1(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), v1_val)

@test_util.run_deprecated_v1
def testFtrlWithL1_L2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

# Run 10 steps FTRL
for _ in range(10):
update.run()
# Run 10 steps FTRL
for _ in range(10):
update.run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), v1_val)

@test_util.run_deprecated_v1
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.

@@ -196,113 +200,117 @@ class FtrlOptimizerTest(test.TestCase):
towards the origin causes the gradient descent trajectory to differ. The
weights will tend to have smaller magnitudes with this parameter set.
"""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)

# Run 10 steps FTRL
for _ in range(10):
update.run()
# Run 10 steps FTRL
for _ in range(10):
update.run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.22578995, -0.44345796]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]), v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.22578995, -0.44345796]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]), v1_val)

@test_util.run_deprecated_v1
def testFtrlWithL1_L2_L2ShrinkageSparse(self):
"""Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))

opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)

# Run 10 steps FTRL
for _ in range(10):
update.run()
# Run 10 steps FTRL
for _ in range(10):
update.run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val)
self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val)
self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val)

@test_util.run_deprecated_v1
def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([1.0, 2.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([1.0, 2.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)

opt0 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
opt1 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
variables.global_variables_initializer().run()
opt0 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
opt1 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
variables.global_variables_initializer().run()

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)

# Run 10 steps FTRL
for _ in range(10):
update0.run()
update1.run()
# Run 10 steps FTRL
for _ in range(10):
update0.run()
update1.run()

v0_val, v1_val = self.evaluate([var0, var1])
# var0 is experiencing L2 shrinkage so it should be smaller than var1
# in magnitude.
self.assertTrue((v0_val**2 < v1_val**2).all())
accum0 = list(self.evaluate(opt0._slots)["accum"].values())[0]
accum1 = list(self.evaluate(opt1._slots)["accum"].values())[0]
# L2 shrinkage should not change how we update grad accumulator.
self.assertAllCloseAccordingToType(accum0, accum1)
v0_val, v1_val = self.evaluate([var0, var1])
# var0 is experiencing L2 shrinkage so it should be smaller than var1
# in magnitude.
self.assertTrue((v0_val**2 < v1_val**2).all())
accum0 = list(self.evaluate(opt0._slots)["accum"].values())[0]
accum1 = list(self.evaluate(opt1._slots)["accum"].values())[0]
# L2 shrinkage should not change how we update grad accumulator.
self.assertAllCloseAccordingToType(accum0, accum1)

def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
if is_sparse:
@@ -346,95 +354,100 @@ class FtrlOptimizerTest(test.TestCase):
# with Adagrad.
# So, basing on these two properties, we test if our implementation of
# FTRL-Proximal performs same updates as Adagrad or GradientDescent.
@test_util.run_deprecated_v1
def testEquivAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)

with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype)

self.assertAllCloseAccordingToType(val0, val2, half_rtol=2e-3)
self.assertAllCloseAccordingToType(val1, val3, half_rtol=2e-3)
self.assertAllCloseAccordingToType(val0, val2, half_rtol=2e-3)
self.assertAllCloseAccordingToType(val1, val3, half_rtol=2e-3)

@test_util.run_deprecated_v1
def testEquivSparseAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)

with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype,
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype,
is_sparse=True)

self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)

@test_util.run_deprecated_v1
def testEquivSparseGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)

with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0),
dtype,
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0),
dtype,
is_sparse=True)

self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)

@test_util.run_deprecated_v1
def testEquivGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
# The v1 optimizers do not support eager execution
with ops.Graph().as_default():
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)

with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), dtype)
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), dtype)

self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)


if __name__ == "__main__":