Fixed some minor bugs that blocked a learning rate of exactly zero for FTRL.

PiperOrigin-RevId: 335970687
Change-Id: I3b2bb3b8c3b567ab7817bd68e72a75fd55278633
A. Unique TensorFlower 2020-10-07 15:57:00 -07:00 committed by TensorFlower Gardener
parent e88f6739a7
commit ed6992c14c
2 changed files with 9 additions and 6 deletions


@@ -2615,11 +2615,14 @@ class SparseApplyFtrlOp : public OpKernel {
                 errors::InvalidArgument("indices must be one-dimensional"));
     const Tensor& lr = ctx->input(5);
-    OP_REQUIRES(ctx,
-                TensorShapeUtils::IsScalar(lr.shape()) &&
-                    lr.scalar<T>()() > static_cast<T>(0),
-                errors::InvalidArgument("lr is not a positive scalar: ",
-                                        lr.shape().DebugString()));
+    OP_REQUIRES(
+        ctx,
+        TensorShapeUtils::IsScalar(lr.shape()) &&
+            (lr.scalar<T>()() > static_cast<T>(0) ||
+             (multiply_linear_by_lr_ && lr.scalar<T>()() >= static_cast<T>(0))),
+        errors::InvalidArgument("lr is not a positive scalar (or zero if "
+                                "multiply_linear_by_lr is set): ",
+                                lr.shape().DebugString()));
     const Tensor& l1 = ctx->input(6);
     OP_REQUIRES(ctx,
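
The relaxed check above accepts any positive scalar learning rate, and additionally accepts a learning rate of exactly zero when the op's multiply_linear_by_lr attribute is set. A minimal sketch of that predicate, in plain Python for illustration only (the real validation runs in the C++ kernel; the helper name is hypothetical):

def lr_is_valid(lr, multiply_linear_by_lr):
    # Positive learning rates are always accepted; zero is accepted only
    # when multiply_linear_by_lr is set on the op.
    return lr > 0.0 or (multiply_linear_by_lr and lr >= 0.0)

assert lr_is_valid(0.01, False)      # unchanged behavior
assert not lr_is_valid(0.0, False)   # still rejected
assert lr_is_valid(0.0, True)        # newly allowed by this change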


@@ -149,7 +149,7 @@ class FtrlOptimizer(optimizer.Optimizer):
       # TensorFlow ops do not need to include that parameter.
       self._adjusted_l2_regularization_strength_tensor = ops.convert_to_tensor(
           self._l2_regularization_strength + self._beta /
-          (2. * self._learning_rate),
+          (2. * math_ops.maximum(self._learning_rate, 1e-36)),
           name="adjusted_l2_regularization_strength")
       assert self._adjusted_l2_regularization_strength_tensor is not None
       self._beta_tensor = ops.convert_to_tensor(self._beta, name="beta")
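
The second change clamps the learning rate away from zero before it is used as a divisor, so the adjusted L2 strength stays finite when the learning rate is exactly 0.0. A minimal sketch of the same computation with plain Python floats (the adjusted_l2 helper is hypothetical and only mirrors the expression above):

def adjusted_l2(l2_strength, beta, learning_rate, eps=1e-36):
    # Mirrors l2 + beta / (2 * max(lr, eps)); the clamp avoids a
    # division by zero when the learning rate is exactly 0.0.
    return l2_strength + beta / (2.0 * max(learning_rate, eps))

print(adjusted_l2(0.001, 0.5, 0.0))   # finite (~2.5e+35) rather than a ZeroDivisionError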