diff --git a/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.cc b/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.cc
index e39b1d6e1bd..915348b8e23 100644
--- a/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.cc
+++ b/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.cc
@@ -252,7 +252,7 @@ Status GetOptimizationAlgorithmStateVariables(
     StateVariableSpecification gradient_acc;
     gradient_acc.set_name("gradient_accumulators");
     gradient_acc.mutable_fill_with_constant()->set_initial_value(
-        kGradientAccumulatorInitialValue);
+        GradientAccumulatorInitialValue());
     state_variables->push_back(std::move(gradient_acc));
   }
   if (state_variables->size() > kMaxAuxiliaryParameterCount + 1) {
diff --git a/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.h b/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.h
index e7516da8f39..320863d19be 100644
--- a/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.h
+++ b/tensorflow/core/tpu/tpu_embedding_optimization_parameters_utils.h
@@ -84,7 +84,9 @@ static constexpr int kMaxAuxiliaryParameterCount = 3;
 // not no-ops on zero gradients, so we need to distinguish an accumulated
 // gradient of zero from one that has been cleared after its gradients have
 // already been applied to the parameters and accumulators.
-const float kGradientAccumulatorInitialValue = absl::bit_cast<float, uint32>(1);
+inline float GradientAccumulatorInitialValue() {
+  return absl::bit_cast<float, uint32>(1);
+}
 
 // Computes registration data for per table load Op. Each load Op transfers
 // the embedding parameters from the host memory to the TPU memory.
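
Context for the initial value being bit pattern 1 rather than 0.0f: reinterpreting the integer 1 as an IEEE-754 float gives the smallest positive subnormal (~1.4e-45), a value no real accumulation would produce, so an accumulator that has been cleared can be told apart from one holding a genuinely accumulated zero gradient. The sketch below is illustrative only and not part of the patch; it mimics the bit_cast without depending on absl, under the assumption of standard IEEE-754 floats.

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Illustrative stand-in for GradientAccumulatorInitialValue(): reinterpret
// the bit pattern 0x00000001 as a float (the smallest positive subnormal).
float SentinelInitialValue() {
  const uint32_t bits = 1;
  float value;
  std::memcpy(&value, &bits, sizeof(value));  // portable bit cast
  return value;
}

int main() {
  const float sentinel = SentinelInitialValue();
  const float accumulated_zero = 0.0f;
  // The sentinel marks "accumulator was cleared"; a zero gradient that was
  // actually accumulated stays at 0.0f, so the two states differ.
  std::cout << (sentinel == accumulated_zero ? "indistinguishable"
                                             : "distinguishable")
            << "\n";  // prints "distinguishable"
}
```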