From 0ef9935e9ffb3b851f9f097530bfc1a72261e9b2 Mon Sep 17 00:00:00 2001
From: amturati
Date: Fri, 4 Sep 2020 18:09:36 +0000
Subject: [PATCH] fixing style nits in gradient checker and gradients_util

---
 tensorflow/c/eager/gradient_checker.cc      | 15 +++++++--------
 tensorflow/c/eager/gradient_checker.h       |  6 +-----
 tensorflow/c/eager/gradient_checker_test.cc |  8 +++++---
 tensorflow/c/eager/gradients_util.h         | 18 +-----------------
 4 files changed, 14 insertions(+), 33 deletions(-)

diff --git a/tensorflow/c/eager/gradient_checker.cc b/tensorflow/c/eager/gradient_checker.cc
index 306666f66b9..2ca028ad865 100644
--- a/tensorflow/c/eager/gradient_checker.cc
+++ b/tensorflow/c/eager/gradient_checker.cc
@@ -106,10 +106,9 @@ Status RunAndMaybeSum(AbstractContext* ctx, Model forward,
 // ========================= End Helper Functions==============================
 
 Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
-                         std::vector<AbstractTensorHandle*> inputs,
+                         absl::Span<AbstractTensorHandle*> inputs,
                          int input_index, bool use_function,
                          AbstractTensorHandle** numerical_grad) {
-  GradientRegistry registry;
   AbstractTensorHandle* theta =
       inputs[input_index];  // parameter we are grad checking
 
@@ -123,15 +122,15 @@ Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
   memcpy(&theta_data[0], TF_TensorData(theta_tensor),
          TF_TensorByteSize(theta_tensor));
 
-  // Initialize space for the numerical gradient
+  // Initialize space for the numerical gradient.
   float dtheta_approx[num_elems];
 
-  // Get theta shape and store in theta_dims
+  // Get theta shape and store in theta_dims.
   int num_dims = TF_NumDims(theta_tensor);
   int64_t theta_dims[num_dims];
   GetDims(theta_tensor, theta_dims);
 
-  // Initialize auxilary data structures
+  // Initialize auxiliary data structures.
   float thetaPlus_data[num_elems];
   float thetaMinus_data[num_elems];
   std::vector<AbstractTensorHandle*> f_outputs(1);
@@ -160,13 +159,13 @@ Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
 
     // Get f(theta + eps):
     inputs[input_index] = thetaPlus.get();
-    TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, absl::MakeSpan(inputs),
+    TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, inputs,
                                       absl::MakeSpan(f_outputs), use_function));
     AbstractTensorHandle* fPlus = f_outputs[0];
 
     // Get f(theta - eps):
     inputs[input_index] = thetaMinus.get();
-    TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, absl::MakeSpan(inputs),
+    TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, inputs,
                                       absl::MakeSpan(f_outputs), use_function));
     AbstractTensorHandle* fMinus = f_outputs[0];
 
@@ -191,7 +190,7 @@ Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
     dtheta_approx[i] = grad_data[0];
   }
 
-  // Populate *numerical_grad with the data from dtheta_approx
+  // Populate *numerical_grad with the data from dtheta_approx.
   TF_RETURN_IF_ERROR(TensorHandleWithDimsFloat(ctx, dtheta_approx, theta_dims,
                                                num_dims, numerical_grad));
   return Status::OK();
diff --git a/tensorflow/c/eager/gradient_checker.h b/tensorflow/c/eager/gradient_checker.h
index 5462b8e8686..8497f5af48e 100644
--- a/tensorflow/c/eager/gradient_checker.h
+++ b/tensorflow/c/eager/gradient_checker.h
@@ -33,10 +33,6 @@ limitations under the License.
 namespace tensorflow {
 namespace gradients {
 
-using Model = std::function<Status(
-    AbstractContext*, absl::Span<AbstractTensorHandle* const>,
-    absl::Span<AbstractTensorHandle*>, const GradientRegistry&)>;
-
 /* Returns numerical grad inside `dtheta_approx` given `forward` model and
  * parameter specified by `input_index`.
  *
@@ -49,7 +45,7 @@ using Model = std::function<Status(
  * `use_function` indicates whether to use graph mode(true) or eager(false).
  */
 Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
-                         std::vector<AbstractTensorHandle*> inputs,
+                         absl::Span<AbstractTensorHandle*> inputs,
                          int input_index, bool use_function,
                          AbstractTensorHandle** numerical_grad);
 
diff --git a/tensorflow/c/eager/gradient_checker_test.cc b/tensorflow/c/eager/gradient_checker_test.cc
index 496ee2fc1f5..fa7f7285c0a 100644
--- a/tensorflow/c/eager/gradient_checker_test.cc
+++ b/tensorflow/c/eager/gradient_checker_test.cc
@@ -81,7 +81,7 @@ TEST_P(GradientCheckerTest, TestGradCheckMatMul) {
 
   AbstractTensorHandle* grad_approx;
   Status s = CalcNumericalGrad(
-      ctx.get(), MatMulModel, inputs, /*input_index=*/0,
+      ctx.get(), MatMulModel, absl::MakeSpan(inputs), /*input_index=*/0,
       /*use_function=*/!std::get<2>(GetParam()), &grad_approx);
   ASSERT_EQ(errors::OK, s.code()) << s.error_message();
 
@@ -136,7 +136,8 @@ TEST_P(GradientCheckerTest, TestGradCheckMul) {
 
   float dapprox[1] = {0};
   AbstractTensorHandle* g;
-  Status s = CalcNumericalGrad(ctx.get(), MulModel, inputs, /*input_index=*/0,
+  Status s = CalcNumericalGrad(ctx.get(), MulModel, absl::MakeSpan(inputs),
+                               /*input_index=*/0,
                                /*use_function=*/!std::get<2>(GetParam()), &g);
   ASSERT_EQ(errors::OK, s.code()) << s.error_message();
 
@@ -213,7 +214,8 @@ TEST_P(GradientCheckerTest, TestGradCheckSoftmax) {
 
   // Run numerical gradient approximation using the GradientChecker API.
   AbstractTensorHandle* g;  // Will contain numerical approximation data.
-  s = CalcNumericalGrad(ctx.get(), SoftmaxModel, inputs, /*input_index=*/0,
+  s = CalcNumericalGrad(ctx.get(), SoftmaxModel, absl::MakeSpan(inputs),
+                        /*input_index=*/0,
                         /*use_function=*/!std::get<2>(GetParam()), &g);
   ASSERT_EQ(errors::OK, s.code()) << s.error_message();
 
diff --git a/tensorflow/c/eager/gradients_util.h b/tensorflow/c/eager/gradients_util.h
index e10a089f413..3489a5b370b 100644
--- a/tensorflow/c/eager/gradients_util.h
+++ b/tensorflow/c/eager/gradients_util.h
@@ -35,15 +35,6 @@ limitations under the License.
 namespace tensorflow {
 namespace gradients {
 
-TFE_TensorHandle* ScalarTensorHandleHelper(TFE_Context* ctx, float value);
-
-TFE_TensorHandle* TensorHandleWithDimsFloatHelper(TFE_Context* ctx,
-                                                  float data[], int64_t dims[],
-                                                  int num_dims);
-
-TFE_TensorHandle* TensorHandleWithDimsIntHelper(TFE_Context* ctx, int data[],
-                                                int64_t dims[], int num_dims);
-
 // Get a scalar TensorHandle with given value
 Status ScalarTensorHandle(AbstractContext* ctx, float value,
                           AbstractTensorHandle** tensor);
@@ -79,14 +70,6 @@ Status UpdateWeights(AbstractContext* ctx,
                      std::vector<AbstractTensorHandle*>& weights,
                      AbstractTensorHandle* learning_rate);
 
-// Helper function for RunModel to build the function for graph mode.
-AbstractContext* BuildFunction(const char* fn_name);
-
-// Helper function for RunModel to add params for graph mode.
-Status CreateParamsForInputs(AbstractContext* ctx,
-                             absl::Span<AbstractTensorHandle* const> inputs,
-                             std::vector<AbstractTensorHandle*>* params);
-
 using Model = std::function<Status(
     AbstractContext*, absl::Span<AbstractTensorHandle* const>,
     absl::Span<AbstractTensorHandle*>, const GradientRegistry&)>;
@@ -98,6 +81,7 @@ Status RunModel(Model model, AbstractContext* ctx,
                 absl::Span<AbstractTensorHandle*> outputs, bool use_function,
                 const GradientRegistry& registry);
 
+// Builds context and returns inside *ctx.
Status BuildImmediateExecutionContext(bool use_tfrt, AbstractContext** ctx);
 
 } // namespace gradients
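
A note on the math being exercised here: `CalcNumericalGrad` approximates the
gradient with a central difference,

  dtheta_approx[i] = (f(theta + eps) - f(theta - eps)) / (2 * eps),

which is exactly what the fPlus/fMinus loop in gradient_checker.cc computes
per element. Below is a minimal standalone C++ sketch of the same scheme; the
names (`NumericalGrad`, `f`) and the epsilon value are illustrative only, not
part of the TensorFlow C API.

#include <cstdio>
#include <functional>
#include <vector>

// Central-difference approximation of df/dtheta[i], mirroring the
// dtheta_approx loop in gradient_checker.cc. Illustrative sketch only;
// the default eps is a placeholder, not TensorFlow's choice.
std::vector<float> NumericalGrad(
    const std::function<float(const std::vector<float>&)>& f,
    std::vector<float> theta, float eps = 1e-2f) {
  std::vector<float> grad(theta.size());
  for (size_t i = 0; i < theta.size(); ++i) {
    const float saved = theta[i];
    theta[i] = saved + eps;
    const float f_plus = f(theta);   // f(theta + eps), cf. fPlus
    theta[i] = saved - eps;
    const float f_minus = f(theta);  // f(theta - eps), cf. fMinus
    theta[i] = saved;                // restore the perturbed element
    grad[i] = (f_plus - f_minus) / (2.0f * eps);
  }
  return grad;
}

int main() {
  // f(x, y) = x * y, so df/dx at (3, 2) should be ~2.
  auto f = [](const std::vector<float>& v) { return v[0] * v[1]; };
  std::printf("%f\n", NumericalGrad(f, {3.0f, 2.0f})[0]);
}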
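
On the signature change itself: `absl::MakeSpan(inputs)` in the tests wraps the
existing `std::vector<AbstractTensorHandle*>` in a mutable, non-owning view, so
`CalcNumericalGrad` can still overwrite `inputs[input_index]` with
`thetaPlus.get()`/`thetaMinus.get()` while avoiding a copy of the handle vector
on every call. A small sketch of that aliasing behavior, with plain `int`
standing in for `AbstractTensorHandle*` (assumes Abseil is available; the
`SwapInput` helper is hypothetical):

#include <cassert>
#include <vector>

#include "absl/types/span.h"

// Writing through a span mutates the caller's storage: the same property that
// lets the checker swap thetaPlus/thetaMinus into inputs[input_index] without
// copying. SwapInput is an illustrative stand-in, not a TensorFlow API.
void SwapInput(absl::Span<int> inputs, int input_index, int value) {
  inputs[input_index] = value;
}

int main() {
  std::vector<int> inputs = {1, 2, 3};
  SwapInput(absl::MakeSpan(inputs), /*input_index=*/0, 42);
  assert(inputs[0] == 42);  // change is visible in the original vector
  return 0;
}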