From 3b3862869c2c79c378a4b9007db3b421eb9e07ed Mon Sep 17 00:00:00 2001
From: Jaehong Kim
Date: Tue, 24 Nov 2020 10:55:02 -0800
Subject: [PATCH] Add bug for TODO on legacy toco flag

PiperOrigin-RevId: 344091608
Change-Id: I496c3a6590f398727bec38b7c8b5a320cd745f96
---
 .../internal/optimized/integer_ops/fully_connected.h | 2 +-
 .../lite/kernels/internal/reference/fully_connected.h | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h b/tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h
index d234c5bb4a1..a07ab68d7b2 100644
--- a/tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h
+++ b/tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h
@@ -44,7 +44,7 @@ inline void FullyConnected(
   const int32 output_activation_max = params.quantized_activation_max;
   TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
   TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
-  // TODO(benoitjacob): This really should be:
+  // TODO(b/62193649): This really should be:
   // const int batches = ArraySize(output_dims, 1);
   // but the current --variable_batch hack consists in overwriting the 3rd
   // dimension with the runtime batch size, as we don't keep track for each
diff --git a/tensorflow/lite/kernels/internal/reference/fully_connected.h b/tensorflow/lite/kernels/internal/reference/fully_connected.h
index 39a9cd023d8..d5ad9d6736b 100644
--- a/tensorflow/lite/kernels/internal/reference/fully_connected.h
+++ b/tensorflow/lite/kernels/internal/reference/fully_connected.h
@@ -31,7 +31,7 @@ inline void FullyConnected(
     float* output_data) {
   const float output_activation_min = params.float_activation_min;
   const float output_activation_max = params.float_activation_max;
-  // TODO(benoitjacob): This really should be:
+  // TODO(b/62193649): This really should be:
   // const int batches = ArraySize(output_dims, 1);
   // but the current --variable_batch hack consists in overwriting the 3rd
   // dimension with the runtime batch size, as we don't keep track for each
@@ -76,7 +76,7 @@ inline void FullyConnected(
   TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);

   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
-  // TODO(benoitjacob): This really should be:
+  // TODO(b/62193649): This really should be:
   // const int batches = ArraySize(output_dims, 1);
   // but the current --variable_batch hack consists in overwriting the 3rd
   // dimension with the runtime batch size, as we don't keep track for each
@@ -123,7 +123,7 @@ inline void FullyConnected(

   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
   TFLITE_DCHECK_EQ(output_offset, 0);
-  // TODO(benoitjacob): This really should be:
+  // TODO(b/62193649): This really should be:
   // const int batches = ArraySize(output_dims, 1);
   // but the current --variable_batch hack consists in overwriting the 3rd
   // dimension with the runtime batch size, as we don't keep track for each
@@ -176,7 +176,7 @@ inline void ShuffledFullyConnected(
   TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
   TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
   TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
-  // TODO(benoitjacob): This really should be:
+  // TODO(b/62193649): This really should be:
   // const int batches = ArraySize(output_dims, 1);
   // but the current --variable_batch hack consists in overwriting the 3rd
   // dimension with the runtime batch size, as we don't keep track for each
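
Note on what the TODO refers to: these kernels do not read the batch count from one fixed output dimension; they fold every output dimension except the last (the output depth) into the batch count, which is what keeps the legacy --variable_batch behavior working when the runtime batch size is written into the 3rd dimension. Below is a minimal standalone C++ sketch of that fold, not the TFLite code itself (the kernels use RuntimeShape and FlatSizeSkipDim rather than std::vector).

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative sketch only: compute the batch count the way the
// fully-connected kernels do, by multiplying together every output
// dimension except the last one (the output depth).
int BatchesFromOutputShape(const std::vector<int>& output_dims) {
  assert(!output_dims.empty());
  int batches = 1;
  for (std::size_t i = 0; i + 1 < output_dims.size(); ++i) {
    batches *= output_dims[i];
  }
  return batches;
}

int main() {
  // Hypothetical output shape {1, 1, runtime_batch, output_depth}: the
  // runtime batch size lands in the 3rd dimension, and folding all
  // leading dimensions still yields the correct batch count.
  const std::vector<int> output_dims = {1, 1, 8, 16};
  std::cout << "batches = " << BatchesFromOutputShape(output_dims)
            << ", output_depth = " << output_dims.back() << "\n";
  // Prints: batches = 8, output_depth = 16
  return 0;
}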