diff --git a/tensorflow/lite/kernels/internal/BUILD b/tensorflow/lite/kernels/internal/BUILD
index bf329cd2d52..0f363f06a8c 100644
--- a/tensorflow/lite/kernels/internal/BUILD
+++ b/tensorflow/lite/kernels/internal/BUILD
@@ -175,7 +175,6 @@ cc_library(
     deps = [
         ":types",
         "@gemmlowp//:fixedpoint",
-        "@gemmlowp//:profiler",
     ] + select({
         ":haswell": tflite_deps_intel,
         ":ios_x86_64": tflite_deps_intel,
diff --git a/tensorflow/lite/kernels/internal/common.h b/tensorflow/lite/kernels/internal/common.h
index 71a306035c2..03d71f2b06e 100644
--- a/tensorflow/lite/kernels/internal/common.h
+++ b/tensorflow/lite/kernels/internal/common.h
@@ -46,7 +46,6 @@ limitations under the License.
 #endif
 
 #include "fixedpoint/fixedpoint.h"
-#include "profiling/instrumentation.h"
 #include "tensorflow/lite/kernels/internal/types.h"
 
 namespace tflite {
@@ -96,7 +95,6 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
   // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
   // This turned out to severely regress performance: +4ms (i.e. 8%) on
   // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
-  gemmlowp::ScopedProfilingLabel label("BiasAndClamp");
   TFLITE_DCHECK_EQ((array_size % bias_size), 0);
 #ifdef USE_NEON
   float* array_ptr = array_data;