From c45a3444af2f81d005b12c15729cf53f35f8e438 Mon Sep 17 00:00:00 2001
From: Raman Sarokin
Date: Fri, 12 Jun 2020 19:11:37 -0700
Subject: [PATCH] Compilation fixes for MSVC compiler.

PiperOrigin-RevId: 316217725
Change-Id: I595eee325c6bd2ab253e710617c6a0cbaccb6aba
---
 .../lite/delegates/gpu/cl/kernels/space_to_depth.cc   | 7 +------
 .../delegates/gpu/cl/testing/performance_profiling.cc | 8 ++++----
 tensorflow/lite/delegates/gpu/cl/util.h               | 3 ++-
 3 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc b/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc
index b763684516a..34227f6b887 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc
@@ -66,12 +66,7 @@ std::string GetSpaceToDepthCode(
   c += "    tmp[i] = t_ar[src_c % 4];\n";
   c += "  }\n";
   c += "  FLT4 result = (FLT4)(tmp[0], tmp[1], tmp[2], tmp[3]);\n";
-  const LinkingContext context = {
-      .var_name = "result",
-      .x_coord = "X",
-      .y_coord = "Y",
-      .s_coord = "Z",
-  };
+  const LinkingContext context{"result", "X", "Y", "Z"};
   c += PostProcess(linked_operations, context);
   c += "  " + dst_tensor.WriteWHS("result", "X", "Y", "Z");
   c += "}\n";
diff --git a/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc b/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
index 75dcbc1d163..0c500cd0bbe 100644
--- a/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
+++ b/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
@@ -14,6 +14,7 @@ limitations under the License.
 ==============================================================================*/
 
 #include <algorithm>
+#include <chrono>  // NOLINT(build/c++11)
 #include <iostream>
 #include <string>
 
@@ -64,14 +65,13 @@ absl::Status RunModelSample(const std::string& model_name) {
 
   const int kNumRuns = 10;
   for (int i = 0; i < kNumRuns; ++i) {
-    const auto start = absl::Now();
+    const auto start = std::chrono::high_resolution_clock::now();
     for (int k = 0; k < num_runs_per_sec; ++k) {
       RETURN_IF_ERROR(context.AddToQueue(env.queue()));
     }
     RETURN_IF_ERROR(env.queue()->WaitForCompletion());
-    const auto end = absl::Now();
-    const double total_time_ms =
-        static_cast<double>((end - start) / absl::Nanoseconds(1)) * 1e-6;
+    const auto end = std::chrono::high_resolution_clock::now();
+    const double total_time_ms = (end - start).count() * 1e-6f;
     const double average_inference_time = total_time_ms / num_runs_per_sec;
     std::cout << "Total time - " << average_inference_time << "ms" << std::endl;
   }
diff --git a/tensorflow/lite/delegates/gpu/cl/util.h b/tensorflow/lite/delegates/gpu/cl/util.h
index 4b100a1b4b0..9435bb3a8a2 100644
--- a/tensorflow/lite/delegates/gpu/cl/util.h
+++ b/tensorflow/lite/delegates/gpu/cl/util.h
@@ -36,7 +36,8 @@ int ChannelTypeToSizeInBytes(cl_channel_type type);
 bool OpenCLSupported();
 
 template <DataType S, typename T>
-void CopyLinearFLT4(const Tensor<Linear, S>& src, absl::Span<T> dst) {
+void CopyLinearFLT4(const tflite::gpu::Tensor<Linear, S>& src,
+                    absl::Span<T> dst) {
   const int dst_depth = dst.size();
   for (int d = 0; d < dst_depth; ++d) {
     T val;
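
The standalone sketch below (not part of the patch) illustrates the two portability patterns the change relies on. Designated initializers (".var_name = ...") are only standard as of C++20 and are rejected by MSVC in pre-C++20 modes, so the LinkingContext is brace-initialized positionally in member order instead, and timing switches from absl::Now()/absl::Nanoseconds() to std::chrono. The LinkingContext struct here is a reduced stand-in assumed to hold the same four string fields; the explicit duration_cast is likewise an assumption chosen for portability, since the native tick period of high_resolution_clock is implementation-defined.

#include <chrono>  // NOLINT(build/c++11)
#include <iostream>
#include <string>
#include <thread>

// Stand-in for the real LinkingContext: four string fields in the same
// order as the positional initializer used in the patch.
struct LinkingContext {
  std::string var_name;
  std::string x_coord;
  std::string y_coord;
  std::string s_coord;
};

int main() {
  // Positional aggregate initialization: valid since C++11 and accepted by
  // MSVC, unlike the C++20 designated-initializer form it replaces.
  const LinkingContext context{"result", "X", "Y", "Z"};
  std::cout << "var_name = " << context.var_name << std::endl;

  // Wall-clock timing with <chrono> instead of absl::Now().
  const auto start = std::chrono::high_resolution_clock::now();
  std::this_thread::sleep_for(std::chrono::milliseconds(5));  // timed work
  const auto end = std::chrono::high_resolution_clock::now();
  // Convert to milliseconds explicitly; duration_cast keeps the result
  // independent of the clock's native tick period.
  const double total_time_ms =
      std::chrono::duration_cast<std::chrono::nanoseconds>(end - start)
          .count() *
      1e-6;
  std::cout << "Elapsed - " << total_time_ms << "ms" << std::endl;
  return 0;
}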