diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index d2d8996a011..d6b2e5f955a 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -2065,7 +2065,13 @@ cc_library(
         "//tensorflow/core/platform/default:logging.h",
     ],
     copts = tf_copts(),
-    linkopts = ["-ldl"],
+    linkopts = select({
+        "//tensorflow:freebsd": [],
+        "//tensorflow:windows": [],
+        "//conditions:default": [
+            "-ldl",
+        ],
+    }),
     visibility = ["//visibility:public"],
     deps = [
         ":platform_base",
diff --git a/tensorflow/lite/c/BUILD b/tensorflow/lite/c/BUILD
index f9549fc3571..e1702d40d5a 100644
--- a/tensorflow/lite/c/BUILD
+++ b/tensorflow/lite/c/BUILD
@@ -87,6 +87,7 @@ cc_test(
     name = "c_api_test",
     size = "small",
     srcs = ["c_api_test.cc"],
+    copts = tflite_copts(),
    data = [
         "//tensorflow/lite:testdata/add.bin",
         "//tensorflow/lite:testdata/add_quantized.bin",
@@ -103,6 +104,7 @@ cc_test(
     name = "c_api_experimental_test",
     size = "small",
     srcs = ["c_api_experimental_test.cc"],
+    copts = tflite_copts(),
     data = ["//tensorflow/lite:testdata/add.bin"],
     deps = [
         ":c_api",
diff --git a/tensorflow/lite/c/c_api_experimental_test.cc b/tensorflow/lite/c/c_api_experimental_test.cc
index 71a08b5af26..18bc7bb0397 100644
--- a/tensorflow/lite/c/c_api_experimental_test.cc
+++ b/tensorflow/lite/c/c_api_experimental_test.cc
@@ -25,11 +25,10 @@ namespace {
 
 TfLiteRegistration* GetDummyRegistration() {
   static TfLiteRegistration registration = {
-      .init = nullptr,
-      .free = nullptr,
-      .prepare = nullptr,
-      .invoke = [](TfLiteContext*, TfLiteNode*) { return kTfLiteOk; },
-  };
+      /*init=*/nullptr,
+      /*free=*/nullptr,
+      /*prepare=*/nullptr,
+      /*invoke=*/[](TfLiteContext*, TfLiteNode*) { return kTfLiteOk; }};
   return &registration;
 }
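The c_api_experimental_test.cc hunk above replaces designated initializers (.init = nullptr) with positional aggregate initialization annotated by /*field=*/ comments. Designated initializers only enter standard C++ with C++20; GCC and Clang tolerate them in earlier language modes as an extension, but MSVC does not, which is what this change works around. A minimal sketch of the two spellings, using a simplified stand-in struct rather than the real TfLiteRegistration:

    #include <cstdio>

    // Stand-in for a registration struct with function-pointer fields.
    struct Registration {
      void* (*init)(void*);
      void (*free_fn)(void*);
      int (*prepare)(void*);
      int (*invoke)(void*);
    };

    int main() {
      // C++20 (and GCC/Clang extension) spelling, rejected by MSVC in C++14:
      //   Registration r = {.init = nullptr, .invoke = nullptr};

      // Portable C++14 spelling: positional initialization, with comments
      // recording which field each value binds to.
      Registration r = {/*init=*/nullptr, /*free_fn=*/nullptr,
                        /*prepare=*/nullptr, /*invoke=*/nullptr};
      std::printf("invoke set: %d\n", r.invoke != nullptr);
      return 0;
    }

The comment-per-field convention keeps the call site as self-documenting as the designated form while staying inside C++14.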
diff --git a/tensorflow/lite/kernels/expand_dims_test.cc b/tensorflow/lite/kernels/expand_dims_test.cc
index eba5b88c42c..5bb1d76f00f 100644
--- a/tensorflow/lite/kernels/expand_dims_test.cc
+++ b/tensorflow/lite/kernels/expand_dims_test.cc
@@ -26,8 +26,8 @@ namespace {
 using ::testing::ElementsAreArray;
 
 enum class TestType {
-  CONST = 0,
-  DYNAMIC = 1,
+  kConst = 0,
+  kDynamic = 1,
 };
 
 template <typename T>
@@ -36,7 +36,7 @@ class ExpandDimsOpModel : public SingleOpModel {
   ExpandDimsOpModel(int axis, std::initializer_list<int> input_shape,
                     std::initializer_list<T> input_data,
                     TestType input_tensor_types) {
-    if (input_tensor_types == TestType::DYNAMIC) {
+    if (input_tensor_types == TestType::kDynamic) {
       input_ = AddInput(GetTensorType<T>());
       axis_ = AddInput(TensorType_INT32);
     } else {
@@ -50,7 +50,7 @@ class ExpandDimsOpModel : public SingleOpModel {
 
     BuildInterpreter({input_shape, {1}});
 
-    if (input_tensor_types == TestType::DYNAMIC) {
+    if (input_tensor_types == TestType::kDynamic) {
       PopulateTensor(input_, input_data);
       PopulateTensor<int32_t>(axis_, {axis});
     }
@@ -69,18 +69,18 @@ class ExpandDimsOpModel : public SingleOpModel {
 template <typename T>
 class ExpandDimsOpTest : public ::testing::Test {
  public:
-  static std::vector<TestType> _range_;
+  static std::vector<TestType> range_;
 };
 
 template <>
-std::vector<TestType> ExpandDimsOpTest<TestType>::_range_{TestType::CONST,
-                                                          TestType::DYNAMIC};
+std::vector<TestType> ExpandDimsOpTest<TestType>::range_{TestType::kConst,
+                                                         TestType::kDynamic};
 
 using DataTypes = ::testing::Types;
 TYPED_TEST_SUITE(ExpandDimsOpTest, DataTypes);
 
 TYPED_TEST(ExpandDimsOpTest, PositiveAxis) {
-  for (TestType test_type : ExpandDimsOpTest<TestType>::_range_) {
+  for (TestType test_type : ExpandDimsOpTest<TestType>::range_) {
     std::initializer_list<TypeParam> values = {-1, 1, -2, 2};
 
     ExpandDimsOpModel<TypeParam> axis_0(0, {2, 2}, values, test_type);
@@ -101,7 +101,7 @@ TYPED_TEST(ExpandDimsOpTest, PositiveAxis) {
 }
 
 TYPED_TEST(ExpandDimsOpTest, NegativeAxis) {
-  for (TestType test_type : ExpandDimsOpTest<TestType>::_range_) {
+  for (TestType test_type : ExpandDimsOpTest<TestType>::range_) {
     std::initializer_list<TypeParam> values = {-1, 1, -2, 2};
 
     ExpandDimsOpModel<TypeParam> m(-1, {2, 2}, values, test_type);
@@ -115,7 +115,7 @@ TEST(ExpandDimsOpTest, StrTensor) {
   std::initializer_list<std::string> values = {"abc", "de", "fghi"};
 
   // this test will fail on TestType::CONST
-  ExpandDimsOpModel<std::string> m(0, {3}, values, TestType::DYNAMIC);
+  ExpandDimsOpModel<std::string> m(0, {3}, values, TestType::kDynamic);
   m.Invoke();
   EXPECT_THAT(m.GetValues(), ElementsAreArray(values));
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
diff --git a/tensorflow/lite/kernels/fully_connected_test.cc b/tensorflow/lite/kernels/fully_connected_test.cc
index fbc02dd741d..6eda657f5bf 100644
--- a/tensorflow/lite/kernels/fully_connected_test.cc
+++ b/tensorflow/lite/kernels/fully_connected_test.cc
@@ -713,11 +713,13 @@ void SimpleTestQuantizedInt16OutputCase(
       /*activation_func=*/ActivationFunctionType_NONE, weights_format);
 
   std::mt19937 random_engine;
-  std::uniform_int_distribution<uint8_t> weights_dist;
+  // Some compilers don't support uint8_t for uniform_distribution.
+  std::uniform_int_distribution<uint32_t> weights_dist(
+      0, std::numeric_limits<uint8_t>::max());
 
   std::vector<float> weights_data(input_depth * output_depth);
   for (auto& w : weights_data) {
-    uint8_t q = weights_dist(random_engine);
+    uint8_t q = static_cast<uint8_t>(weights_dist(random_engine));
     w = (q - kWeightsZeroPoint) * kWeightsScale;
   }
 
@@ -739,10 +741,12 @@ void SimpleTestQuantizedInt16OutputCase(
       LOG(FATAL) << "Unhandled weights format";
   }
 
-  std::uniform_int_distribution<uint8_t> input_dist;
+  // Some compilers don't support uint8_t for uniform_distribution.
+  std::uniform_int_distribution<uint32_t> input_dist(
+      0, std::numeric_limits<uint8_t>::max());
 
   std::vector<float> input_data(input_depth * batches);
   for (auto& i : input_data) {
-    uint8_t q = input_dist(random_engine);
+    uint8_t q = static_cast<uint8_t>(input_dist(random_engine));
     i = (q - kInputZeroPoint) * kInputScale;
   }
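The fully_connected_test.cc hunks above fix more than style: the C++ standard defines std::uniform_int_distribution only for short, int, long, long long, and their unsigned counterparts, so instantiating it with uint8_t is undefined behavior, and MSVC's standard library rejects it at compile time. The portable pattern, sketched below with a throwaway engine, draws from a supported wider type bounded to the uint8_t range and narrows the result explicitly:

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <random>

    int main() {
      std::mt19937 engine;

      // Undefined behavior, and a hard error on MSVC:
      //   std::uniform_int_distribution<std::uint8_t> dist;

      // Portable: draw from a supported wider type constrained to the
      // uint8_t range, then narrow explicitly.
      std::uniform_int_distribution<std::uint32_t> dist(
          0, std::numeric_limits<std::uint8_t>::max());
      const std::uint8_t q = static_cast<std::uint8_t>(dist(engine));
      std::cout << static_cast<int>(q) << "\n";
      return 0;
    }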
diff --git a/tensorflow/lite/kernels/internal/test_util.cc b/tensorflow/lite/kernels/internal/test_util.cc
index 4462775ddbd..4971ed24feb 100644
--- a/tensorflow/lite/kernels/internal/test_util.cc
+++ b/tensorflow/lite/kernels/internal/test_util.cc
@@ -105,6 +105,7 @@ float ExponentialRandomPositiveFloat(float percentile, float percentile_val,
 
 void FillRandom(std::vector<float>* vec, float min, float max) {
   std::uniform_real_distribution<float> dist(min, max);
+  // TODO(b/154540105): use std::ref to avoid copying the random engine.
   auto gen = std::bind(dist, RandomEngine());
   std::generate(std::begin(*vec), std::end(*vec), gen);
 }
diff --git a/tensorflow/lite/kernels/internal/test_util.h b/tensorflow/lite/kernels/internal/test_util.h
index 766a627c99e..6c9a341a79e 100644
--- a/tensorflow/lite/kernels/internal/test_util.h
+++ b/tensorflow/lite/kernels/internal/test_util.h
@@ -59,12 +59,22 @@ float ExponentialRandomPositiveFloat(float percentile, float percentile_val,
 // Fills a vector with random floats between |min| and |max|.
 void FillRandom(std::vector<float>* vec, float min, float max);
 
+template <typename T>
+void FillRandom(typename std::vector<T>::iterator begin_it,
+                typename std::vector<T>::iterator end_it, T min, T max) {
+  // Workaround for compilers that don't support (u)int8_t uniform_distribution.
+  typedef typename std::conditional<sizeof(T) >= sizeof(int16_t), T,
+                                    std::int16_t>::type rand_type;
+  std::uniform_int_distribution<rand_type> dist(min, max);
+  // TODO(b/154540105): use std::ref to avoid copying the random engine.
+  auto gen = std::bind(dist, RandomEngine());
+  std::generate(begin_it, end_it, [&gen] { return static_cast<T>(gen()); });
+}
+
 // Fills a vector with random numbers between |min| and |max|.
 template <typename T>
 void FillRandom(std::vector<T>* vec, T min, T max) {
-  std::uniform_int_distribution<T> dist(min, max);
-  auto gen = std::bind(dist, RandomEngine());
-  std::generate(std::begin(*vec), std::end(*vec), gen);
+  return FillRandom(std::begin(*vec), std::end(*vec), min, max);
 }
 
 // Fills a vector with random numbers.
@@ -73,14 +83,6 @@ void FillRandom(std::vector<T>* vec) {
   FillRandom(vec, std::numeric_limits<T>::min(),
              std::numeric_limits<T>::max());
 }
 
-template <typename T>
-void FillRandom(typename std::vector<T>::iterator begin_it,
-                typename std::vector<T>::iterator end_it, T min, T max) {
-  std::uniform_int_distribution<T> dist(min, max);
-  auto gen = std::bind(dist, RandomEngine());
-  std::generate(begin_it, end_it, gen);
-}
-
 // Fill with a "skyscraper" pattern, in which there is a central section (across
 // the depth) with higher values than the surround.
 template <typename T>
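The TODO(b/154540105) comments added above flag a subtlety of std::bind: it stores copies of its bound arguments, so assuming RandomEngine() returns a reference to a shared engine, std::bind(dist, RandomEngine()) advances a private copy and leaves the shared engine's state untouched across FillRandom calls. A small sketch of the difference, with a local std::mt19937 standing in for TFLite's RandomEngine():

    #include <functional>
    #include <iostream>
    #include <random>

    int main() {
      std::mt19937 engine;
      std::uniform_int_distribution<int> dist(0, 99);

      // std::bind copies `engine`; calls advance the copy, not `engine`.
      auto gen_copy = std::bind(dist, engine);
      gen_copy();
      std::cout << (engine == std::mt19937() ? "engine unchanged"
                                             : "engine advanced")
                << " after copy-bound call\n";

      // std::ref binds a reference, so calls advance the shared engine,
      // which is what the TODO proposes.
      auto gen_ref = std::bind(dist, std::ref(engine));
      gen_ref();
      std::cout << (engine == std::mt19937() ? "engine unchanged"
                                             : "engine advanced")
                << " after ref-bound call\n";
      return 0;
    }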
diff --git a/tensorflow/lite/kernels/resize_bilinear_test.cc b/tensorflow/lite/kernels/resize_bilinear_test.cc
index 86dcaaefce0..5cbba026010 100644
--- a/tensorflow/lite/kernels/resize_bilinear_test.cc
+++ b/tensorflow/lite/kernels/resize_bilinear_test.cc
@@ -25,8 +25,8 @@ using ::testing::ElementsAreArray;
 using uint8 = std::uint8_t;
 
 enum class TestType {
-  CONST = 0,
-  DYNAMIC = 1,
+  kConst = 0,
+  kDynamic = 1,
 };
 
 class ResizeBilinearOpModel : public SingleOpModel {
@@ -35,7 +35,7 @@ class ResizeBilinearOpModel : public SingleOpModel {
   ResizeBilinearOpModel(const TensorData& input,
                         std::initializer_list<int> size_data,
                         TestType test_type, bool half_pixel_centers = false) {
-    bool const_size = (test_type == TestType::CONST);
+    bool const_size = (test_type == TestType::kConst);
     input_ = AddInput(input);
 
     if (const_size) {
@@ -332,7 +332,7 @@ TEST_P(ResizeBilinearOpTest, ThreeDimensionalResizeInt8) {
 }
 
 INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTest, ResizeBilinearOpTest,
-                         testing::Values(TestType::CONST, TestType::DYNAMIC));
+                         testing::Values(TestType::kConst, TestType::kDynamic));
 
 }  // namespace
 }  // namespace tflite
diff --git a/tensorflow/lite/kernels/resize_nearest_neighbor_test.cc b/tensorflow/lite/kernels/resize_nearest_neighbor_test.cc
index e8170c9d45f..b894d3a74f4 100644
--- a/tensorflow/lite/kernels/resize_nearest_neighbor_test.cc
+++ b/tensorflow/lite/kernels/resize_nearest_neighbor_test.cc
@@ -25,8 +25,8 @@ using ::testing::ElementsAreArray;
 using uint8 = std::uint8_t;
 
 enum class TestType {
-  CONST = 0,
-  DYNAMIC = 1,
+  kConst = 0,
+  kDynamic = 1,
 };
 
 class ResizeNearestNeighborOpModel : public SingleOpModel {
@@ -34,7 +34,7 @@ class ResizeNearestNeighborOpModel : public SingleOpModel {
   explicit ResizeNearestNeighborOpModel(const TensorData& input,
                                         std::initializer_list<int> size_data,
                                         TestType test_type) {
-    bool const_size = (test_type == TestType::CONST);
+    bool const_size = (test_type == TestType::kConst);
     input_ = AddInput(input);
 
     if (const_size) {
@@ -264,7 +264,7 @@ TEST_P(ResizeNearestNeighborOpTest, ThreeDimensionalResizeInt8) {
 }
 
 INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpTest,
                          ResizeNearestNeighborOpTest,
-                         testing::Values(TestType::CONST, TestType::DYNAMIC));
+                         testing::Values(TestType::kConst, TestType::kDynamic));
 
 }  // namespace
 }  // namespace tflite
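Both resize tests rely on GoogleTest value parameterization to run each test body once per TestType, which is why the enumerators appear in INSTANTIATE_TEST_SUITE_P. A stripped-down sketch of the pattern, with a placeholder fixture in place of the real op models:

    #include <gtest/gtest.h>

    enum class TestType { kConst = 0, kDynamic = 1 };

    // The fixture receives one TestType per instantiation below.
    class ExampleOpTest : public ::testing::TestWithParam<TestType> {};

    TEST_P(ExampleOpTest, RunsOncePerTestType) {
      // GetParam() selects the const-tensor or dynamic-tensor path.
      const bool const_size = (GetParam() == TestType::kConst);
      EXPECT_TRUE(const_size || GetParam() == TestType::kDynamic);
    }

    // Every value listed here re-runs each TEST_P in the suite.
    INSTANTIATE_TEST_SUITE_P(ExampleOpTest, ExampleOpTest,
                             ::testing::Values(TestType::kConst,
                                               TestType::kDynamic));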
diff --git a/tensorflow/lite/kernels/slice_test.cc b/tensorflow/lite/kernels/slice_test.cc
index b372aece52e..1a31ae44a5d 100644
--- a/tensorflow/lite/kernels/slice_test.cc
+++ b/tensorflow/lite/kernels/slice_test.cc
@@ -24,8 +24,8 @@ namespace {
 using ::testing::ElementsAreArray;
 
 enum class TestType {
-  CONST = 0,
-  DYNAMIC = 1,
+  kConst = 0,
+  kDynamic = 1,
 };
 
 template <typename input_type, typename index_type>
@@ -39,7 +39,7 @@ class SliceOpModel : public SingleOpModel {
                TensorType tensor_index_type, TensorType tensor_input_type,
                TestType input_tensor_types) {
     input_ = AddInput(tensor_input_type);
-    if (input_tensor_types == TestType::DYNAMIC) {
+    if (input_tensor_types == TestType::kDynamic) {
       begin_ = AddInput(tensor_index_type);
       size_ = AddInput(tensor_index_type);
     } else {
@@ -52,7 +52,7 @@ class SliceOpModel : public SingleOpModel {
                  CreateSliceOptions(builder_).Union());
     BuildInterpreter({input_shape, begin_shape, size_shape});
 
-    if (input_tensor_types == TestType::DYNAMIC) {
+    if (input_tensor_types == TestType::kDynamic) {
       PopulateTensor(begin_, begin_data);
       PopulateTensor(size_, size_data);
     }
@@ -239,7 +239,8 @@ TEST_P(SliceOpTest, SliceString) {
 }
 
 INSTANTIATE_TEST_SUITE_P(SliceOpTest, SliceOpTest,
-                         ::testing::Values(TestType::CONST, TestType::DYNAMIC));
+                         ::testing::Values(TestType::kConst,
+                                           TestType::kDynamic));
 
 }  // namespace
 }  // namespace tflite
diff --git a/tensorflow/lite/kernels/split_test.cc b/tensorflow/lite/kernels/split_test.cc
index 48c7a0afdf2..7952396880c 100644
--- a/tensorflow/lite/kernels/split_test.cc
+++ b/tensorflow/lite/kernels/split_test.cc
@@ -26,8 +26,8 @@ using ::testing::ElementsAreArray;
 constexpr int kAxisIsATensor = -1000;
 
 enum class TestType {
-  CONST = 0,
-  DYNAMIC = 1,
+  kConst = 0,
+  kDynamic = 1,
 };
 
 class SplitOpModel : public SingleOpModel {
@@ -83,7 +83,7 @@ void Check(TestType test_type, int axis, int num_splits,
            << " and num_splits=" << num_splits;
     return ss.str();
   };
-  if (test_type == TestType::DYNAMIC) {
+  if (test_type == TestType::kDynamic) {
     SplitOpModel m({type, input_shape}, num_splits);
     m.SetInput(input_data);
     m.SetAxis(axis);
@@ -110,18 +110,18 @@ void Check(TestType test_type, int axis, int num_splits,
 template <typename T>
 class SplitOpTest : public ::testing::Test {
  public:
-  static std::vector<TestType> _range_;
+  static std::vector<TestType> range_;
 };
 
 template <>
-std::vector<TestType> SplitOpTest<TestType>::_range_{TestType::CONST,
-                                                     TestType::DYNAMIC};
+std::vector<TestType> SplitOpTest<TestType>::range_{TestType::kConst,
+                                                    TestType::kDynamic};
 
 using DataTypes = ::testing::Types;
 TYPED_TEST_SUITE(SplitOpTest, DataTypes);
 
 TYPED_TEST(SplitOpTest, FourDimensional) {
-  for (TestType test_type : SplitOpTest<TestType>::_range_) {
+  for (TestType test_type : SplitOpTest<TestType>::range_) {
     Check<TypeParam>(/*axis_as_tensor*/ test_type,
                      /*axis=*/0, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
                      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
@@ -158,7 +158,7 @@ TYPED_TEST(SplitOpTest, FourDimensional) {
 }
 
 TYPED_TEST(SplitOpTest, FourDimensionalInt8) {
-  for (TestType test_type : SplitOpTest<TestType>::_range_) {
+  for (TestType test_type : SplitOpTest<TestType>::range_) {
     Check<int8_t>(/*axis_as_tensor*/ test_type,
                   /*axis=*/0, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
                   {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
@@ -195,7 +195,7 @@ TYPED_TEST(SplitOpTest, FourDimensionalInt8) {
 }
 
 TYPED_TEST(SplitOpTest, FourDimensionalInt32) {
-  for (TestType test_type : SplitOpTest<TestType>::_range_) {
+  for (TestType test_type : SplitOpTest<TestType>::range_) {
     Check<int32_t>(/*axis_as_tensor*/ test_type,
                    /*axis=*/0, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
                    {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
@@ -232,7 +232,7 @@ TYPED_TEST(SplitOpTest, FourDimensionalInt32) {
 }
 
 TYPED_TEST(SplitOpTest, OneDimensional) {
-  for (TestType test_type : SplitOpTest<TestType>::_range_) {
+  for (TestType test_type : SplitOpTest<TestType>::range_) {
     Check<TypeParam>(
         /*axis_as_tensor*/ test_type,
         /*axis=*/0, /*num_splits=*/8, {8}, {1}, {1, 2, 3, 4, 5, 6, 7, 8},
@@ -241,7 +241,7 @@ TYPED_TEST(SplitOpTest, OneDimensional) {
 }
 
 TYPED_TEST(SplitOpTest, NegativeAxis) {
-  for (TestType test_type : SplitOpTest<TestType>::_range_) {
+  for (TestType test_type : SplitOpTest<TestType>::range_) {
     Check<TypeParam>(/*axis_as_tensor*/ test_type,
                      /*axis=*/-4, /*num_splits=*/2, {2, 2, 2, 2}, {1, 2, 2, 2},
                      {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
diff --git a/tensorflow/lite/kernels/topk_v2_test.cc b/tensorflow/lite/kernels/topk_v2_test.cc
index c82e5a66d5b..72ed82c1449 100644
--- a/tensorflow/lite/kernels/topk_v2_test.cc
+++ b/tensorflow/lite/kernels/topk_v2_test.cc
@@ -26,8 +26,8 @@ namespace {
 using ::testing::ElementsAreArray;
 
 enum class TestType {
-  CONST = 0,
-  DYNAMIC = 1,
+  kConst = 0,
+  kDynamic = 1,
 };
 
 template <typename T>
@@ -36,7 +36,7 @@ class TopKV2OpModel : public SingleOpModel {
   TopKV2OpModel(int top_k, std::initializer_list<int> input_shape,
                 std::initializer_list<T> input_data,
                 TestType input_tensor_types) {
-    if (input_tensor_types == TestType::DYNAMIC) {
+    if (input_tensor_types == TestType::kDynamic) {
       input_ = AddInput(GetTensorType<T>());
       top_k_ = AddInput(TensorType_INT32);
     } else {
@@ -49,7 +49,7 @@ class TopKV2OpModel : public SingleOpModel {
     SetBuiltinOp(BuiltinOperator_TOPK_V2, BuiltinOptions_TopKV2Options, 0);
     BuildInterpreter({input_shape, {1}});
 
-    if (input_tensor_types == TestType::DYNAMIC) {
+    if (input_tensor_types == TestType::kDynamic) {
       PopulateTensor(input_, input_data);
       PopulateTensor<int32_t>(top_k_, {top_k});
     }
@@ -119,7 +119,8 @@ TEST_P(TopKV2OpTest, TypeInt32) {
 }
 
 INSTANTIATE_TEST_SUITE_P(TopKV2OpTest, TopKV2OpTest,
-                         ::testing::Values(TestType::CONST, TestType::DYNAMIC));
+                         ::testing::Values(TestType::kConst,
+                                           TestType::kDynamic));
 
 // Check that uint8_t works.
 TEST_P(TopKV2OpTest, TypeUint8) {
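Unlike the resize, slice, and topk tests, expand_dims_test.cc and split_test.cc are typed test suites, and GoogleTest does not let a suite be typed and value-parameterized at the same time. They instead loop over a static vector of TestType values inside each TYPED_TEST; the renames above simply bring _range_ in line with the usual member-name style (range_). A stripped-down sketch of that pattern, with placeholder names:

    #include <cstdint>
    #include <vector>

    #include <gtest/gtest.h>

    enum class TestType { kConst = 0, kDynamic = 1 };

    // A typed suite cannot also be value-parameterized, so the TestType
    // dimension lives in a static member that each test body iterates.
    template <typename T>
    class ExampleTypedTest : public ::testing::Test {
     public:
      static std::vector<TestType> range_;
    };

    template <typename T>
    std::vector<TestType> ExampleTypedTest<T>::range_{TestType::kConst,
                                                      TestType::kDynamic};

    using DataTypes = ::testing::Types<float, int32_t>;
    TYPED_TEST_SUITE(ExampleTypedTest, DataTypes);

    TYPED_TEST(ExampleTypedTest, CoversBothTestTypes) {
      for (TestType test_type : ExampleTypedTest<TypeParam>::range_) {
        EXPECT_TRUE(test_type == TestType::kConst ||
                    test_type == TestType::kDynamic);
      }
    }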
diff --git a/tensorflow/lite/kernels/transpose_conv_test.cc b/tensorflow/lite/kernels/transpose_conv_test.cc
index 1851c01bb59..8f74a943b53 100644
--- a/tensorflow/lite/kernels/transpose_conv_test.cc
+++ b/tensorflow/lite/kernels/transpose_conv_test.cc
@@ -37,8 +37,8 @@ namespace {
 using ::testing::ElementsAreArray;
 
 enum class TestType {
-  CONST = 0,
-  DYNAMIC = 1,
+  kConst = 0,
+  kDynamic = 1,
 };
 
 template <typename T>
@@ -54,7 +54,7 @@ class BaseTransposeConvOpModel : public SingleOpModel {
     // Just to be confusing, transpose_conv has an _input_ named "output_shape"
     // that sets the shape of the output tensor of the op :). It must always be
     // an int32 1D four element tensor.
-    if (test_type == TestType::DYNAMIC) {
+    if (test_type == TestType::kDynamic) {
       output_shape_ = AddInput({TensorType_INT32, {4}});
       filter_ = AddInput(filter);
     } else {
@@ -74,7 +74,7 @@ class BaseTransposeConvOpModel : public SingleOpModel {
     BuildInterpreter(
         {GetShape(output_shape_), GetShape(filter_), GetShape(input_)});
 
-    if (test_type == TestType::DYNAMIC) {
+    if (test_type == TestType::kDynamic) {
       PopulateTensor(output_shape_, output_shape_data);
      PopulateTensor(filter_, filter_data);
     }
@@ -445,7 +445,7 @@ INSTANTIATE_TEST_SUITE_P(
     TransposeConvOpTest, TransposeConvOpTest,
     ::testing::Combine(
         ::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)),
-        ::testing::Values(TestType::CONST, TestType::DYNAMIC)));
+        ::testing::Values(TestType::kConst, TestType::kDynamic)));
 
 }  // namespace
 }  // namespace tflite
diff --git a/tensorflow/lite/tools/benchmark/benchmark_model.h b/tensorflow/lite/tools/benchmark/benchmark_model.h
index 0aca42dc200..912e54ff385 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_model.h
+++ b/tensorflow/lite/tools/benchmark/benchmark_model.h
@@ -161,7 +161,7 @@ Flag CreateFlag(const char* name, BenchmarkParams* params,
                 const std::string& usage) {
   return Flag(
       name, [params, name](const T& val) { params->Set<T>(name, val); },
-      params->Get<T>(name), usage, Flag::OPTIONAL);
+      params->Get<T>(name), usage, Flag::kOptional);
 }
 
 // Benchmarks a model.
diff --git a/tensorflow/lite/tools/benchmark/delegate_provider.h b/tensorflow/lite/tools/benchmark/delegate_provider.h
index 6090b7f6ee8..5a635c8f3c7 100644
--- a/tensorflow/lite/tools/benchmark/delegate_provider.h
+++ b/tensorflow/lite/tools/benchmark/delegate_provider.h
@@ -58,7 +58,7 @@ class DelegateProvider {
                  const std::string& usage) const {
     return Flag(
         name, [params, name](const T& val) { params->Set<T>(name, val); },
-        default_params_.Get<T>(name), usage, Flag::OPTIONAL);
+        default_params_.Get<T>(name), usage, Flag::kOptional);
   }
 
   BenchmarkParams default_params_;
 };
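The Flag::OPTIONAL to Flag::kOptional rename touches every call site because the enumerator is spelled wherever a flag is created, as in the two benchmark helpers above. A usage sketch against the API visible in this patch (the flag names, defaults, and main() wiring are illustrative only):

    #include <string>
    #include <vector>

    #include "tensorflow/lite/tools/command_line_flags.h"

    int main(int argc, char** argv) {
      int num_runs = 50;
      std::string graph;

      // kOptional flags keep their default when absent; kRequired (and
      // kPositional) flags make Flags::Parse report failure when missing.
      const bool parse_ok = tflite::Flags::Parse(
          &argc, const_cast<const char**>(argv),
          {tflite::Flag::CreateFlag("num_runs", &num_runs, "number of runs"),
           tflite::Flag::CreateFlag("graph", &graph, "path to model",
                                    tflite::Flag::kRequired)});
      return parse_ok ? 0 : 1;
    }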
diff --git a/tensorflow/lite/tools/command_line_flags.cc b/tensorflow/lite/tools/command_line_flags.cc
index 0db2d53df5a..92ddb1622c6 100644
--- a/tensorflow/lite/tools/command_line_flags.cc
+++ b/tensorflow/lite/tools/command_line_flags.cc
@@ -142,7 +142,7 @@ Flag::Flag(const char* name,
       flag_type_(flag_type) {}
 
 bool Flag::Parse(const std::string& arg, bool* value_parsing_ok) const {
-  return ParseFlag(arg, name_, flag_type_ == POSITIONAL, value_hook_,
+  return ParseFlag(arg, name_, flag_type_ == kPositional, value_hook_,
                    value_parsing_ok);
 }
 
@@ -195,7 +195,7 @@ std::string Flag::GetTypeName() const {
         result = false;
       }
       continue;
-    } else if (flag.flag_type_ == Flag::REQUIRED) {
+    } else if (flag.flag_type_ == Flag::kRequired) {
      TFLITE_LOG(ERROR) << "Required flag not provided: " << flag.name_;
       // If the required flag isn't found, we immediately stop the whole flag
       // parsing.
@@ -205,7 +205,7 @@ std::string Flag::GetTypeName() const {
     }
 
     // Parses positional flags.
-    if (flag.flag_type_ == Flag::POSITIONAL) {
+    if (flag.flag_type_ == Flag::kPositional) {
       if (++positional_count >= *argc) {
         TFLITE_LOG(ERROR) << "Too few command line arguments.";
         return false;
@@ -245,7 +245,7 @@ std::string Flag::GetTypeName() const {
 
       // The flag isn't found, do some bookkeeping work.
       processed_flags[flag.name_] = -1;
-      if (flag.flag_type_ == Flag::REQUIRED) {
+      if (flag.flag_type_ == Flag::kRequired) {
        TFLITE_LOG(ERROR) << "Required flag not provided: " << flag.name_;
         result = false;
         // If the required flag isn't found, we immediately stop the whole flag
@@ -280,7 +280,7 @@ std::string Flag::GetTypeName() const {
   // Prints usage for positional flag.
   for (int i = 0; i < sorted_idx.size(); ++i) {
     const Flag& flag = flag_list[sorted_idx[i]];
-    if (flag.flag_type_ == Flag::POSITIONAL) {
+    if (flag.flag_type_ == Flag::kPositional) {
       positional_count++;
       usage_text << " <" << flag.name_ << ">";
     } else {
@@ -295,7 +295,7 @@ std::string Flag::GetTypeName() const {
   std::vector<std::string> name_column(flag_list.size());
   for (int i = 0; i < sorted_idx.size(); ++i) {
     const Flag& flag = flag_list[sorted_idx[i]];
-    if (flag.flag_type_ != Flag::POSITIONAL) {
+    if (flag.flag_type_ != Flag::kPositional) {
       name_column[i] += "--";
       name_column[i] += flag.name_;
       name_column[i] += "=";
@@ -320,7 +320,8 @@ std::string Flag::GetTypeName() const {
     usage_text << "\t";
     usage_text << std::left << std::setw(max_name_width) << name_column[i];
     usage_text << "\t" << type_name << "\t";
-    usage_text << (flag.flag_type_ != Flag::OPTIONAL ? "required" : "optional");
+    usage_text << (flag.flag_type_ != Flag::kOptional ? "required"
+                                                      : "optional");
     usage_text << "\t" << flag.usage_text_ << "\n";
   }
   return usage_text.str();
diff --git a/tensorflow/lite/tools/command_line_flags.h b/tensorflow/lite/tools/command_line_flags.h
index 941a1b8b59a..95e64a19e18 100644
--- a/tensorflow/lite/tools/command_line_flags.h
+++ b/tensorflow/lite/tools/command_line_flags.h
@@ -65,16 +65,16 @@ namespace tflite {
 class Flag {
  public:
   enum FlagType {
-    POSITIONAL = 0,
-    REQUIRED,
-    OPTIONAL,
+    kPositional = 0,
+    kRequired,
+    kOptional,
   };
 
   // The order of the positional flags is the same as they are added.
   // Positional flags are supposed to be required.
   template <typename T>
   static Flag CreateFlag(const char* name, T* val, const char* usage,
-                         FlagType flag_type = OPTIONAL) {
+                         FlagType flag_type = kOptional) {
     return Flag(
         name, [val](const T& v) { *val = v; }, *val, usage, flag_type);
   }
diff --git a/tensorflow/lite/tools/command_line_flags_test.cc b/tensorflow/lite/tools/command_line_flags_test.cc
index eb02379143f..0216d7a0636 100644
--- a/tensorflow/lite/tools/command_line_flags_test.cc
+++ b/tensorflow/lite/tools/command_line_flags_test.cc
@@ -55,8 +55,10 @@ TEST(CommandLineFlagsTest, BasicUsage) {
        Flag::CreateFlag("some_numeric_bool", &some_numeric_bool,
                         "some numeric bool"),
        Flag::CreateFlag("some_int1", &some_int1, "some int"),
-       Flag::CreateFlag("some_int2", &some_int2, "some int", Flag::REQUIRED),
-       Flag::CreateFlag("float_1", &float_1, "some float", Flag::POSITIONAL),
+       Flag::CreateFlag("some_int2", &some_int2, "some int",
+                        Flag::kRequired),
+       Flag::CreateFlag("float_1", &float_1, "some float",
+                        Flag::kPositional),
       });
 
   EXPECT_TRUE(parsed_ok);
@@ -131,7 +133,7 @@ TEST(CommandLineFlagsTest, RequiredFlagNotFound) {
   const char* argv_strings[] = {"program_name", "--flag=12"};
   bool parsed_ok = Flags::Parse(
       &argc, reinterpret_cast<const char**>(argv_strings),
-      {Flag::CreateFlag("some_flag", &some_float, "", Flag::REQUIRED)});
+      {Flag::CreateFlag("some_flag", &some_float, "", Flag::kRequired)});
 
   EXPECT_FALSE(parsed_ok);
   EXPECT_NEAR(-23.23f, some_float, 1e-5f);
@@ -144,7 +146,7 @@ TEST(CommandLineFlagsTest, NoArguments) {
   const char* argv_strings[] = {"program_name"};
   bool parsed_ok = Flags::Parse(
       &argc, reinterpret_cast<const char**>(argv_strings),
-      {Flag::CreateFlag("some_flag", &some_float, "", Flag::REQUIRED)});
+      {Flag::CreateFlag("some_flag", &some_float, "", Flag::kRequired)});
 
   EXPECT_FALSE(parsed_ok);
   EXPECT_NEAR(-23.23f, some_float, 1e-5f);
@@ -157,7 +159,7 @@ TEST(CommandLineFlagsTest, NotEnoughArguments) {
   const char* argv_strings[] = {"program_name"};
   bool parsed_ok = Flags::Parse(
       &argc, reinterpret_cast<const char**>(argv_strings),
-      {Flag::CreateFlag("some_flag", &some_float, "", Flag::POSITIONAL)});
+      {Flag::CreateFlag("some_flag", &some_float, "", Flag::kPositional)});
 
   EXPECT_FALSE(parsed_ok);
   EXPECT_NEAR(-23.23f, some_float, 1e-5f);
@@ -170,7 +172,7 @@ TEST(CommandLineFlagsTest, PositionalFlagFailed) {
   const char* argv_strings[] = {"program_name", "string"};
   bool parsed_ok = Flags::Parse(
       &argc, reinterpret_cast<const char**>(argv_strings),
-      {Flag::CreateFlag("some_flag", &some_float, "", Flag::POSITIONAL)});
+      {Flag::CreateFlag("some_flag", &some_float, "", Flag::kPositional)});
 
   EXPECT_FALSE(parsed_ok);
   EXPECT_NEAR(-23.23f, some_float, 1e-5f);
@@ -213,9 +215,9 @@ TEST(CommandLineFlagsTest, UsageString) {
       {Flag::CreateFlag("some_int", &some_int, "some int"),
        Flag::CreateFlag("some_int64", &some_int64, "some int64"),
        Flag::CreateFlag("some_switch", &some_switch, "some switch"),
-       Flag::CreateFlag("some_name", &some_name, "some name", Flag::REQUIRED),
+       Flag::CreateFlag("some_name", &some_name, "some name", Flag::kRequired),
        Flag::CreateFlag("some_int2", &some_int2, "some int",
-                        Flag::POSITIONAL)});
+                        Flag::kPositional)});
   // Match the usage message, being sloppy about whitespace.
   const char* expected_usage =
       " usage: some_tool_name <some_int2>\n"
@@ -307,8 +309,8 @@ TEST(CommandLineFlagsTest, DuplicateFlagsNotFound) {
   const char* argv_strings[] = {"program_name", "--some_float=1.0"};
   bool parsed_ok = Flags::Parse(
       &argc, reinterpret_cast<const char**>(argv_strings),
-      {Flag::CreateFlag("some_int", &some_int1, "some int1", Flag::OPTIONAL),
-       Flag::CreateFlag("some_int", &some_int2, "some int2", Flag::REQUIRED)});
+      {Flag::CreateFlag("some_int", &some_int1, "some int1", Flag::kOptional),
+       Flag::CreateFlag("some_int", &some_int2, "some int2", Flag::kRequired)});
 
   EXPECT_FALSE(parsed_ok);
   EXPECT_EQ(-23, some_int1);