diff --git a/tensorflow/compiler/aot/benchmark.h b/tensorflow/compiler/aot/benchmark.h
index 266b7fefc7e..95bb7663b35 100644
--- a/tensorflow/compiler/aot/benchmark.h
+++ b/tensorflow/compiler/aot/benchmark.h
@@ -38,7 +38,7 @@ namespace benchmark {
 struct Options {
   // kDefaultMicros specifies the default time to run the benchmark, and is used
   // if neither max_iters nor max_micros is set.
-  static const int64 kDefaultMicros = 3000000;
+  static constexpr int64 kDefaultMicros = 3000000;
 
   int64 max_iters = 0;   // Maximum iterations to run, ignored if <= 0.
   int64 max_micros = 0;  // Maximum microseconds to run, ignored if <= 0.
diff --git a/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc b/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc
index 0df7a84d757..c89bf54564c 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc
@@ -191,7 +191,7 @@ class EMBenchmarkHelper {
  public:
   // Length of tensors.  TODO(tucker): make this a variable parameter.
-  static const int kTDim = 1024;
+  static constexpr int kTDim = 1024;
 
   int num_ops() const { return add_kernels_.size(); }
   size_t tensor_size() const {
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index 2f140316b3a..f31effb607c 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -694,8 +694,8 @@ class OpKernelContext {
   checkpoint::TensorSliceReaderCacheWrapper* slice_reader_cache = nullptr;
 
   // Support for forwarding reservations (used by ScopedAllocator).
-  static const int kNeverForward = -2;
-  static const int kNoReservation = -1;
+  static constexpr int kNeverForward = -2;
+  static constexpr int kNoReservation = -1;
   // Values in [0,...) represent reservations for the indexed output.
   const int* forward_from_array = nullptr;
diff --git a/tensorflow/core/framework/shape_inference_test.cc b/tensorflow/core/framework/shape_inference_test.cc
index d413882e400..45cfb2395d2 100644
--- a/tensorflow/core/framework/shape_inference_test.cc
+++ b/tensorflow/core/framework/shape_inference_test.cc
@@ -74,7 +74,7 @@ class ShapeInferenceTest : public ::testing::Test {
   void TestMergeHandles(bool input_not_output);
   void TestRelaxHandles(bool input_not_output);
 
-  static const int kVersion = 0;  // used for graph-def version.
+  static constexpr int kVersion = 0;  // used for graph-def version.
 };
 
 TEST_F(ShapeInferenceTest, InputOutputByName) {
diff --git a/tensorflow/core/framework/tensor_shape.h b/tensorflow/core/framework/tensor_shape.h
index ac1bef12370..b0d4944baf3 100644
--- a/tensorflow/core/framework/tensor_shape.h
+++ b/tensorflow/core/framework/tensor_shape.h
@@ -103,10 +103,10 @@ class TensorShapeRep {
   // We use the max value of uint16 or uint32 to represent unknown shapes, so
   // the maximum representable valid shape in these representations is one less.
-  static const int64 kMaxRep16 = std::numeric_limits<uint16>::max() - 1;
-  static const int64 kMaxRep32 = std::numeric_limits<uint32>::max() - 1;
-  static const uint16 kUnknownRep16 = std::numeric_limits<uint16>::max();
-  static const uint32 kUnknownRep32 = std::numeric_limits<uint32>::max();
+  static constexpr int64 kMaxRep16 = std::numeric_limits<uint16>::max() - 1;
+  static constexpr int64 kMaxRep32 = std::numeric_limits<uint32>::max() - 1;
+  static constexpr uint16 kUnknownRep16 = std::numeric_limits<uint16>::max();
+  static constexpr uint32 kUnknownRep32 = std::numeric_limits<uint32>::max();
 
   Rep16* as16() { return reinterpret_cast<Rep16*>(buf()); }
   Rep32* as32() { return reinterpret_cast<Rep32*>(buf()); }
@@ -134,7 +134,7 @@ class TensorShapeRep {
   // We store the number of dimensions in byte 14, and the RepTag in byte 15.
   // Bytes [0..13] vary depending on the representation.
   // A value of 255 indicates unknown rank in the PartialTensorShape case.
-  static const uint8 kUnknownRank = 255;
+  static constexpr uint8 kUnknownRank = 255;
 
   uint8 ndims_byte() const { return buf()[14]; }
   void set_ndims_byte(uint8 nd) { buf()[14] = nd; }
diff --git a/tensorflow/core/framework/tensor_testutil.h b/tensorflow/core/framework/tensor_testutil.h
index 1d476baa927..80dddfba801 100644
--- a/tensorflow/core/framework/tensor_testutil.h
+++ b/tensorflow/core/framework/tensor_testutil.h
@@ -116,11 +116,11 @@ namespace internal {
 template <typename T>
 struct is_floating_point_type {
-  static const bool value = std::is_same<T, Eigen::half>::value ||
-                            std::is_same<T, float>::value ||
-                            std::is_same<T, double>::value ||
-                            std::is_same<T, std::complex<float>>::value ||
-                            std::is_same<T, std::complex<double>>::value;
+  static constexpr bool value = std::is_same<T, Eigen::half>::value ||
+                                std::is_same<T, float>::value ||
+                                std::is_same<T, double>::value ||
+                                std::is_same<T, std::complex<float>>::value ||
+                                std::is_same<T, std::complex<double>>::value;
 };
 
 template <typename T>
diff --git a/tensorflow/core/framework/type_traits.h b/tensorflow/core/framework/type_traits.h
index 96fbf929388..a7826a642de 100644
--- a/tensorflow/core/framework/type_traits.h
+++ b/tensorflow/core/framework/type_traits.h
@@ -26,10 +26,10 @@ namespace tensorflow {
 
 // Functions to define quantization attribute of types.
 struct true_type {
-  static const bool value = true;
+  static constexpr bool value = true;
 };
 struct false_type {
-  static const bool value = false;
+  static constexpr bool value = false;
 };
 
 // Default is_quantized is false.
diff --git a/tensorflow/core/graph/graph_partition.h b/tensorflow/core/graph/graph_partition.h
index 8020c2d2478..04ea0ac2e67 100644
--- a/tensorflow/core/graph/graph_partition.h
+++ b/tensorflow/core/graph/graph_partition.h
@@ -42,7 +42,7 @@ struct PartitionOptions {
   // A function that returns the incarnation of a device given the
   // device's fullname. If not found, GetIncarnationFunc should return
   // kIllegalIncarnation.
-  static const uint64 kIllegalIncarnation = 0;
+  static constexpr uint64 kIllegalIncarnation = 0;
   typedef std::function<uint64(const string&)> GetIncarnationFunc;
   GetIncarnationFunc get_incarnation = nullptr;
diff --git a/tensorflow/core/platform/default/subprocess.h b/tensorflow/core/platform/default/subprocess.h
index 31b0ef39e7b..b066274a574 100644
--- a/tensorflow/core/platform/default/subprocess.h
+++ b/tensorflow/core/platform/default/subprocess.h
@@ -101,7 +101,7 @@ class SubProcess {
                  string* stderr_output);
 
  private:
-  static const int kNFds = 3;
+  static constexpr int kNFds = 3;
   static bool chan_valid(int chan) { return ((chan >= 0) && (chan < kNFds)); }
   static bool retry(int e) {
     return ((e == EINTR) || (e == EAGAIN) || (e == EWOULDBLOCK));
diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc
index b98fd3c4cb1..74da5b9429d 100644
--- a/tensorflow/core/platform/env.cc
+++ b/tensorflow/core/platform/env.cc
@@ -522,7 +522,7 @@ class FileStream : public ::tensorflow::protobuf::io::ZeroCopyInputStream {
   }
 
  private:
-  static const int kBufSize = 512 << 10;
+  static constexpr int kBufSize = 512 << 10;
 
   RandomAccessFile* file_;
   int64 pos_;
diff --git a/tensorflow/core/platform/protobuf.h b/tensorflow/core/platform/protobuf.h
index d05095dcf55..e16d89d0c05 100644
--- a/tensorflow/core/platform/protobuf.h
+++ b/tensorflow/core/platform/protobuf.h
@@ -114,7 +114,7 @@ class TStringOutputStream : public protobuf::io::ZeroCopyOutputStream {
   int64_t ByteCount() const override;
 
  private:
-  static const int kMinimumSize = 16;
+  static constexpr int kMinimumSize = 16;
 
   tstring* target_;
 };
diff --git a/tensorflow/core/util/tensor_slice_reader.h b/tensorflow/core/util/tensor_slice_reader.h
index 4aa9a4708e2..0fb2e11bf8d 100644
--- a/tensorflow/core/util/tensor_slice_reader.h
+++ b/tensorflow/core/util/tensor_slice_reader.h
@@ -61,7 +61,7 @@ class TensorSliceReader {
   };
   typedef std::function<Status(const string&, Table**)> OpenTableFunction;
 
-  static const int kLoadAllShards = -1;
+  static constexpr int kLoadAllShards = -1;
 
   TensorSliceReader(const string& filepattern);
   TensorSliceReader(const string& filepattern, OpenTableFunction open_function);
   TensorSliceReader(const string& filepattern, OpenTableFunction open_function,
diff --git a/tensorflow/core/util/tensor_slice_writer.h b/tensorflow/core/util/tensor_slice_writer.h
index b610565e1e0..86077a54ff8 100644
--- a/tensorflow/core/util/tensor_slice_writer.h
+++ b/tensorflow/core/util/tensor_slice_writer.h
@@ -68,7 +68,7 @@ class TensorSliceWriter {
   static size_t MaxBytesPerElement(DataType dt);
 
  private:
-  static const size_t kMaxMessageBytes = 1LL << 31;
+  static constexpr size_t kMaxMessageBytes = 1LL << 31;
   // Filling in the TensorProto in a SavedSlice will add the following
   // header bytes, in addition to the data:
   // - 1 byte: TensorProto tag and wire format
@@ -77,7 +77,7 @@ class TensorSliceWriter {
   // - <= 5 bytes: *_val length
   // However, we add 1KB of slack, to be conservative and guard
   // against other additions to the TensorProto.
-  static const size_t kTensorProtoHeaderBytes = 1 << 10;
+  static constexpr size_t kTensorProtoHeaderBytes = 1 << 10;
 
   const string filename_;
   const CreateBuilderFunction create_builder_;
diff --git a/tensorflow/lite/delegates/gpu/cl/model_hints.h b/tensorflow/lite/delegates/gpu/cl/model_hints.h
index 274064dcf13..7661cc0dacb 100644
--- a/tensorflow/lite/delegates/gpu/cl/model_hints.h
+++ b/tensorflow/lite/delegates/gpu/cl/model_hints.h
@@ -26,11 +26,11 @@ struct ModelHints {
   using ModelHint = uint64_t;
 
   // By default we want the fastest inference
-  static const ModelHint kFastestInference = 0x00000000;
+  static constexpr ModelHint kFastestInference = 0x00000000;
   // Can improve compilation time, but inference can be slower
-  static const ModelHint kReduceKernelsCount = 0x00000001;
+  static constexpr ModelHint kReduceKernelsCount = 0x00000001;
   // Can improve tuning time, but inference can be slower
-  static const ModelHint kFastTuning = 0x00000002;
+  static constexpr ModelHint kFastTuning = 0x00000002;
 
   void Add(ModelHint hint) {
     if (hint == kFastestInference) {
diff --git a/tensorflow/lite/interpreter_test.cc b/tensorflow/lite/interpreter_test.cc
index ab63fcfe8c3..bb64721757d 100644
--- a/tensorflow/lite/interpreter_test.cc
+++ b/tensorflow/lite/interpreter_test.cc
@@ -1094,7 +1094,7 @@ TEST(InterpreterTensorsCapacityTest, TestExceedHeadroom) {
 }
 
 struct TestExternalContext : public TfLiteExternalContext {
-  static const TfLiteExternalContextType kType = kTfLiteGemmLowpContext;
+  static constexpr TfLiteExternalContextType kType = kTfLiteGemmLowpContext;
 
   static TestExternalContext* Get(TfLiteContext* context) {
     return reinterpret_cast<TestExternalContext*>(
diff --git a/tensorflow/lite/kernels/test_util.h b/tensorflow/lite/kernels/test_util.h
index 7b504e42371..b02b68494b4 100644
--- a/tensorflow/lite/kernels/test_util.h
+++ b/tensorflow/lite/kernels/test_util.h
@@ -816,40 +816,40 @@ struct TypeUnion;
 template <>
 struct TypeUnion<float> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_FLOAT32;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteFloat32;
+  static constexpr TensorType tensor_type = TensorType::TensorType_FLOAT32;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteFloat32;
   typedef float ScalarType;
 };
 
 template <>
 struct TypeUnion<int32_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_INT32;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteInt32;
+  static constexpr TensorType tensor_type = TensorType::TensorType_INT32;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteInt32;
   typedef int32_t ScalarType;
 };
 
 template <>
 struct TypeUnion<int16_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_INT16;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteInt16;
+  static constexpr TensorType tensor_type = TensorType::TensorType_INT16;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteInt16;
   typedef int16_t ScalarType;
 };
 
 template <>
 struct TypeUnion<int8_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_INT8;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteInt8;
+  static constexpr TensorType tensor_type = TensorType::TensorType_INT8;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteInt8;
   typedef int8_t ScalarType;
 };
 
 template <>
 struct TypeUnion<uint8_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_UINT8;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteUInt8;
+  static constexpr TensorType tensor_type = TensorType::TensorType_UINT8;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteUInt8;
   typedef uint8_t ScalarType;
 };
diff --git a/tensorflow/stream_executor/device_options.h b/tensorflow/stream_executor/device_options.h
index b195bc84e14..00eb8c8dbb0 100644
--- a/tensorflow/stream_executor/device_options.h
+++ b/tensorflow/stream_executor/device_options.h
@@ -39,19 +39,19 @@ struct DeviceOptions {
   // this flag prevents it from ever being deallocated. Potentially saves
   // thrashing the thread stack memory allocation, but at the potential cost of
   // some memory space.
-  static const unsigned kDoNotReclaimStackAllocation = 0x1;
+  static constexpr unsigned kDoNotReclaimStackAllocation = 0x1;
 
   // The following options refer to synchronization options when
   // using SynchronizeStream or SynchronizeContext.
 
   // Synchronize with spinlocks.
-  static const unsigned kScheduleSpin = 0x02;
+  static constexpr unsigned kScheduleSpin = 0x02;
   // Synchronize with spinlocks that also call CPU yield instructions.
-  static const unsigned kScheduleYield = 0x04;
+  static constexpr unsigned kScheduleYield = 0x04;
   // Synchronize with a "synchronization primitive" (e.g. mutex).
-  static const unsigned kScheduleBlockingSync = 0x08;
+  static constexpr unsigned kScheduleBlockingSync = 0x08;
 
-  static const unsigned kMask = 0xf;  // Mask of all available flags.
+  static constexpr unsigned kMask = 0xf;  // Mask of all available flags.
 
   // Constructs an or-d together set of device options.
   explicit DeviceOptions(unsigned flags) : flags_(flags) {
diff --git a/tensorflow/stream_executor/gpu/redzone_allocator.h b/tensorflow/stream_executor/gpu/redzone_allocator.h
index 77755ccd3c6..e5e42df73bd 100644
--- a/tensorflow/stream_executor/gpu/redzone_allocator.h
+++ b/tensorflow/stream_executor/gpu/redzone_allocator.h
@@ -39,10 +39,10 @@ namespace stream_executor {
 // memory for cudnn convolutions.
 class RedzoneAllocator : public ScratchAllocator {
  public:
-  static const int64 kDefaultMemoryLimit = 1LL << 32;  // 4GB
-  static const int64 kDefaultRedzoneSize =
+  static constexpr int64 kDefaultMemoryLimit = 1LL << 32;  // 4GB
+  static constexpr int64 kDefaultRedzoneSize =
       1LL << 23;  // 8MiB per side, 16MiB total.
-  static const uint8 kDefaultRedzonePattern = -1;
+  static constexpr uint8 kDefaultRedzonePattern = -1;
   RedzoneAllocator(Stream* stream, DeviceMemoryAllocator* memory_allocator,
                    GpuAsmOpts gpu_compilation_opts_,
                    int64 memory_limit = kDefaultMemoryLimit,
diff --git a/tensorflow/stream_executor/rng.h b/tensorflow/stream_executor/rng.h
index acbf8fce4ca..3dee347cb12 100644
--- a/tensorflow/stream_executor/rng.h
+++ b/tensorflow/stream_executor/rng.h
@@ -40,8 +40,8 @@ namespace rng {
 // thread-hostility.
 class RngSupport {
  public:
-  static const int kMinSeedBytes = 16;
-  static const int kMaxSeedBytes = INT_MAX;
+  static constexpr int kMinSeedBytes = 16;
+  static constexpr int kMaxSeedBytes = INT_MAX;
 
   // Releases any random-number-generation resources associated with this
   // support object in the underlying platform implementation.
diff --git a/tensorflow/stream_executor/stream_executor_pimpl.h b/tensorflow/stream_executor/stream_executor_pimpl.h
index eeb07100a19..f7f69f78e89 100644
--- a/tensorflow/stream_executor/stream_executor_pimpl.h
+++ b/tensorflow/stream_executor/stream_executor_pimpl.h
@@ -723,7 +723,7 @@ class StreamExecutor {
   // Only one worker thread is needed; little work will be done by the
   // executor.
-  static const int kNumBackgroundThreads = 1;
+  static constexpr int kNumBackgroundThreads = 1;
 
   // Indicates if StreamExecutor operation tracing should be performed.
   bool tracing_enabled_;
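
Note (illustration, not part of the diff): every hunk above only swaps "static const" for "static constexpr" on integral and enum class constants. The standalone sketch below, assuming a C++17 toolchain and using made-up names (WithConst, WithConstexpr, kLimit) rather than anything from TensorFlow, shows the practical difference: a static const member with an in-class initializer still needs an out-of-line definition when odr-used under C++14 and earlier, while a constexpr static data member is implicitly inline since C++17 and needs no separate definition.

// const_vs_constexpr.cc -- illustrative only; names are hypothetical.
#include <algorithm>
#include <iostream>

struct WithConst {
  // The in-class initializer is only a declaration. If this member is
  // odr-used (e.g. bound to a const reference), C++14 and earlier also
  // require an out-of-line definition "const int WithConst::kLimit;"
  // in a .cc file, or the program may fail to link.
  static const int kLimit = 64;
};

struct WithConstexpr {
  // constexpr requires a constant initializer, and since C++17 a constexpr
  // static data member is implicitly inline, so no out-of-line definition
  // is needed even when the member is odr-used.
  static constexpr int kLimit = 64;
};

int main() {
  int requested = 100;
  // std::min takes its arguments by const reference, which odr-uses kLimit;
  // with the constexpr member this compiles and links cleanly under C++17.
  std::cout << std::min(requested, WithConstexpr::kLimit) << "\n";  // prints 64
  return 0;
}

Under C++17 both spellings are usable; constexpr simply states the compile-time intent explicitly and drops the out-of-line definition boilerplate.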