Some const declarations changed to constexpr

PiperOrigin-RevId: 307629520
Change-Id: I22dcc35701417a184b77791ec3efe4a2957251bc
A. Unique TensorFlower 2020-04-21 10:17:31 -07:00 committed by TensorFlower Gardener
parent 298b24151e
commit c5fd4efc4c
20 changed files with 49 additions and 49 deletions
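Note (not part of the commit): the diff below is mechanical, but the language rule behind it is worth spelling out. The following minimal sketch uses invented names (Limits, kMaxItems, kLegacyMax) and shows how a static constexpr member differs from a static const one: since C++17 a constexpr static data member is implicitly inline, so it needs no out-of-line definition even when it is odr-used, and it is guaranteed to be usable in constant expressions.

#include <algorithm>
#include <iostream>

struct Limits {
  // constexpr members are implicitly inline since C++17, so no separate
  // "const int Limits::kMaxItems;" definition is needed in a .cc file,
  // even when the member is odr-used (e.g. bound to a const reference).
  static constexpr int kMaxItems = 64;

  // A static const member with an in-class initializer still needs an
  // out-of-line definition in exactly one translation unit if it is odr-used.
  static const int kLegacyMax = 64;
};

int main() {
  int buf[Limits::kMaxItems] = {};  // both forms work in constant expressions
  // std::min takes its arguments by const reference, which odr-uses them;
  // passing kLegacyMax here instead would require the out-of-line definition
  // in order to link.
  std::cout << std::min(Limits::kMaxItems, 128) << " " << sizeof(buf) << "\n";
  return 0;
}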

View File

@@ -38,7 +38,7 @@ namespace benchmark {
 struct Options {
   // kDefaultMicros specifies the default time to run the benchmark, and is used
   // if neither max_iters nor max_micros is set.
-  static const int64 kDefaultMicros = 3000000;
+  static constexpr int64 kDefaultMicros = 3000000;
   int64 max_iters = 0;   // Maximum iterations to run, ignored if <= 0.
   int64 max_micros = 0;  // Maximum microseconds to run, ignored if <= 0.

View File

@@ -191,7 +191,7 @@ class EMBenchmarkHelper {
  public:
   // Length of tensors. TODO(tucker): make this a variable parameter.
-  static const int kTDim = 1024;
+  static constexpr int kTDim = 1024;
   int num_ops() const { return add_kernels_.size(); }
   size_t tensor_size() const {

View File

@@ -694,8 +694,8 @@ class OpKernelContext {
   checkpoint::TensorSliceReaderCacheWrapper* slice_reader_cache = nullptr;
   // Support for forwarding reservations (used by ScopedAllocator).
-  static const int kNeverForward = -2;
-  static const int kNoReservation = -1;
+  static constexpr int kNeverForward = -2;
+  static constexpr int kNoReservation = -1;
   // Values in [0,...) represent reservations for the indexed output.
   const int* forward_from_array = nullptr;

View File

@@ -74,7 +74,7 @@ class ShapeInferenceTest : public ::testing::Test {
   void TestMergeHandles(bool input_not_output);
   void TestRelaxHandles(bool input_not_output);
-  static const int kVersion = 0;  // used for graph-def version.
+  static constexpr int kVersion = 0;  // used for graph-def version.
 };
 TEST_F(ShapeInferenceTest, InputOutputByName) {

View File

@@ -103,10 +103,10 @@ class TensorShapeRep {
   // We use the max value of uint16 or uint32 to represent unknown shapes, so
   // the maximum representable valid shape in these representations is one less.
-  static const int64 kMaxRep16 = std::numeric_limits<uint16>::max() - 1;
-  static const int64 kMaxRep32 = std::numeric_limits<uint32>::max() - 1;
-  static const uint16 kUnknownRep16 = std::numeric_limits<uint16>::max();
-  static const uint32 kUnknownRep32 = std::numeric_limits<uint32>::max();
+  static constexpr int64 kMaxRep16 = std::numeric_limits<uint16>::max() - 1;
+  static constexpr int64 kMaxRep32 = std::numeric_limits<uint32>::max() - 1;
+  static constexpr uint16 kUnknownRep16 = std::numeric_limits<uint16>::max();
+  static constexpr uint32 kUnknownRep32 = std::numeric_limits<uint32>::max();
   Rep16* as16() { return reinterpret_cast<Rep16*>(buf()); }
   Rep32* as32() { return reinterpret_cast<Rep32*>(buf()); }
@@ -134,7 +134,7 @@ class TensorShapeRep {
   // We store the number of dimensions in byte 14, and the RepTag in byte 15.
   // Bytes [0..13] vary depending on the representation.
   // A value of 255 indicates unknown rank in the PartialTensorShape case.
-  static const uint8 kUnknownRank = 255;
+  static constexpr uint8 kUnknownRank = 255;
   uint8 ndims_byte() const { return buf()[14]; }
   void set_ndims_byte(uint8 nd) { buf()[14] = nd; }
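Note (not part of the commit): one practical payoff of making representation limits constexpr is that invariants between them can be checked at compile time. The sketch below is illustrative only; it swaps TensorFlow's int64/uint16 aliases for standard types so it is self-contained.

#include <cstdint>
#include <limits>

// Stand-ins for TensorFlow's int64/uint16 aliases, to keep this self-contained.
using int64_alias = std::int64_t;
using uint16_alias = std::uint16_t;

static constexpr int64_alias kMaxRep16 =
    std::numeric_limits<uint16_alias>::max() - 1;
static constexpr uint16_alias kUnknownRep16 =
    std::numeric_limits<uint16_alias>::max();

// constexpr lets the relationship between the two constants be verified at
// compile time instead of relying on a comment.
static_assert(kMaxRep16 + 1 == kUnknownRep16,
              "the unknown-shape marker must be one past the largest valid value");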

View File

@@ -116,11 +116,11 @@ namespace internal {
 template <typename T>
 struct is_floating_point_type {
-  static const bool value = std::is_same<T, Eigen::half>::value ||
-                            std::is_same<T, float>::value ||
-                            std::is_same<T, double>::value ||
-                            std::is_same<T, std::complex<float>>::value ||
-                            std::is_same<T, std::complex<double>>::value;
+  static constexpr bool value = std::is_same<T, Eigen::half>::value ||
+                                std::is_same<T, float>::value ||
+                                std::is_same<T, double>::value ||
+                                std::is_same<T, std::complex<float>>::value ||
+                                std::is_same<T, std::complex<double>>::value;
 };
 template <typename T>
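Note (not part of the commit): a trait like this exposes a single boolean through its value member, and the constexpr form lets callers use it directly in compile-time checks. The self-contained restatement below omits Eigen::half so it compiles without Eigen; the half_of helper is an invented example of the usual consumption pattern.

#include <complex>
#include <type_traits>

template <typename T>
struct is_floating_point_type {
  static constexpr bool value = std::is_same<T, float>::value ||
                                std::is_same<T, double>::value ||
                                std::is_same<T, std::complex<float>>::value ||
                                std::is_same<T, std::complex<double>>::value;
};

// Because value is a constant expression, it can drive static_assert and
// SFINAE directly.
static_assert(is_floating_point_type<std::complex<double>>::value, "");
static_assert(!is_floating_point_type<int>::value, "");

template <typename T>
typename std::enable_if<is_floating_point_type<T>::value, T>::type half_of(T x) {
  return x / T(2);
}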

View File

@@ -26,10 +26,10 @@ namespace tensorflow {
 // Functions to define quantization attribute of types.
 struct true_type {
-  static const bool value = true;
+  static constexpr bool value = true;
 };
 struct false_type {
-  static const bool value = false;
+  static constexpr bool value = false;
 };
 // Default is_quantized is false.

View File

@@ -42,7 +42,7 @@ struct PartitionOptions {
   // A function that returns the incarnation of a device given the
   // device's fullname. If not found, GetIncarnationFunc should return
   // kIllegalIncarnation.
-  static const uint64 kIllegalIncarnation = 0;
+  static constexpr uint64 kIllegalIncarnation = 0;
   typedef std::function<uint64(const string&)> GetIncarnationFunc;
   GetIncarnationFunc get_incarnation = nullptr;

View File

@@ -101,7 +101,7 @@ class SubProcess {
                      string* stderr_output);
  private:
-  static const int kNFds = 3;
+  static constexpr int kNFds = 3;
   static bool chan_valid(int chan) { return ((chan >= 0) && (chan < kNFds)); }
   static bool retry(int e) {
     return ((e == EINTR) || (e == EAGAIN) || (e == EWOULDBLOCK));

View File

@@ -522,7 +522,7 @@ class FileStream : public ::tensorflow::protobuf::io::ZeroCopyInputStream {
   }
  private:
-  static const int kBufSize = 512 << 10;
+  static constexpr int kBufSize = 512 << 10;
   RandomAccessFile* file_;
   int64 pos_;

View File

@@ -114,7 +114,7 @@ class TStringOutputStream : public protobuf::io::ZeroCopyOutputStream {
   int64_t ByteCount() const override;
  private:
-  static const int kMinimumSize = 16;
+  static constexpr int kMinimumSize = 16;
   tstring* target_;
 };

View File

@@ -61,7 +61,7 @@ class TensorSliceReader {
   };
   typedef std::function<Status(const string&, Table**)> OpenTableFunction;
-  static const int kLoadAllShards = -1;
+  static constexpr int kLoadAllShards = -1;
   TensorSliceReader(const string& filepattern);
   TensorSliceReader(const string& filepattern, OpenTableFunction open_function);
   TensorSliceReader(const string& filepattern, OpenTableFunction open_function,

View File

@@ -68,7 +68,7 @@ class TensorSliceWriter {
   static size_t MaxBytesPerElement(DataType dt);
  private:
-  static const size_t kMaxMessageBytes = 1LL << 31;
+  static constexpr size_t kMaxMessageBytes = 1LL << 31;
   // Filling in the TensorProto in a SavedSlice will add the following
   // header bytes, in addition to the data:
   //   - 1 byte: TensorProto tag and wire format
@@ -77,7 +77,7 @@ class TensorSliceWriter {
   //   - <= 5 bytes: *_val length
   // However, we add 1KB of slack, to be conservative and guard
   // against other additions to the TensorProto.
-  static const size_t kTensorProtoHeaderBytes = 1 << 10;
+  static constexpr size_t kTensorProtoHeaderBytes = 1 << 10;
   const string filename_;
   const CreateBuilderFunction create_builder_;

View File

@@ -26,11 +26,11 @@ struct ModelHints {
   using ModelHint = uint64_t;
   // By default we want the fastest inference
-  static const ModelHint kFastestInference = 0x00000000;
+  static constexpr ModelHint kFastestInference = 0x00000000;
   // Can improve compilation time, but inference can be slower
-  static const ModelHint kReduceKernelsCount = 0x00000001;
+  static constexpr ModelHint kReduceKernelsCount = 0x00000001;
   // Can improve tuning time, but inference can be slower
-  static const ModelHint kFastTuning = 0x00000002;
+  static constexpr ModelHint kFastTuning = 0x00000002;
   void Add(ModelHint hint) {
     if (hint == kFastestInference) {

View File

@@ -1094,7 +1094,7 @@ TEST(InterpreterTensorsCapacityTest, TestExceedHeadroom) {
 }
 struct TestExternalContext : public TfLiteExternalContext {
-  static const TfLiteExternalContextType kType = kTfLiteGemmLowpContext;
+  static constexpr TfLiteExternalContextType kType = kTfLiteGemmLowpContext;
   static TestExternalContext* Get(TfLiteContext* context) {
     return reinterpret_cast<TestExternalContext*>(

View File

@@ -816,40 +816,40 @@ struct TypeUnion;
 template <>
 struct TypeUnion<float> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_FLOAT32;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteFloat32;
+  static constexpr TensorType tensor_type = TensorType::TensorType_FLOAT32;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteFloat32;
   typedef float ScalarType;
 };
 template <>
 struct TypeUnion<int32_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_INT32;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteInt32;
+  static constexpr TensorType tensor_type = TensorType::TensorType_INT32;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteInt32;
   typedef int32_t ScalarType;
 };
 template <>
 struct TypeUnion<int16_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_INT16;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteInt16;
+  static constexpr TensorType tensor_type = TensorType::TensorType_INT16;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteInt16;
   typedef int16_t ScalarType;
 };
 template <>
 struct TypeUnion<int8_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_INT8;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteInt8;
+  static constexpr TensorType tensor_type = TensorType::TensorType_INT8;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteInt8;
   typedef int8_t ScalarType;
 };
 template <>
 struct TypeUnion<uint8_t> {
  public:
-  static const TensorType tensor_type = TensorType::TensorType_UINT8;
-  static const TfLiteType tflite_type = TfLiteType::kTfLiteUInt8;
+  static constexpr TensorType tensor_type = TensorType::TensorType_UINT8;
+  static constexpr TfLiteType tflite_type = TfLiteType::kTfLiteUInt8;
   typedef uint8_t ScalarType;
 };
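Note (not part of the commit): a type-to-enum mapping like TypeUnion is typically consumed from typed test helpers, and constexpr members let that lookup happen in constant expressions. The analogue below is self-contained and uses an invented enum (ExampleTensorType) rather than the TFLite types.

#include <cstdint>
#include <iostream>

enum class ExampleTensorType { kFloat32, kInt32 };  // invented, not the TFLite enum

template <typename T>
struct ExampleTypeUnion;

template <>
struct ExampleTypeUnion<float> {
  static constexpr ExampleTensorType tensor_type = ExampleTensorType::kFloat32;
  typedef float ScalarType;
};

template <>
struct ExampleTypeUnion<std::int32_t> {
  static constexpr ExampleTensorType tensor_type = ExampleTensorType::kInt32;
  typedef std::int32_t ScalarType;
};

// A typed test or builder can recover the enum value at compile time.
template <typename T>
constexpr ExampleTensorType TensorTypeFor() {
  return ExampleTypeUnion<T>::tensor_type;
}

int main() {
  static_assert(TensorTypeFor<float>() == ExampleTensorType::kFloat32,
                "float maps to kFloat32");
  std::cout << static_cast<int>(TensorTypeFor<std::int32_t>()) << "\n";
  return 0;
}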

View File

@@ -39,19 +39,19 @@ struct DeviceOptions {
   // this flag prevents it from ever being deallocated. Potentially saves
   // thrashing the thread stack memory allocation, but at the potential cost of
   // some memory space.
-  static const unsigned kDoNotReclaimStackAllocation = 0x1;
+  static constexpr unsigned kDoNotReclaimStackAllocation = 0x1;
   // The following options refer to synchronization options when
   // using SynchronizeStream or SynchronizeContext.
   // Synchronize with spinlocks.
-  static const unsigned kScheduleSpin = 0x02;
+  static constexpr unsigned kScheduleSpin = 0x02;
   // Synchronize with spinlocks that also call CPU yield instructions.
-  static const unsigned kScheduleYield = 0x04;
+  static constexpr unsigned kScheduleYield = 0x04;
   // Synchronize with a "synchronization primitive" (e.g. mutex).
-  static const unsigned kScheduleBlockingSync = 0x08;
-  static const unsigned kMask = 0xf;  // Mask of all available flags.
+  static constexpr unsigned kScheduleBlockingSync = 0x08;
+  static constexpr unsigned kMask = 0xf;  // Mask of all available flags.
   // Constructs an or-d together set of device options.
   explicit DeviceOptions(unsigned flags) : flags_(flags) {
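Note (not part of the commit): these constants are bit flags that callers or together and pass to the constructor. The simplified stand-in below (ExampleOptions, not the StreamExecutor type) sketches that usage and shows how constexpr lets the combined flag value fold at compile time.

#include <cassert>

struct ExampleOptions {  // simplified stand-in, not the StreamExecutor type
  static constexpr unsigned kDoNotReclaimStackAllocation = 0x1;
  static constexpr unsigned kScheduleSpin = 0x02;
  static constexpr unsigned kScheduleYield = 0x04;
  static constexpr unsigned kScheduleBlockingSync = 0x08;
  static constexpr unsigned kMask = 0xf;  // Mask of all available flags.

  explicit ExampleOptions(unsigned flags) : flags_(flags) {
    assert((flags & kMask) == flags);  // reject unknown bits
  }
  bool set(unsigned flag) const { return (flags_ & flag) != 0; }

  unsigned flags_;
};

int main() {
  // Flags are or-ed together; with constexpr the combination is a constant expression.
  constexpr unsigned kFlags =
      ExampleOptions::kScheduleYield | ExampleOptions::kDoNotReclaimStackAllocation;
  ExampleOptions opts(kFlags);
  return opts.set(ExampleOptions::kScheduleBlockingSync) ? 1 : 0;
}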

View File

@@ -39,10 +39,10 @@ namespace stream_executor {
 // memory for cudnn convolutions.
 class RedzoneAllocator : public ScratchAllocator {
  public:
-  static const int64 kDefaultMemoryLimit = 1LL << 32;  // 4GB
-  static const int64 kDefaultRedzoneSize =
+  static constexpr int64 kDefaultMemoryLimit = 1LL << 32;  // 4GB
+  static constexpr int64 kDefaultRedzoneSize =
       1LL << 23;  // 8MiB per side, 16MiB total.
-  static const uint8 kDefaultRedzonePattern = -1;
+  static constexpr uint8 kDefaultRedzonePattern = -1;
   RedzoneAllocator(Stream* stream, DeviceMemoryAllocator* memory_allocator,
                    GpuAsmOpts gpu_compilation_opts_,
                    int64 memory_limit = kDefaultMemoryLimit,

View File

@@ -40,8 +40,8 @@ namespace rng {
 // thread-hostility.
 class RngSupport {
  public:
-  static const int kMinSeedBytes = 16;
-  static const int kMaxSeedBytes = INT_MAX;
+  static constexpr int kMinSeedBytes = 16;
+  static constexpr int kMaxSeedBytes = INT_MAX;
   // Releases any random-number-generation resources associated with this
   // support object in the underlying platform implementation.

View File

@@ -723,7 +723,7 @@ class StreamExecutor {
   // Only one worker thread is needed; little work will be done by the
   // executor.
-  static const int kNumBackgroundThreads = 1;
+  static constexpr int kNumBackgroundThreads = 1;
   // Indicates if StreamExecutor operation tracing should be performed.
   bool tracing_enabled_;