Some const declarations changed to constexpr
PiperOrigin-RevId: 307516221
Change-Id: Ia11cc2f89dc10069aacc56169a64dcd02ee8c09a
commit 8b88a0c888
parent ae06854dec
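
Background on the change (an illustration, not part of the commit): for integral and enumeration types both `static const` and `static constexpr` data members are compile-time constants, but a `constexpr` member is guaranteed to be usable in constant expressions and, since C++17, is implicitly inline, so it needs no out-of-class definition even when ODR-used; a plain `static const` member still does in that case. A minimal sketch, using a hypothetical class Pool and hypothetical member kLegacyConstant (only the kMinAllocation* names come from the diff below):

// Hypothetical sketch: const vs. constexpr static data members.
#include <cstddef>

class Pool {
 public:
  // constexpr members must be initialized by constant expressions and are
  // implicitly inline in C++17, so no .cc-file definition is needed.
  static constexpr std::size_t kMinAllocationBits = 8;
  static constexpr std::size_t kMinAllocationSize = 1 << kMinAllocationBits;

  // Also a compile-time constant, but if it is ODR-used (e.g. its address
  // is taken) it still needs an out-of-class definition in some .cc file.
  static const int kLegacyConstant = -1;
};

// Both forms can be used where a constant expression is required.
static_assert(Pool::kMinAllocationSize == 256, "1 << 8 == 256");
int buffer[Pool::kMinAllocationSize];

int main() {
  buffer[0] = Pool::kLegacyConstant;  // value read only, not ODR-used
  return 0;
}

Both spellings produce the same constants here; constexpr simply makes the compile-time intent explicit and sidesteps the out-of-class definition requirement when a member is ODR-used.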
@@ -124,12 +124,12 @@ class BFCAllocator : public Allocator {
   // A ChunkHandle is an index into the chunks_ vector in BFCAllocator
   // kInvalidChunkHandle means an invalid chunk
   typedef size_t ChunkHandle;
-  static const int kInvalidChunkHandle = -1;
+  static constexpr int kInvalidChunkHandle = -1;

   typedef int BinNum;
-  static const int kInvalidBinNum = -1;
+  static constexpr int kInvalidBinNum = -1;
   // The following means that the largest bin'd chunk size is 256 << 21 = 512MB.
-  static const int kNumBins = 21;
+  static constexpr int kNumBins = 21;

   // A Chunk points to a piece of memory that's either entirely free or entirely
   // in use by one user memory allocation.
@@ -243,8 +243,8 @@ class BFCAllocator : public Allocator {
         : bin_size(bs), free_chunks(ChunkComparator(allocator)) {}
   };

-  static const size_t kMinAllocationBits = 8;
-  static const size_t kMinAllocationSize = 1 << kMinAllocationBits;
+  static constexpr size_t kMinAllocationBits = 8;
+  static constexpr size_t kMinAllocationSize = 1 << kMinAllocationBits;

   // BFCAllocator allocates memory into a collection of disjoint
   // AllocationRegions. Each AllocationRegion corresponds to one call to
@@ -32,7 +32,7 @@ struct BuildGraphOptions {
   // TODO(mrry): Remove this when the distributed runtime supports Arg/Retval.
   bool use_function_convention = false;

-  static const int64 kNoCollectiveGraphKey = 0;
+  static constexpr int64 kNoCollectiveGraphKey = 0;
   int64 collective_graph_key = kNoCollectiveGraphKey;

   // If not `kNone`, order all CollectiveReduce operations statically and
@@ -200,9 +200,9 @@ class ExecutorImpl : public Executor {
   // Initial time (in CPU cycles) we expect an operation to take. Used to
   // determine whether an operation should be place in a threadpool.
   // Operations start out "expensive".
-  static const uint64 kInitialCostEstimateCycles = 100 * 1000 * 1000;
-  static const uint64 kOpIsExpensiveThresholdCycles = 5000;
-  static const uint64 kCostDecay = 10;
+  static constexpr uint64 kInitialCostEstimateCycles = 100 * 1000 * 1000;
+  static constexpr uint64 kOpIsExpensiveThresholdCycles = 5000;
+  static constexpr uint64 kCostDecay = 10;

   std::unique_ptr<std::atomic<bool>[]> is_expensive_;
   std::unique_ptr<std::atomic_uint_fast64_t[]> cost_estimates_;
@@ -261,7 +261,7 @@ class PendingCounts {
   // Each frame in this subgraph has its own PendingCounts.

   // We use 3 bits each for dead_count and pending.
-  static const int kMaxCountForPackedCounts = 7;
+  static constexpr int kMaxCountForPackedCounts = 7;

   // Most counts are small, so we pack a pending count and a dead
   // count into 3 bits each, use 1 bit to indicate that the node has
@@ -84,8 +84,8 @@ class ProcessState : public ProcessStateInterface {

   // If these flags need to be runtime configurable consider adding
   // them to ConfigProto.
-  static const bool FLAGS_brain_mem_reg_gpu_dma = true;
-  static const bool FLAGS_brain_gpu_record_mem_types = false;
+  static constexpr bool FLAGS_brain_mem_reg_gpu_dma = true;
+  static constexpr bool FLAGS_brain_gpu_record_mem_types = false;

   // Helper method for unit tests to reset the ProcessState singleton by
   // cleaning up everything. Never use in production.
@@ -27,8 +27,8 @@ class ScopedAllocatorInstance;
 // Manages a single backing tensor and a collection of aliases.
 class ScopedAllocator {
  public:
-  static const int32 kInvalidId = 0;
-  static const size_t kMaxAlignment = 64;
+  static constexpr int32 kInvalidId = 0;
+  static constexpr size_t kMaxAlignment = 64;

   // A subrange of the TensorBuffer associated with this object that
   // will be the backing memory for one aliased tensor.
@@ -39,7 +39,7 @@ class ScopedAllocator {
     size_t bytes_allocated;
   };
   // Field index that refers to backing tensor, not any aliased field.
-  static const int32 kBackingIndex = -1;
+  static constexpr int32 kBackingIndex = -1;

   // backing_tensor is expected to be newly allocated by a ScopedAllocatorOp
   // instance. It must be large enough to back all of the specified
@@ -188,7 +188,7 @@ class StepStatsCollector : public StepStatsCollectorInterface {
  private:
   // TODO(suharshs): Make this configurable if its not possible to find a value
   // that works for all cases.
-  static const uint64 kMaxCollectedNodes = 1 << 20;
+  static constexpr uint64 kMaxCollectedNodes = 1 << 20;

   typedef std::vector<std::unique_ptr<NodeExecStatsWrapper>> NodeStatsVector;
   typedef std::unordered_map<uint32, string> ThreadNamesMap;