Change some const declarations to constexpr

PiperOrigin-RevId: 307482476
Change-Id: Ibe6ddc3889e065bfb19aae4b057590a2c2ab3331
Author: A. Unique TensorFlower, 2020-04-20 14:55:39 -07:00
Committed by: TensorFlower Gardener
Parent: e885af2941
Commit: 14a5a76e76
15 changed files with 33 additions and 32 deletions
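
Background on the change: for an integral static data member, static const with an in-class initializer already yields a compile-time constant, but before C++17 it still required an out-of-line definition in some .cc file whenever it was ODR-used (for example, bound to a const int&). static constexpr states the intent explicitly, is required for in-class initialization of non-integral types such as float, and since C++17 is implicitly inline, so no separate definition is needed. A minimal sketch of the pattern this commit applies (names are illustrative, not taken from the TensorFlow sources):

struct BeforeStyle {
  // Integral type: a compile-time constant, but if ODR-used it needed an
  // out-of-line definition in some .cc file before C++17.
  static const int kTableSize = 1 << 10;
  // Non-integral types cannot be initialized in-class with plain const:
  // static const float kScale = 1.0f;  // ill-formed
};

struct AfterStyle {
  static constexpr int kTableSize = 1 << 10;  // implicitly inline in C++17
  static constexpr float kScale = 1.0f;       // OK with constexpr
};

static_assert(AfterStyle::kTableSize == 1024, "usable in constant expressions");
int scratch[AfterStyle::kTableSize];  // usable as an array bound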


@@ -353,7 +353,7 @@ class CheckNumericsV2Op<GPUDevice, T> : public CheckNumericsOp<GPUDevice, T> {
}
}
-  static const int abnormal_detected_size = 3;
+  static constexpr int abnormal_detected_size = 3;
};
#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM


@@ -793,7 +793,7 @@ struct base {
// operation. Each functor for which this is enabled increases the
// code size, so by default this is disabled for binary functors and
// is enabled on a per-op basis as needed.
-  static const bool use_bcast_optimization = false;
+  static constexpr bool use_bcast_optimization = false;
// operator() has the signature:
// out_type operator()(in_type in0, in_type in1 ...)
@@ -811,24 +811,24 @@ struct base {
// Whether the functor can error out. Currently applies only to integer
// div and mod.
-  static const bool has_errors = false;
+  static constexpr bool has_errors = false;
};
// For now, we only apply certain speed optimization for
// float/double's broadcast binary op.
template <typename T>
struct use_bcast_optimization {
-  static const bool value = false;
+  static constexpr bool value = false;
};
template <>
struct use_bcast_optimization<float> {
-  static const bool value = true;
+  static constexpr bool value = true;
};
template <>
struct use_bcast_optimization<double> {
-  static const bool value = true;
+  static constexpr bool value = true;
};
////////////////////////////////////////////////////////////////////////////////
@@ -1007,17 +1007,17 @@ struct rint : base<T, Eigen::internal::scalar_rint_op<T>> {};
template <typename T>
struct add : base<T, Eigen::internal::scalar_sum_op<T>> {
-  static const bool use_bcast_optimization = true;
+  static constexpr bool use_bcast_optimization = true;
};
template <typename T>
struct sub : base<T, Eigen::internal::scalar_difference_op<T>> {
-  static const bool use_bcast_optimization = true;
+  static constexpr bool use_bcast_optimization = true;
};
template <typename T>
struct mul : base<T, Eigen::internal::scalar_product_op<T>> {
-  static const bool use_bcast_optimization = true;
+  static constexpr bool use_bcast_optimization = true;
};
template <typename T>
@@ -1029,7 +1029,7 @@ struct div : base<T, Eigen::internal::scalar_quotient_op<T>> {};
template <typename T>
struct safe_div : base<T, Eigen::internal::safe_div_or_mod_op<
T, Eigen::internal::scalar_quotient_op<T>>> {
-  static const bool has_errors = true;
+  static constexpr bool has_errors = true;
};
template <typename T>
@@ -1044,7 +1044,7 @@ struct mod : base<T, Eigen::internal::scalar_mod2_op<T>> {};
template <typename T>
struct safe_mod : base<T, Eigen::internal::safe_div_or_mod_op<
T, Eigen::internal::scalar_mod2_op<T>>> {
-  static const bool has_errors = true;
+  static constexpr bool has_errors = true;
};
template <typename T>
@@ -1053,7 +1053,7 @@ struct floor_fmod : base<T, Eigen::internal::google_floor_fmod<T>> {};
template <typename T>
struct safe_floor_mod : base<T, Eigen::internal::safe_div_or_mod_op<
T, Eigen::internal::google_floor_mod<T>>> {
-  static const bool has_errors = true;
+  static constexpr bool has_errors = true;
};
template <typename T>
@@ -1062,7 +1062,7 @@ struct floor_div : base<T, Eigen::internal::google_floor_div<T>> {};
template <typename T>
struct safe_floor_div : base<T, Eigen::internal::safe_div_or_mod_op<
T, Eigen::internal::google_floor_div<T>>> {
-  static const bool has_errors = true;
+  static constexpr bool has_errors = true;
};
template <typename T>
@@ -1073,7 +1073,7 @@ struct pow : base<T, Eigen::internal::scalar_pow_op<T, T>> {};
template <typename T>
struct safe_pow : base<T, Eigen::internal::safe_scalar_binary_pow_op<T, T>> {
-  static const bool has_errors = true;
+  static constexpr bool has_errors = true;
};
template <typename T>

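The use_bcast_optimization and has_errors flags above are compile-time traits: kernel templates branch on them, so the values must be constant expressions. A hedged sketch of how this kind of flag is typically consumed (simplified stand-ins, not the actual cwise_ops machinery):

#include <iostream>

template <typename T>
struct use_bcast_optimization {
  static constexpr bool value = false;  // generic default
};
template <>
struct use_bcast_optimization<float> {
  static constexpr bool value = true;   // opt in per type
};

template <typename T>
void BinaryOp() {
  // The flag is a constant expression, so the compiler trivially
  // eliminates the dead branch.
  if (use_bcast_optimization<T>::value) {
    std::cout << "fast broadcast path\n";
  } else {
    std::cout << "generic path\n";
  }
}

int main() {
  BinaryOp<float>();  // fast broadcast path
  BinaryOp<int>();    // generic path
}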

@@ -185,7 +185,7 @@ struct CopyFilterDepth {
template <typename T>
struct ComputeFilterRangeTransform {
typedef typename Eigen::internal::packet_traits<T>::type Packet;
-  static const int64 kPacketSize = (sizeof(Packet) / sizeof(T));
+  static constexpr int64 kPacketSize = (sizeof(Packet) / sizeof(T));
typedef Eigen::Map<
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>

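kPacketSize above derives the SIMD lane count from the packet type's byte size; as a constexpr it can then appear in array bounds and template arguments. A dependency-free sketch of the computation, where Packet4f is a hypothetical stand-in for Eigen::internal::packet_traits<float>::type:

// A 128-bit register holding 4 floats, standing in for Eigen's packet type.
struct alignas(16) Packet4f { float v[4]; };

template <typename T, typename Packet>
struct LaneCount {
  // Lane count = register bytes / element bytes.
  static constexpr int kPacketSize = sizeof(Packet) / sizeof(T);
};

static_assert(LaneCount<float, Packet4f>::kPacketSize == 4, "4 lanes per packet");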

@@ -270,10 +270,10 @@ class TensorContractionBlocking<float, float, float, StorageIndex,
static constexpr float kScaleN = 1.0;
// Mkldnn Avx/Avx2/Avx512 unroll factors are: 8/16/48.
-  static const StorageIndex kUnrollM = 48;
+  static constexpr StorageIndex kUnrollM = 48;
// Mkldnn Avx/Avx2/Avx512 unroll factors are: 6/6/8.
-  static const StorageIndex kUnrollN = 24;
+  static constexpr StorageIndex kUnrollN = 24;
public:
TensorContractionBlocking(StorageIndex k, StorageIndex m, StorageIndex n,


@@ -63,7 +63,7 @@ class TensorEvaluatorHasPartialPacket {
functionExistsSfinae<TensorEvaluatorType, PacketType, IndexType>(
nullptr)) status;
-  static const bool value = status::value;
+  static constexpr bool value = status::value;
};
// Compute a mask for loading/storing coefficients in/from a packet in a

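TensorEvaluatorHasPartialPacket uses the classic SFINAE detection idiom: overload resolution picks a true_type probe when the member exists and falls back to a false_type ellipsis overload otherwise, with the result exposed as a constexpr bool. A self-contained sketch of the same idiom (HasFoo and foo are illustrative stand-ins):

#include <type_traits>

template <typename T>
struct HasFoo {
  template <typename U>
  static std::true_type Probe(decltype(&U::foo));  // viable only if U::foo exists
  template <typename U>
  static std::false_type Probe(...);               // fallback

  typedef decltype(Probe<T>(nullptr)) status;
  static constexpr bool value = status::value;
};

struct WithFoo { void foo(); };
struct WithoutFoo {};

static_assert(HasFoo<WithFoo>::value, "foo detected");
static_assert(!HasFoo<WithoutFoo>::value, "foo absent");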

@@ -277,11 +277,11 @@ struct AvgPoolMeanReducer {
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__) && \
!defined(__HIPCC__)
// We only support packet access for floats.
-  static const bool PacketAccess = internal::is_same<T, float>::value;
+  static constexpr bool PacketAccess = internal::is_same<T, float>::value;
#else
static const bool PacketAccess = false;
#endif
-  static const bool IsStateful = true;
+  static constexpr bool IsStateful = true;
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE AvgPoolMeanReducer() : scalarCount_(0) {
typedef typename packet_traits<T>::type Packet;


@@ -28,15 +28,15 @@ template <DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename ArgType,
struct CustomTensorEvaluator {
typedef TensorVolumePatchOp<Planes, Rows, Cols, ArgType> XprType;
typedef typename XprType::Index Index;
-  static const int NumInputDims = internal::array_size<
+  static constexpr int NumInputDims = internal::array_size<
typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
-  static const int NumDims = NumInputDims + 1;
+  static constexpr int NumDims = NumInputDims + 1;
typedef DSizes<Index, NumDims> Dimensions;
typedef
typename internal::remove_const<typename XprType::Scalar>::type Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
-  static const Index PacketSize =
+  static constexpr Index PacketSize =
internal::unpacket_traits<PacketReturnType>::size;
enum {


@@ -317,7 +317,7 @@ namespace {
template <typename Device, typename T>
class ParameterizedTruncatedNormalOp : public OpKernel {
// Reshape batches so each batch is this size if possible.
-  static const int32 kDesiredBatchSize = 100;
+  static constexpr int32 kDesiredBatchSize = 100;
public:
explicit ParameterizedTruncatedNormalOp(OpKernelConstruction* context)


@@ -36,7 +36,7 @@ namespace tensorflow {
class QueueBase : public QueueInterface {
public:
// As a possible value of 'capacity'.
-  static const int32 kUnbounded = INT_MAX;
+  static constexpr int32 kUnbounded = INT_MAX;
// Args:
// component_dtypes: The types of each component in a queue-element tuple.


@@ -326,7 +326,7 @@ namespace {
template <typename Device, typename T, typename U>
class RandomBinomialOp : public OpKernel {
// Reshape batches so each batch is this size if possible.
-  static const int32 kDesiredBatchSize = 100;
+  static constexpr int32 kDesiredBatchSize = 100;
public:
explicit RandomBinomialOp(OpKernelConstruction* context)
@@ -439,7 +439,7 @@ class RandomBinomialOp : public OpKernel {
template <typename Device, typename T, typename U>
class StatelessRandomBinomialOp : public OpKernel {
// Reshape batches so each batch is this size if possible.
-  static const int32 kDesiredBatchSize = 100;
+  static constexpr int32 kDesiredBatchSize = 100;
public:
explicit StatelessRandomBinomialOp(OpKernelConstruction* context)


@@ -111,7 +111,7 @@ struct FillPhiloxRandomTask<Distribution, false> {
template <class Distribution>
struct FillPhiloxRandomTask<Distribution, true> {
typedef typename Distribution::ResultElementType T;
-  static const int64 kReservedSamplesPerOutput = 256;
+  static constexpr int64 kReservedSamplesPerOutput = 256;
static void Run(random::PhiloxRandom base_gen, T* data, int64 size,
int64 start_group, int64 limit_group, Distribution dist) {

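kReservedSamplesPerOutput supports deterministic parallel generation with a counter-based RNG: each output element owns a fixed block of 256 counter values, so a worker can fast-forward to its shard and produce the same stream no matter how the groups are partitioned. A dependency-free sketch of the idea, using a SplitMix64-style mixer as a stand-in for PhiloxRandom (the real kernel's partitioning details differ):

#include <cstdint>

static constexpr std::int64_t kReservedSamplesPerOutput = 256;

// Counter-based stand-in: sample i is a pure function of (seed, i),
// so skipping ahead is just arithmetic on the counter.
std::uint64_t SampleAt(std::uint64_t seed, std::uint64_t counter) {
  std::uint64_t z = seed + counter * 0x9E3779B97F4A7C15ull;
  z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ull;
  z = (z ^ (z >> 27)) * 0x94D049BB133111EBull;
  return z ^ (z >> 31);
}

// Fill outputs [start, limit); results are identical for any sharding.
void FillRange(std::uint64_t seed, std::uint64_t* out,
               std::int64_t start, std::int64_t limit) {
  for (std::int64_t i = start; i < limit; ++i) {
    // Each output draws from its own reserved block of counters.
    out[i] = SampleAt(seed, static_cast<std::uint64_t>(i) *
                                kReservedSamplesPerOutput);
  }
}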

@@ -58,7 +58,7 @@ class ResizeBicubicOpTest : public OpsTestBase {
}
private:
-  static const int64 kTableSize = (1 << 10);
+  static constexpr int64 kTableSize = (1 << 10);
const float* InitCoeffsTable() {
// Allocate and initialize coefficients table using Bicubic


@@ -311,7 +311,7 @@ class SparseMatmulOpTest : public ::testing::Test {
#elif defined EIGEN_VECTORIZE_AVX || defined EIGEN_VECTORIZE_AVX2
static const int kMaxPacketSize = 8;
#else
-  static const int kMaxPacketSize = 4;
+  static constexpr int kMaxPacketSize = 4;
#endif
typedef typename Eigen::internal::packet_traits<float>::type Packet;
const int PacketSize;

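Note the contrast in this fixture: kMaxPacketSize is selected by the preprocessor and fixed at compile time, while PacketSize is a plain const instance member initialized at run time in the constructor, which is why it cannot be constexpr. A small illustration with made-up names:

struct Fixture {
  static constexpr int kMaxPacketSize = 4;  // compile-time constant
  const int PacketSize;                     // per-object, set at run time

  explicit Fixture(int packet_size) : PacketSize(packet_size) {}
};

int buffer[Fixture::kMaxPacketSize];  // OK: constant expression
// int bad[Fixture(4).PacketSize];    // ill-formed: not a constant expression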

@@ -244,7 +244,7 @@ Status MOutOfBoundsError(int64 m, std::size_t i, int lhs_index_a,
template <typename T, typename Tindices, bool ADJ_A, bool ADJ_B>
struct SparseTensorDenseMatMulFunctor<CPUDevice, T, Tindices, ADJ_A, ADJ_B> {
// Vectorize certain operations above this size.
-  static const std::size_t kNumVectorize = 32;
+  static constexpr std::size_t kNumVectorize = 32;
static Status Compute(const CPUDevice& d, typename TTypes<T>::Matrix out,
typename TTypes<Tindices>::ConstMatrix a_indices,


@@ -137,7 +137,8 @@ class UnaryOpsComposition : public OpKernel {
}
private:
-  static const int kPacketSize = Eigen::internal::unpacket_traits<Packet>::size;
+  static constexpr int kPacketSize =
+      Eigen::internal::unpacket_traits<Packet>::size;
static inline int64 AlignBlockSize(int64 block_size) {
// Align block size to packet size and account for unrolling in run above.
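The AlignBlockSize body is truncated in this hunk. Purely to illustrate the comment's intent (rounding a block size to a whole multiple of the packet width times the unroll factor), a hypothetical version might look like the sketch below; the constants and the rounding policy are assumptions, not the kernel's actual logic:

#include <cstdint>

// Illustrative values; the real kPacketSize comes from
// Eigen::internal::unpacket_traits<Packet>::size.
static constexpr std::int64_t kPacketSize = 4;
static constexpr std::int64_t kUnrollFactor = 2;

static inline std::int64_t AlignBlockSize(std::int64_t block_size) {
  constexpr std::int64_t step = kPacketSize * kUnrollFactor;
  // Round down to a whole number of unrolled packets, but never below one.
  if (block_size >= step) return (block_size / step) * step;
  return step;
}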