From d13ee0b7f8a77636293981b8db2dac86e67b71cf Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Fri, 2 Nov 2018 11:06:52 -0700 Subject: [PATCH] Remove the padded_dimensions and padding_value fields from the Layout protobuffer. These fields were never used nor supported. PiperOrigin-RevId: 219828371 --- tensorflow/compiler/xla/index_util.cc | 12 +--- tensorflow/compiler/xla/index_util.h | 3 +- tensorflow/compiler/xla/layout_util.cc | 64 +------------------ tensorflow/compiler/xla/layout_util.h | 17 ----- tensorflow/compiler/xla/layout_util_test.cc | 24 ------- .../xla/service/cpu/dot_op_emitter.cc | 9 +-- .../compiler/xla/service/cpu/ir_emitter.cc | 6 -- .../xla/service/gpu/ir_emission_utils.cc | 14 ++-- .../compiler/xla/service/layout_assignment.cc | 1 - tensorflow/compiler/xla/shape_util.cc | 45 +------------ tensorflow/compiler/xla/shape_util_test.cc | 30 --------- tensorflow/compiler/xla/xla_data.proto | 41 ++---------- 12 files changed, 20 insertions(+), 246 deletions(-) diff --git a/tensorflow/compiler/xla/index_util.cc b/tensorflow/compiler/xla/index_util.cc index 3fadabcf520..2a0241af3ef 100644 --- a/tensorflow/compiler/xla/index_util.cc +++ b/tensorflow/compiler/xla/index_util.cc @@ -29,8 +29,6 @@ namespace xla { /* static */ int64 IndexUtil::MultidimensionalIndexToLinearIndex( const Shape& shape, absl::Span multi_index) { DCHECK_EQ(shape.dimensions_size(), multi_index.size()); - // Padding and nested layouts not supported yet. - DCHECK_EQ(0, shape.layout().padded_dimensions_size()); for (size_t i = 0; i < multi_index.size(); ++i) { DCHECK_GE(multi_index[i], 0); @@ -94,8 +92,6 @@ namespace xla { /* static */ std::vector IndexUtil::LinearIndexToMultidimensionalIndex( const Shape& shape, int64 linear_index) { - // Padding and nested layouts not supported yet. - DCHECK_EQ(0, shape.layout().padded_dimensions_size()); DCHECK_GE(linear_index, 0); DCHECK_LT(linear_index, ShapeUtil::ElementsIn(shape)); @@ -133,18 +129,12 @@ namespace xla { /* static */ int64 IndexUtil::GetDimensionStride(const Shape& shape, int64 dimension) { - int64 pdim_size = LayoutUtil::PaddedDimensions(shape).size(); int64 stride = 1; - DCHECK(pdim_size == 0 || pdim_size == shape.dimensions_size()); for (auto dim : LayoutUtil::MinorToMajor(shape)) { if (dim == dimension) { break; } - if (pdim_size == 0) { - stride *= shape.dimensions(dim); - } else { - stride *= LayoutUtil::PaddedDimension(shape, dim); - } + stride *= shape.dimensions()[dim]; } return stride; } diff --git a/tensorflow/compiler/xla/index_util.h b/tensorflow/compiler/xla/index_util.h index 2979cf87dde..458bdaf2f89 100644 --- a/tensorflow/compiler/xla/index_util.h +++ b/tensorflow/compiler/xla/index_util.h @@ -61,8 +61,7 @@ class IndexUtil { static bool BumpIndices(const Shape& shape, absl::Span indices); // Calculates the stride size (in number of elements, not byte size) of a - // given logical shape dimension (from 0 to rank-1). If available, padded - // dimensions are used. + // given logical shape dimension (from 0 to rank-1). 
// Example: // GetDimensionStride(F32[5,8,10,4]{3,2,1,0}, 1) == // sizeof(dimension(3)) * sizeof(dimension(2)) == 4 * 10 diff --git a/tensorflow/compiler/xla/layout_util.cc b/tensorflow/compiler/xla/layout_util.cc index 66af644cf78..2398470dd49 100644 --- a/tensorflow/compiler/xla/layout_util.cc +++ b/tensorflow/compiler/xla/layout_util.cc @@ -201,8 +201,7 @@ Layout CreateDefaultLayoutForRank(int64 rank) { } if (!ShapeUtil::IsArray(shape)) { - if (layout.minor_to_major_size() != 0 || - layout.padded_dimensions_size() != 0) { + if (layout.minor_to_major_size() != 0) { return InvalidArgument( "shape of primitive type %s should not have a non-trivial layout", PrimitiveType_Name(shape.element_type())); @@ -241,28 +240,6 @@ Layout CreateDefaultLayoutForRank(int64 rank) { } dimensions_in_layout[dim] = true; } - - if (layout.padded_dimensions_size() > 0) { - if (layout.padded_dimensions_size() != ShapeUtil::Rank(shape)) { - return InvalidArgument( - "layout has %d padded dimensions, but shape is rank %d", - layout.padded_dimensions_size(), ShapeUtil::Rank(shape)); - } - for (int i = 0; i < layout.padded_dimensions_size(); ++i) { - if (layout.padded_dimensions(i) < shape.dimensions(i)) { - return InvalidArgument( - "for dimension %d, dimension padding (%d) is smaller than " - "the dimension size (%d) of the shape", - i, layout.padded_dimensions(i), shape.dimensions(i)); - } - } - } - } - - if (layout.format() == SPARSE) { - if (!layout.padded_dimensions().empty()) { - return InvalidArgument("Sparse layout has padded dimensions"); - } } return Status::OK(); @@ -303,38 +280,6 @@ Layout CreateDefaultLayoutForRank(int64 rank) { layout.minor_to_major().end(), std::greater()); } -/* static */ bool LayoutUtil::IsPadded(const Shape& shape) { - if (!ShapeUtil::IsArray(shape) || !HasLayout(shape) || - shape.layout().padded_dimensions_size() == 0) { - return false; - } - CHECK(IsDenseArray(shape)) << shape.ShortDebugString(); - CHECK_EQ(shape.dimensions_size(), shape.layout().padded_dimensions_size()); - for (int64 i = 0; i < shape.dimensions_size(); ++i) { - if (shape.layout().padded_dimensions(i) > shape.dimensions(i)) { - return true; - } - } - return false; -} - -/* static */ absl::Span LayoutUtil::PaddedDimensions( - const Shape& shape) { - CHECK(IsDenseArray(shape)); - return AsInt64Slice(shape.layout().padded_dimensions()); -} - -/* static */ int64 LayoutUtil::PaddedDimension(const Shape& shape, - int64 index) { - CHECK(IsDenseArray(shape)); - return shape.layout().padded_dimensions(index); -} - -/* static */ PaddingValue LayoutUtil::GetPaddingValue(const Shape& shape) { - CHECK(IsDenseArray(shape)); - return shape.layout().padding_value(); -} - /* static */ bool LayoutUtil::IsSparseArray(const Shape& shape) { return ShapeUtil::IsArray(shape) && shape.has_layout() && IsSparse(shape.layout()); @@ -513,13 +458,6 @@ std::ostream& operator<<(std::ostream& out, const Layout& layout) { for (int64 minor_to_major : layout.minor_to_major()) { hash_value = Hash64Combine(hash_value, hash()(minor_to_major)); } - - for (int64 padded_dim : layout.padded_dimensions()) { - hash_value = Hash64Combine(hash_value, hash()(padded_dim)); - } - - hash_value = - Hash64Combine(hash_value, hash()(layout.padding_value())); hash_value = Hash64Combine(hash_value, layout.max_sparse_elements()); return hash_value; diff --git a/tensorflow/compiler/xla/layout_util.h b/tensorflow/compiler/xla/layout_util.h index 97806d7e331..6e0390763da 100644 --- a/tensorflow/compiler/xla/layout_util.h +++ b/tensorflow/compiler/xla/layout_util.h @@ 
-104,23 +104,6 @@ class LayoutUtil { // more minor, and so on until dimension N-1 which is the minor. static bool IsMonotonicWithDim0Major(const Layout& layout); - // Returns whether the layout of the given shape has padding (a - // padded_dimension value in Layout is greater than the corresponding - // dimension size). - static bool IsPadded(const Shape& shape); - - // Returns the padded_dimensions array for the given Shape. Requires that the - // shape is an array and has a dense layout. - static absl::Span PaddedDimensions(const Shape& shape); - - // Returns the given index of the padded_dimensions array for the given Shape. - // Requires that the shape is an array and has a dense layout. - static int64 PaddedDimension(const Shape& shape, int64 index); - - // Returns the padding_value for the given Shape. Requires that the shape is - // an array and has a dense layout. - static PaddingValue GetPaddingValue(const Shape& shape); - // Returns whether the given Shape is an array (i.e. not a tuple) and has a // sparse format layout. static bool IsSparseArray(const Shape& shape); diff --git a/tensorflow/compiler/xla/layout_util_test.cc b/tensorflow/compiler/xla/layout_util_test.cc index a50d53eaeb1..12ce2d2d7c6 100644 --- a/tensorflow/compiler/xla/layout_util_test.cc +++ b/tensorflow/compiler/xla/layout_util_test.cc @@ -304,30 +304,6 @@ TEST_F(LayoutUtilTest, SetToDefaultLayoutTuple) { shape.tuple_shapes(1).layout())); } -TEST_F(LayoutUtilTest, IsPadded) { - Shape shape_without_layout = ShapeUtil::MakeShape(F32, {2, 3, 4}); - LayoutUtil::ClearLayout(&shape_without_layout); - EXPECT_FALSE(LayoutUtil::IsPadded(shape_without_layout)); - - Shape shape_with_layout = ShapeUtil::MakeShape(F32, {2, 3, 4}); - LayoutUtil::SetToDefaultLayout(&shape_with_layout); - EXPECT_FALSE(LayoutUtil::IsPadded(shape_with_layout)); - - // Add padding equal to the dimension sizes. In this case the padding is a - // nop. - Shape shape_with_degenerate_padding = ShapeUtil::MakeShape(F32, {2, 3, 4}); - shape_with_degenerate_padding.mutable_layout()->add_padded_dimensions(2); - shape_with_degenerate_padding.mutable_layout()->add_padded_dimensions(3); - shape_with_degenerate_padding.mutable_layout()->add_padded_dimensions(4); - EXPECT_FALSE(LayoutUtil::IsPadded(shape_with_degenerate_padding)); - - Shape shape_with_padding = ShapeUtil::MakeShape(F32, {2, 3, 4}); - shape_with_padding.mutable_layout()->add_padded_dimensions(2); - shape_with_padding.mutable_layout()->add_padded_dimensions(14); - shape_with_padding.mutable_layout()->add_padded_dimensions(42); - EXPECT_TRUE(LayoutUtil::IsPadded(shape_with_padding)); -} - TEST_F(LayoutUtilTest, DefaultLayoutGettersMajorToMinor) { EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({1, 0}), LayoutUtil::GetDefaultLayoutForR2())); diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc index 99fa707c959..97f9b85a606 100644 --- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc +++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc @@ -1546,10 +1546,8 @@ DotOpEmitter::MatMultDims DotOpEmitter::GetMatMultDims() const { LayoutUtil::Minor(target_array_.GetShape().layout(), 0) == 0}; } -// Return whether the given shape is a matrix with no padding. -static bool IsRank2WithNoPadding(const Shape& shape) { - return ShapeUtil::Rank(shape) == 2 && !LayoutUtil::IsPadded(shape); -} +// Return whether the given shape is rank 2. 
+static bool IsRank2(const Shape& shape) { return ShapeUtil::Rank(shape) == 2; } // In a gemm operation where output = lhs * rhs, check whether the given shapes // are valid for the operation. @@ -1565,8 +1563,7 @@ static bool AreValidGemmShapes( return false; } - if (!(IsRank2WithNoPadding(lhs_shape) && IsRank2WithNoPadding(rhs_shape) && - IsRank2WithNoPadding(output_shape))) { + if (!(IsRank2(lhs_shape) && IsRank2(rhs_shape) && IsRank2(output_shape))) { return false; } diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc index 1b0c6f70f96..d6968323f33 100644 --- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc +++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc @@ -2415,14 +2415,8 @@ StatusOr IrEmitter::EmitFastConcatenate( *failure_reason = "operand has mismatching layouts"; return false; } - if (LayoutUtil::IsPadded(op->shape())) { - *failure_reason = "operand has padded layout"; - return false; - } } - CHECK(!LayoutUtil::IsPadded(concatenate->shape())); - // We split the dimensions into three categories: the dimension over which we // are concatenating (concat_dim), the dimensions that are minor to it // (inner_dims) and the dimensions that are major to it (outer_dims). diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc index 846d6384405..42fb38dffae 100644 --- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc +++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc @@ -38,10 +38,9 @@ namespace gpu { namespace { -// Return whether the given shape is a matrix with no padding. -bool IsRank2WithNoPadding(const Shape& shape, int64 batch_dimensions_size) { - return ShapeUtil::Rank(shape) == batch_dimensions_size + 2 && - !LayoutUtil::IsPadded(shape); +// Return whether the given shape is rank 2 excluding the batch dimensions. +bool IsRank2(const Shape& shape, int64 batch_dimensions_size) { + return ShapeUtil::Rank(shape) == batch_dimensions_size + 2; } // In a gemm operation where output = lhs * rhs, check whether the given shapes @@ -56,10 +55,9 @@ bool AreValidGemmShapes(const Shape& lhs_shape, const Shape& rhs_shape, bool type_is_allowed = (output_primitive_type == F16 || output_primitive_type == F32 || output_primitive_type == F64 || output_primitive_type == C64); - return type_is_allowed && - IsRank2WithNoPadding(lhs_shape, batch_dimensions_size) && - IsRank2WithNoPadding(rhs_shape, batch_dimensions_size) && - IsRank2WithNoPadding(output_shape, batch_dimensions_size) && + return type_is_allowed && IsRank2(lhs_shape, batch_dimensions_size) && + IsRank2(rhs_shape, batch_dimensions_size) && + IsRank2(output_shape, batch_dimensions_size) && !ShapeUtil::IsZeroElementArray(lhs_shape) && !ShapeUtil::IsZeroElementArray(rhs_shape); } diff --git a/tensorflow/compiler/xla/service/layout_assignment.cc b/tensorflow/compiler/xla/service/layout_assignment.cc index a959e5ee807..6b033946698 100644 --- a/tensorflow/compiler/xla/service/layout_assignment.cc +++ b/tensorflow/compiler/xla/service/layout_assignment.cc @@ -449,7 +449,6 @@ Status LayoutAssignment::AddMandatoryConstraints( // instruction. // TODO(b/31425034): Change infeeds to be more like parameters, with // shapes in the ComputationLayout. 
- DCHECK(!LayoutUtil::IsPadded(instruction->shape())); TF_RETURN_IF_ERROR( constraints->SetInstructionLayout(instruction->shape(), instruction)); } else if (instruction->opcode() == HloOpcode::kOutfeed) { diff --git a/tensorflow/compiler/xla/shape_util.cc b/tensorflow/compiler/xla/shape_util.cc index d7aa29d8ee9..17120e610cb 100644 --- a/tensorflow/compiler/xla/shape_util.cc +++ b/tensorflow/compiler/xla/shape_util.cc @@ -116,16 +116,6 @@ bool CompareShapes(const Shape& lhs, const Shape& rhs, bool compare_layouts, VLOG(3) << "CompareShapes: lhs layout != rhs layout"; return false; } - if (!absl::c_equal(lhs.layout().padded_dimensions(), - rhs.layout().padded_dimensions())) { - VLOG(3) - << "CompareShapes: lhs padded_dimensions != rhs padded_dimensions"; - return false; - } - if (lhs.layout().padding_value() != rhs.layout().padding_value()) { - VLOG(3) << "CompareShapes: lhs padding value != rhs padding_value"; - return false; - } } } @@ -818,17 +808,7 @@ StatusOr ParseShapeStringInternal(absl::string_view* s) { allocated_element_count = LayoutUtil::MaxSparseElements(shape.layout()); } else { CHECK(LayoutUtil::IsDenseArray(shape)) << shape.ShortDebugString(); - absl::Span padded_dimensions = - LayoutUtil::PaddedDimensions(shape); - if (!padded_dimensions.empty()) { - CHECK_EQ(Rank(shape), padded_dimensions.size()); - allocated_element_count = 1; - for (int64 dimension_size : padded_dimensions) { - allocated_element_count *= dimension_size; - } - } else { - allocated_element_count = ElementsIn(shape); - } + allocated_element_count = ElementsIn(shape); } return allocated_element_count * ByteSizeOfPrimitiveType(shape.element_type()); @@ -946,12 +926,8 @@ StatusOr ParseShapeStringInternal(absl::string_view* s) { return dense_shape_size; } - bool is_padded = shape_has_valid_layout && - LayoutUtil::IsDenseArray(shape) && - LayoutUtil::IsPadded(shape); absl::Span shape_max_dimensions = - is_padded ? LayoutUtil::PaddedDimensions(shape) - : AsInt64Slice(shape.dimensions()); + AsInt64Slice(shape.dimensions()); for (int64 dim : shape_max_dimensions) { dense_shape_size = MultiplyWithoutOverflow(dense_shape_size, dim); if (dense_shape_size < 0) { @@ -1193,13 +1169,6 @@ Status ForEachMutableSubshapeHelper( permutation, AsInt64Slice(shape.layout().minor_to_major()))) { new_layout->add_minor_to_major(index); } - if (shape.layout().padded_dimensions_size() > 0) { - new_layout->clear_padded_dimensions(); - for (auto dim : - Permute(permutation, shape.layout().padded_dimensions())) { - new_layout->add_padded_dimensions(dim); - } - } // The permutation accepted by TransposeIsBitcast is the inverse of the // permutation here. CHECK(TransposeIsBitcast(shape, new_shape, InversePermutation(permutation))) @@ -1302,11 +1271,6 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape, return false; } - // Padding is not handled. - if (LayoutUtil::IsPadded(input_shape) && LayoutUtil::IsPadded(output_shape)) { - return false; - } - // Check the reshape permutes the positions of each dimension in the // minor-to-major order. positions[i]=k means dimension `i` is k-th minor. // input_positions = apply(dimension_mapping, output_positions) @@ -1338,11 +1302,6 @@ ShapeUtil::DimensionsUnmodifiedByReshape(const Shape& input_shape, return false; } - // Padding is not handled. 
- if (LayoutUtil::IsPadded(input_shape) || LayoutUtil::IsPadded(output_shape)) { - return false; - } - CHECK_EQ(ElementsIn(input_shape), ElementsIn(output_shape)); if (ElementsIn(input_shape) == 0) { return true; diff --git a/tensorflow/compiler/xla/shape_util_test.cc b/tensorflow/compiler/xla/shape_util_test.cc index c622ecdca1f..0c647369a37 100644 --- a/tensorflow/compiler/xla/shape_util_test.cc +++ b/tensorflow/compiler/xla/shape_util_test.cc @@ -345,26 +345,6 @@ TEST(ShapeUtilTest, OpaqueVsArray) { EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1)); } -TEST(ShapeUtilTest, CompareShapesWithPaddedDimensionsMismatch) { - Shape shape1 = ShapeUtil::MakeShape(F32, {20, 30}); - shape1.mutable_layout()->add_padded_dimensions(10); - - Shape shape2 = ShapeUtil::MakeShape(F32, {20, 30}); - shape2.mutable_layout()->add_padded_dimensions(11); - - EXPECT_FALSE(ShapeUtil::Equal(shape1, shape2)); -} - -TEST(ShapeUtilTest, CompareShapesWithPaddingValueMismatch) { - Shape shape1 = ShapeUtil::MakeShape(F32, {20, 30}); - shape1.mutable_layout()->set_padding_value(ZERO_PAD); - - Shape shape2 = ShapeUtil::MakeShape(F32, {20, 30}); - shape2.mutable_layout()->set_padding_value(LOWEST_PAD); - - EXPECT_FALSE(ShapeUtil::Equal(shape1, shape2)); -} - TEST(ShapeUtilTest, ScalarDefaultLayoutEqualsScalarEmptyMin2Maj) { Shape scalar_default_layout = ShapeUtil::MakeShape(F32, {}); ASSERT_TRUE(scalar_default_layout.has_layout()) @@ -395,16 +375,6 @@ TEST(ShapeUtilTest, ByteSizeOfWithoutPadding) { EXPECT_EQ(0, ShapeUtil::ByteSizeOf(ShapeUtil::MakeTokenShape())); } -TEST(ShapeUtilTest, ByteSizeOfWithPadding) { - EXPECT_EQ(4, ShapeUtil::ByteSizeOfPrimitiveType(F32)); - Shape shape = ShapeUtil::MakeShape(F32, {10, 20}); - EXPECT_EQ(800, ShapeUtil::ByteSizeOf(shape)); - - shape.mutable_layout()->add_padded_dimensions(15); - shape.mutable_layout()->add_padded_dimensions(21); - EXPECT_EQ(15 * 21 * 4, ShapeUtil::ByteSizeOf(shape)); -} - TEST(ShapeUtilTest, NilShape) { EXPECT_TRUE(ShapeUtil::IsNil(ShapeUtil::MakeNil())); EXPECT_FALSE(ShapeUtil::IsNil(ShapeUtil::MakeShape(F32, {1, 2, 3}))); diff --git a/tensorflow/compiler/xla/xla_data.proto b/tensorflow/compiler/xla/xla_data.proto index 73b3589dbf1..b6bd919e2b2 100644 --- a/tensorflow/compiler/xla/xla_data.proto +++ b/tensorflow/compiler/xla/xla_data.proto @@ -78,28 +78,6 @@ enum PrimitiveType { // Next = 18 } -// Describes the value held inside padding elements. -enum PaddingValue { - INVALID_PAD = 0; - - // Zero padding must be 0-values that correspond to the shape's element type. - ZERO_PAD = 1; - - // One padding must be 1-values that correspond to the shape's element type. - ONE_PAD = 2; - - // "Lowest" padding must be the lowest values in the shape's element type, - // used as padding for operations like max-accumulation. - LOWEST_PAD = 3; - - // "Highest" padding must be the largest values in the shape's element type, - // used as padding for operations like min-accumulation. - HIGHEST_PAD = 4; - - // Unknown padding could be anything; e.g. floating NaNs! - UNKNOWN_PAD = 5; -} - // Describes the padding configuration for Pad operation. The padding amount on // both edges as well as between the elements are specified for each dimension. message PaddingConfig { @@ -123,8 +101,7 @@ message PaddingConfig { // A format specifies the method used by a layout to store an array in memory. enum Format { INVALID_FORMAT = 0; - // The default layout, with exactly one storage location per element (ignoring - // padding). 
+ // The default layout, with exactly one storage location per element. DENSE = 1; // A sparsely encoded layout, providing only the index/value pairs of non-zero // elements. @@ -132,8 +109,7 @@ enum Format { } // A layout describes how the array is placed in (1D) memory space. This -// includes the minor-to-major ordering of dimensions within a shape, as well as -// any padding present in those dimensions. +// includes the minor-to-major ordering of dimensions within a shape. // // Clients must specify the layouts of input Literals to the // computation. Layouts specified in interior operations which take Shapes (for @@ -151,16 +127,11 @@ message Layout { // (slowest varying index). This field is required. repeated int64 minor_to_major = 1; - // The width to which the layout of each dimension is padded up to. If - // present, the size of the padded_dimensions must equal the rank of the - // shape. The padding appears at the end of a dimension, not at the - // beginning. This kind of padding, unlike padding in e.g. convolution, is not - // part of the shape. This field must be unset unless the format is DENSE. - repeated int64 padded_dimensions = 2; + reserved 2; + reserved "padded_dimensions"; - // Describes the values in the padding specified by padded_dimensions. This - // field must be unset unless the format is DENSE. - PaddingValue padding_value = 3; + reserved 3; + reserved "padding_value"; // The maximum number of elements that can be stored for SPARSE formats. This // can be used to determine the maximum size in bytes of arrays stored in
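
Note (illustrative sketch, not part of the patch): the simplified IndexUtil::GetDimensionStride above just multiplies the sizes of the dimensions that are more minor than the requested one, walking the shape's minor-to-major order. A minimal standalone C++ sketch of that rule, using hypothetical names rather than the real XLA types, and reproducing the worked example from the index_util.h comment:

#include <cstdint>
#include <iostream>
#include <vector>

// Stride (in elements) of `dimension`: the product of the sizes of all
// dimensions that are more minor than it in the layout.
int64_t GetDimensionStride(const std::vector<int64_t>& dimensions,
                           const std::vector<int64_t>& minor_to_major,
                           int64_t dimension) {
  int64_t stride = 1;
  for (int64_t dim : minor_to_major) {
    if (dim == dimension) break;
    stride *= dimensions[dim];
  }
  return stride;
}

int main() {
  // F32[5,8,10,4] with layout {3,2,1,0}: stride of dimension 1 is 4 * 10 == 40,
  // matching the GetDimensionStride example in index_util.h.
  std::cout << GetDimensionStride({5, 8, 10, 4}, {3, 2, 1, 0}, 1) << "\n";
  return 0;
}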
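
Similarly, with padded_dimensions removed, the dense-array allocation size computed in ShapeUtil::ByteSizeOf reduces to the element count times the per-element size. A small standalone sketch of that arithmetic (hypothetical helper, not the XLA API), with the numbers from the deleted ByteSizeOfWithPadding test for contrast:

#include <cassert>
#include <cstdint>
#include <vector>

// Bytes needed for a dense array: product of the logical dimension sizes
// times the per-element size; no padded allocation is modeled any more.
int64_t DenseArrayByteSize(const std::vector<int64_t>& dimensions,
                           int64_t bytes_per_element) {
  int64_t elements = 1;
  for (int64_t d : dimensions) elements *= d;
  return elements * bytes_per_element;
}

int main() {
  // F32[10,20] -> 10 * 20 * 4 = 800 bytes. Under the removed
  // ByteSizeOfWithPadding test, padded_dimensions {15, 21} yielded
  // 15 * 21 * 4 = 1260 bytes instead.
  assert(DenseArrayByteSize({10, 20}, 4) == 800);
  return 0;
}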