From 5cbd0bcf412c56c6610c24ae12c83840dc9724a6 Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Thu, 11 Jun 2020 01:03:25 +0000 Subject: [PATCH 01/19] [-Wsign-compare] batch resolution 1 --- .../quantization/import_quant_stats_pass.cc | 4 +-- .../lite/quantization/quantization_config.cc | 4 +-- .../lite/quantization/quantization_driver.cc | 4 +-- .../lite/quantization/quantization_utils.cc | 10 +++---- .../mlir/tensorflow/utils/dump_mlir_util.cc | 2 +- tensorflow/compiler/mlir/xla/ir/chlo_ops.cc | 2 +- tensorflow/compiler/mlir/xla/ir/hlo_ops.cc | 6 ++-- tensorflow/compiler/xla/window_util.cc | 2 +- tensorflow/core/kernels/batch_kernels.cc | 6 ++-- .../core/kernels/data/prefetch_autotuner.cc | 4 +-- tensorflow/core/kernels/quantization_utils.h | 2 +- tensorflow/core/platform/s3/s3_file_system.cc | 2 +- .../core/profiler/utils/derived_timeline.cc | 2 +- .../core/profiler/utils/derived_timeline.h | 2 +- .../core/profiler/utils/xplane_utils.cc | 2 +- tensorflow/core/util/bcast.h | 4 +-- .../convert_trivial_tile_to_concat.cc | 2 +- .../convert_trivial_transpose_to_reshape.cc | 2 +- .../toco/graph_transformations/dequantize.cc | 2 +- .../graph_transformations/drop_fake_quant.cc | 2 +- ...int8_weights_safe_for_fast_int8_kernels.cc | 2 +- .../fuse_broadcast_into_following_binary.cc | 2 +- .../group_bidirectional_sequence_ops.cc | 4 +-- .../graph_transformations/hardcode_min_max.cc | 2 +- .../identify_nearest_upsample.cc | 2 +- .../merge_reshape_into_preceding_transpose.cc | 4 +-- .../propagate_array_data_types.cc | 2 +- .../propagate_fake_quant_num_bits.cc | 2 +- .../propagate_fixed_sizes.cc | 28 +++++++++---------- .../remove_successive_transpose.cc | 10 +++---- .../remove_trivial_passthrough.cc | 2 +- .../reorder_elementwise_unary.cc | 4 +-- .../reorder_reshape_transpose.cc | 12 ++++---- .../resolve_batch_normalization.cc | 10 +++---- .../resolve_constant_concatenation.cc | 2 +- .../resolve_constant_pack.cc | 2 +- .../resolve_constant_slice.cc | 2 +- .../resolve_constant_transpose.cc | 2 +- .../resolve_constant_unary.cc | 4 +-- .../unpartition_embedding_lookup.cc | 4 +-- tensorflow/lite/toco/model_cmdline_flags.cc | 8 +++--- tensorflow/lite/toco/toco_cmdline_flags.cc | 2 +- 42 files changed, 89 insertions(+), 89 deletions(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc index d924a3e82ac..5419a0d5e1b 100644 --- a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc +++ b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc @@ -76,7 +76,7 @@ class ImportQuantStatsPass // If the index is out of range, this method returns false. Otherwise it // returns true if the value is a float tensor. 
bool IsQuantizableResult(Operation *op, int index) { - if (index < 0 || index >= op->getNumResults()) return false; + if (index < 0 || index >= static_cast(op->getNumResults())) return false; Value res = op->getResult(index); return res.getType().isa() && res.getType().cast().getElementType().isa(); @@ -158,7 +158,7 @@ void ImportQuantStatsPass::ImportAsStatsOps(OpBuilder b, Operation *op, InsertStatsOpAtResult(b, op->getResult(index), layer_stats, axis_stats, axis); } else { - for (int i = 0; i < op->getNumResults(); ++i) { + for (int i = 0; i < static_cast(op->getNumResults()); ++i) { if (IsQuantizableResult(op, i)) { InsertStatsOpAtResult(b, op->getResult(i), layer_stats, axis_stats, axis); diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc index 6b897bd5608..c4cf6e71cf3 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc @@ -48,7 +48,7 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_mins; if (!min_values.empty()) { std::vector node_mins_str = absl::StrSplit(min_values, ','); - for (int i = 0; i < node_mins_str.size(); i++) { + for (size_t i = 0; i < node_mins_str.size(); i++) { double value; if (!absl::SimpleAtod(node_mins_str[i], &value)) { return true; @@ -60,7 +60,7 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_maxs; if (!max_values.empty()) { std::vector node_maxs_str = absl::StrSplit(max_values, ','); - for (int i = 0; i < node_maxs_str.size(); i++) { + for (size_t i = 0; i < node_maxs_str.size(); i++) { double value; if (!absl::SimpleAtod(node_maxs_str[i], &value)) { llvm::errs() << "Unexpected mins: " << node_maxs_str[i] << "\n"; diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc index 2964a3e79f8..fc11604ef8a 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc @@ -294,7 +294,7 @@ class QuantizationDriver { return; if (current_op == op) llvm::errs() << "===>>>"; llvm::errs() << op->getName() << " : ("; - for (auto i = 0; i < op->getNumOperands(); ++i) { + for (size_t i = 0; i < op->getNumOperands(); ++i) { if (auto params = GetOperandQuantState(op, i).params) params.print(llvm::errs()); else @@ -303,7 +303,7 @@ class QuantizationDriver { llvm::errs() << ","; } llvm::errs() << ") -> ("; - for (auto i = 0; i < op->getNumResults(); ++i) { + for (size_t i = 0; i < op->getNumResults(); ++i) { if (auto params = GetResultQuantState(op, i).params) params.print(llvm::errs()); else diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc index 3d50f280d0f..b9ca5329519 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc @@ -54,7 +54,7 @@ static Type GetQuantizedType(Builder builder, Type input_type, } else if (min.size() == max.size()) { auto shape = input_type.dyn_cast(); if (!shape || shape.getRank() <= quant_dim || - min.size() != shape.getDimSize(quant_dim)) { + static_cast(min.size()) != shape.getDimSize(quant_dim)) { return {}; } // TODO(b/141508873): the quantization dim is set to the last dimension. 
@@ -75,7 +75,7 @@ TypeAttr RescaleQuantizedType(Type input, Attribute factor) { if (auto qtype = ele_type.dyn_cast()) { ArrayRef scales = qtype.getScales(); // Broadcasting hasn't been implemented yet. - if (scales.size() != factor_values.getNumElements()) return {}; + if (static_cast(scales.size()) != factor_values.getNumElements()) return {}; SmallVector new_scales; new_scales.reserve(scales.size()); auto scales_iter = scales.begin(); @@ -269,7 +269,7 @@ Type GetUniformQuantizedPerAxisTypeForWeight(ElementsAttr attr, int quant_dim, bool narrow_range) { Builder builder(attr.getContext()); auto shape = attr.getType().cast().getShape(); - if (shape.size() <= quant_dim) return {}; + if (static_cast(shape.size()) <= quant_dim) return {}; // `symmetric` can only be used when it is `signed` and `narrow_range`. if (symmetric && (!is_signed || !narrow_range)) return {}; @@ -334,7 +334,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias( const std::vector& op_types) { if (op_types.empty()) return {}; - int axis_size = 1; + size_t axis_size = 1; int32_t quant_dim = -1; Type expressed_type; // Requires all the op types are valid UniformQuantizedTypes or @@ -368,7 +368,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias( scales[index_scale.index()] *= index_scale.value(); } } else if (auto type = op_type.dyn_cast()) { - for (int index = 0; index != axis_size; ++index) { + for (size_t index = 0; index != axis_size; ++index) { scales[index] *= type.getScale(); } } diff --git a/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc b/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc index 797687ea658..b5a6c922707 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc +++ b/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc @@ -41,7 +41,7 @@ std::string MakeUniqueFilename(string name) { static NameCounts& instance = *new NameCounts; // Remove illegal characters from `name`. - for (int i = 0; i < name.size(); ++i) { + for (size_t i = 0; i < name.size(); ++i) { char ch = name[i]; if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' 
|| ch == '\\') { diff --git a/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc b/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc index 26db4549a2a..f5b895f0c76 100644 --- a/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc +++ b/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc @@ -49,7 +49,7 @@ static Type GetBroadcastType(Type x, Type y, Type element_type, if (shape_x.size() == shape_y.size()) { llvm::SmallVector out_shape(shape_x.size()); - for (int i = 0; i < shape_x.size(); i++) { + for (size_t i = 0; i < shape_x.size(); i++) { auto x_val = shape_x[i]; auto y_val = shape_y[i]; if (x_val == -1 || y_val == -1) { diff --git a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc index d20f1713eba..569e45912a2 100644 --- a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc +++ b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc @@ -143,7 +143,7 @@ DenseIntElementsAttr BuildConvPaddingAttrs( int rank = padding_low.size(); SmallVector padding; - for (unsigned i = 0; i < rank; ++i) { + for (unsigned i = 0; i < static_cast(rank); ++i) { padding.push_back(GetPaddingValue(padding_attr, {i, 0}) + padding_low[i]); padding.push_back(GetPaddingValue(padding_attr, {i, 1}) + padding_high[i]); } @@ -853,7 +853,7 @@ static Attribute foldConcatenateHelper(ConcatenateOp* op, auto shape = type.getShape(); size_t top_size = 1; - for (int i = 0; i < axis; i++) { + for (size_t i = 0; i < axis; i++) { top_size = top_size * shape[i]; } @@ -1118,7 +1118,7 @@ static LogicalResult Verify(MapOp op) { // increasing. auto values = op.dimensions().getValues(); auto dimensions = std::vector{values.begin(), values.end()}; - for (int i = 0; i < dimensions.size(); ++i) { + for (int i = 0; static_cast(i) < dimensions.size(); ++i) { if (dimensions[i] != i) return op.emitOpError() << "requires monotonically increasing dimension " "numbers, but got: " diff --git a/tensorflow/compiler/xla/window_util.cc b/tensorflow/compiler/xla/window_util.cc index a58179c3ee0..e33d0b6d1dc 100644 --- a/tensorflow/compiler/xla/window_util.cc +++ b/tensorflow/compiler/xla/window_util.cc @@ -42,7 +42,7 @@ Window MakeWindow(absl::Span sizes, absl::Span strides) { Window window; CHECK_EQ(sizes.size(), strides.size()); - for (auto nb = 0; nb < sizes.size(); ++nb) { + for (auto nb = 0; static_cast(nb) < sizes.size(); ++nb) { auto* dimension = window.add_dimensions(); dimension->set_size(sizes[nb]); dimension->set_stride(strides[nb]); diff --git a/tensorflow/core/kernels/batch_kernels.cc b/tensorflow/core/kernels/batch_kernels.cc index 151f2367c95..ee271f1a123 100644 --- a/tensorflow/core/kernels/batch_kernels.cc +++ b/tensorflow/core/kernels/batch_kernels.cc @@ -486,18 +486,18 @@ class BatchResource : public ResourceBase { std::map> split_tensors; DCHECK_EQ(batch->task(0).context->num_outputs(), combined_outputs.size()); - if (combined_outputs.size() != batch->task(0).context->num_outputs()) { + if (static_cast(combined_outputs.size()) != batch->task(0).context->num_outputs()) { return errors::Internal("Wrong number of batched output tensors"); } // Generate 'split_tensors' and populate the context outputs. 
- for (int i = 0; i < combined_outputs.size(); ++i) { + for (size_t i = 0; i < combined_outputs.size(); ++i) { const Tensor& output_tensor = combined_outputs[i]; if (output_tensor.shape().dims() == 0) { return errors::FailedPrecondition( "Batched output tensor has 0 dimensions"); } - if (output_tensor.shape().dim_size(0) != batch->size() + padding_size) { + if (output_tensor.shape().dim_size(0) != static_cast(batch->size() + padding_size)) { return errors::FailedPrecondition( "Batched output tensor's 0th dimension does not equal the sum of " "the 0th dimension sizes of the input tensors"); diff --git a/tensorflow/core/kernels/data/prefetch_autotuner.cc b/tensorflow/core/kernels/data/prefetch_autotuner.cc index a3bb1acc352..a3fd9919d6b 100644 --- a/tensorflow/core/kernels/data/prefetch_autotuner.cc +++ b/tensorflow/core/kernels/data/prefetch_autotuner.cc @@ -40,13 +40,13 @@ void PrefetchAutotuner::RecordConsumption(size_t current_buffer_size) { case Mode::kDisabled: return; case Mode::kUpswing: - if (current_buffer_size == buffer_limit_) { + if (static_cast(current_buffer_size) == buffer_limit_) { mode_ = Mode::kDownswing; } return; case Mode::kDownswing: if (current_buffer_size == 0) { - if (buffer_limit_ >= kBufferLimitThreshold) { + if (buffer_limit_ >= static_cast(kBufferLimitThreshold)) { buffer_limit_ += kBufferLimitThreshold; } else { buffer_limit_ *= 2; diff --git a/tensorflow/core/kernels/quantization_utils.h b/tensorflow/core/kernels/quantization_utils.h index 315616f3fb3..06c901967b0 100644 --- a/tensorflow/core/kernels/quantization_utils.h +++ b/tensorflow/core/kernels/quantization_utils.h @@ -268,7 +268,7 @@ inline void RequantizeManyInNewRangeReference(const qint32* input, int64 count, // that could be easily adapted for a SIMD implementation. It should also be // possible to perform all the calculations in 32-bit rather than 64, but // that's not been implemented yet. 
- for (size_t index = 0; index < count; ++index) { + for (size_t index = 0; static_cast(index) < count; ++index) { const int64 input_value = static_cast(input[index]); const int64 fp_value = ((input_value * range_scale_fp) >> 32) + input_offset_fp; diff --git a/tensorflow/core/platform/s3/s3_file_system.cc b/tensorflow/core/platform/s3/s3_file_system.cc index 1726c9fbc6c..45d648abcc0 100644 --- a/tensorflow/core/platform/s3/s3_file_system.cc +++ b/tensorflow/core/platform/s3/s3_file_system.cc @@ -906,7 +906,7 @@ Status S3FileSystem::MultiPartCopy(const Aws::String& source, // wait on the mutex until notify is called // then check the finished parts as there could be false notifications multi_part_copy_cv.wait(lock, [&finishedPartStates, num_parts] { - return finishedPartStates.size() == num_parts; + return static_cast(finishedPartStates.size()) == num_parts; }); } // check if there was any error for any part diff --git a/tensorflow/core/profiler/utils/derived_timeline.cc b/tensorflow/core/profiler/utils/derived_timeline.cc index 112c0977763..3d03fc22c16 100644 --- a/tensorflow/core/profiler/utils/derived_timeline.cc +++ b/tensorflow/core/profiler/utils/derived_timeline.cc @@ -130,7 +130,7 @@ void DerivedXLineBuilder::ExpandOrAddLevelEvent(const XEvent& event, } void DerivedXLineBuilder::ResetLastEvents(int level) { - for (int i = level; i < last_event_by_level_.size(); ++i) { + for (int i = level; i < static_cast(last_event_by_level_.size()); ++i) { last_event_by_level_[i] = absl::nullopt; } if (level == 0) ResetDependentLines(); diff --git a/tensorflow/core/profiler/utils/derived_timeline.h b/tensorflow/core/profiler/utils/derived_timeline.h index cd4da7996c5..92489399b8f 100644 --- a/tensorflow/core/profiler/utils/derived_timeline.h +++ b/tensorflow/core/profiler/utils/derived_timeline.h @@ -37,7 +37,7 @@ class DerivedXLineBuilder { std::vector dependent_lines); void ExpandOrAddEvents(const std::vector& event_per_level) { - for (int level = 0; level < event_per_level.size(); ++level) { + for (size_t level = 0; level < event_per_level.size(); ++level) { ExpandOrAddLevelEvent(event_per_level[level], level); } } diff --git a/tensorflow/core/profiler/utils/xplane_utils.cc b/tensorflow/core/profiler/utils/xplane_utils.cc index 7f5221c5391..1fe476ce79c 100644 --- a/tensorflow/core/profiler/utils/xplane_utils.cc +++ b/tensorflow/core/profiler/utils/xplane_utils.cc @@ -266,7 +266,7 @@ void SortXSpace(XSpace* space) { // smaller than these value. void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) { for (XLine& line : *plane->mutable_lines()) { - if (line.timestamp_ns() >= start_time_ns) { + if (line.timestamp_ns() >= static_cast(start_time_ns)) { line.set_timestamp_ns(line.timestamp_ns() - start_time_ns); } } diff --git a/tensorflow/core/util/bcast.h b/tensorflow/core/util/bcast.h index 7bb8ea18ad3..075de84964e 100644 --- a/tensorflow/core/util/bcast.h +++ b/tensorflow/core/util/bcast.h @@ -139,7 +139,7 @@ BCastList::BCastList(const BCastList::Vec (&x)[N], if (x[i] != x[0]) { all_equal = false; } - if (x[i].size() > largest_rank) { + if (static_cast(x[i].size()) > largest_rank) { largest_rank = x[i].size(); } } @@ -176,7 +176,7 @@ BCastList::BCastList(const BCastList::Vec (&x)[N], // 1-extend and align all vectors. 
for (int i = 0; i < N; ++i) { - if (copy[i].size() < largest_rank) { + if (static_cast(copy[i].size()) < largest_rank) { copy[i].resize(largest_rank, 1); } } diff --git a/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc b/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc index 46288d2a1ed..c19ccf676c9 100644 --- a/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc +++ b/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc @@ -52,7 +52,7 @@ namespace toco { // It then just becomes a concat along that dimension. int non_one_dims = 0; int concat_axis = 0; - for (int i = 0; i < multiples.size(); ++i) { + for (size_t i = 0; i < multiples.size(); ++i) { if (multiples[i] != 1) { ++non_one_dims; concat_axis = i; diff --git a/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc b/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc index 2b5aaea2b23..fa8a69a1e7a 100644 --- a/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc +++ b/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc @@ -31,7 +31,7 @@ bool TransposeAffectsMemoryOrder(std::vector perm, // just the shape) then the flat buffer representation shouldn't change. std::vector old_major_index_ordering; std::vector new_major_index_ordering; - for (int i = 0; i < in_shape.size(); i++) { + for (int i = 0; static_cast(i) < in_shape.size(); i++) { if (in_shape[i] != 1) { old_major_index_ordering.push_back(i); } diff --git a/tensorflow/lite/toco/graph_transformations/dequantize.cc b/tensorflow/lite/toco/graph_transformations/dequantize.cc index cc5dddbb40e..c87c305a70d 100644 --- a/tensorflow/lite/toco/graph_transformations/dequantize.cc +++ b/tensorflow/lite/toco/graph_transformations/dequantize.cc @@ -35,7 +35,7 @@ void DequantizeBuffer(Array* array) { auto& new_data = array->GetMutableBuffer().data; new_data.resize(old_data.size()); const auto& qparams = array->GetQuantizationParams(); - for (int i = 0; i < old_data.size(); i++) { + for (size_t i = 0; i < old_data.size(); i++) { new_data[i] = qparams.scale * (old_data[i] - qparams.zero_point); } } diff --git a/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc b/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc index bb8679bced8..3a0b4d0103f 100644 --- a/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc +++ b/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc @@ -45,7 +45,7 @@ namespace toco { } // Drop min/max inputs - for (int i = 1; i < fakequant_op->inputs.size(); i++) { + for (size_t i = 1; i < fakequant_op->inputs.size(); i++) { if (CountOpsWithInput(*model, fakequant_op->inputs[i]) == 1) { model->EraseArray(fakequant_op->inputs[i]); } diff --git a/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc b/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc index 918bb489995..ce4574cdfbf 100644 --- a/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc +++ b/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc @@ -166,7 +166,7 @@ namespace toco { int index_of_previous_bad_value = 0; bool changed = false; - for (int i = 0; i < buffer_data.size(); i++) { + for (size_t i = 0; i < buffer_data.size(); i++) { if (buffer_data[i] == 0) { count_bad++; if (count_bad > 1) { 
diff --git a/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc b/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc index ba3e277f676..2c5c2cbb5f1 100644 --- a/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc +++ b/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc @@ -34,7 +34,7 @@ bool IsBroadcastingOp(const Model& model, Operator* op) { // Concatenation of identical inputs is usually a broadcast. if (op->type == OperatorType::kConcatenation) { // Verify that all inputs are the same. - for (int i = 1; i < op->inputs.size(); ++i) { + for (size_t i = 1; i < op->inputs.size(); ++i) { if (op->inputs[i] != op->inputs[0]) { return false; } diff --git a/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc b/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc index fa252b1a61b..a6d95ec43b1 100644 --- a/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc +++ b/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc @@ -125,7 +125,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid( return false; // Make sure the inputs datatype matches. - for (int i = 0; i < fw_sequence_op->inputs.size(); ++i) { + for (size_t i = 0; i < fw_sequence_op->inputs.size(); ++i) { const auto& fw_input_array_name = fw_sequence_op->inputs[i]; const auto& bw_input_array_name = bw_sequence_op->inputs[i]; if (model.HasArray(fw_input_array_name) && @@ -137,7 +137,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid( } // Make sure the outputs datatype matches. - for (int i = 0; i < fw_sequence_op->outputs.size(); ++i) { + for (size_t i = 0; i < fw_sequence_op->outputs.size(); ++i) { const auto& fw_output_array_name = fw_sequence_op->outputs[i]; const auto& bw_output_array_name = bw_sequence_op->outputs[i]; if (model.HasArray(fw_output_array_name) && diff --git a/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc b/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc index 171d522daa7..4250668bcf5 100644 --- a/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc +++ b/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc @@ -405,7 +405,7 @@ bool HardcodeMinMaxForPack(Model* model, Operator* op) { } const auto& first_input_minmax = first_input_array.GetMinMax(); - for (int i = 1; i < op->inputs.size(); i++) { + for (size_t i = 1; i < op->inputs.size(); i++) { const auto& input_array = model->GetArray(op->inputs[i]); if (!input_array.minmax) { return false; diff --git a/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc b/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc index 2ab6692a3a8..08894c93a5b 100644 --- a/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc +++ b/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc @@ -199,7 +199,7 @@ std::vector>::iterator FindOperator( shape_array.data_type = ArrayDataType::kInt32; auto& shape_buffer = shape_array.GetMutableBuffer(); // This is what imagined as the original shape. 
- for (int i = 0; i < imagined_original_shape.size(); ++i) { + for (size_t i = 0; i < imagined_original_shape.size(); ++i) { shape_buffer.data.push_back(imagined_original_shape.at(i)); } diff --git a/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc b/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc index 80170fe8bcb..a76ae1a0635 100644 --- a/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc @@ -70,7 +70,7 @@ std::vector ReshapeToTranspose(const Model& model, std::vector not_one_indices; // Separate into one indices and not one indices. - for (int i = 0; i < in_shape.size(); i++) { + for (size_t i = 0; i < in_shape.size(); i++) { if (in_shape[i] == 1) { one_indices.push_back(i); } else { @@ -167,7 +167,7 @@ std::vector ReshapeToTranspose(const Model& model, // Combine the permutations. const auto& transpose_perm = transpose_op->perm; - for (int i = 0; i < merged_perm.size(); i++) { + for (size_t i = 0; i < merged_perm.size(); i++) { merged_perm[i] = transpose_perm[merged_perm[i]]; } diff --git a/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc b/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc index 49d59de860b..2f316934311 100644 --- a/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc +++ b/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc @@ -170,7 +170,7 @@ void SetDataTypeForAllOutputs(Model* model, Operator* op, if (unsupported_op->output_data_types.size() < op->outputs.size()) { return ::tensorflow::Status::OK(); } - for (int i = 0; i < op->outputs.size(); ++i) { + for (size_t i = 0; i < op->outputs.size(); ++i) { const string& output = op->outputs[i]; const ArrayDataType data_type = unsupported_op->output_data_types[i]; model->GetArray(output).data_type = data_type; diff --git a/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc b/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc index 1ed618879c1..94779f54af2 100644 --- a/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc +++ b/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc @@ -149,7 +149,7 @@ bool RecursivelyBackwardPropagateDataType(GraphTransformation* transformation, ArrayDataType new_data_type, const MinMax& new_minmax) { bool did_change = false; - for (int input_index = 0; input_index < op->inputs.size(); ++input_index) { + for (size_t input_index = 0; input_index < op->inputs.size(); ++input_index) { const auto& input = op->inputs[input_index]; auto& input_array = model->GetArray(input); diff --git a/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc index 006e624eb7a..520cd8b495a 100644 --- a/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc +++ b/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc @@ -431,7 +431,7 @@ void ProcessTensorFlowReshapeOperator(Model* model, bool has_wildcard = false; int wildcard_index = 0; int product_non_wildcard_dims = 1; - for (int i = 0; i < shape_data.size(); i++) { + for (size_t i = 0; i < shape_data.size(); i++) { if (shape_data[i] == -1) { CHECK(!has_wildcard); has_wildcard = true; @@ -574,7 +574,7 @@ void ProcessTensorFlowReductionOperator(Model* model, Operator* op) { std::set 
true_indices; const auto& reduction_indices = reduction_indices_array.GetBuffer().data; - for (int i = 0; i < reduction_indices.size(); ++i) { + for (size_t i = 0; i < reduction_indices.size(); ++i) { const int32 reduction_index = reduction_indices[i]; if (reduction_index < -input_rank || reduction_index >= input_rank) { CHECK(false) << "Invalid reduction dimension " << reduction_index @@ -627,7 +627,7 @@ void ProcessSliceOperator(Model* model, SliceOperator* op) { CHECK_EQ(op->begin.size(), op->size.size()); std::vector output_dims; - for (int i = 0; i < op->begin.size(); ++i) { + for (size_t i = 0; i < op->begin.size(); ++i) { int size = op->size[i]; if (size == -1) { size = input_array.shape().dims(i) - op->begin[i]; @@ -883,7 +883,7 @@ void ProcessTensorFlowSplitVOperator(Model* model, CHECK_EQ(op->outputs.size(), op->num_split); - for (int i = 0; i < op->outputs.size(); ++i) { + for (size_t i = 0; i < op->outputs.size(); ++i) { const auto& output = op->outputs[i]; Shape output_shape = input_shape; (*output_shape.mutable_dims())[axis] = size_splits_vector.at(i); @@ -1514,7 +1514,7 @@ void ProcessPadOperator(Model* model, PadOperator* op) { std::vector& dims = *output_shape.mutable_dims(); CHECK_EQ(op->left_padding.size(), dims.size()); - for (int i = 0; i < op->left_padding.size(); ++i) { + for (size_t i = 0; i < op->left_padding.size(); ++i) { dims[i] += op->left_padding[i] + op->right_padding[i]; } @@ -1540,7 +1540,7 @@ void ProcessPadV2Operator(Model* model, PadV2Operator* op) { std::vector& dims = *output_shape.mutable_dims(); CHECK_EQ(op->left_padding.size(), dims.size()); - for (int i = 0; i < op->left_padding.size(); ++i) { + for (size_t i = 0; i < op->left_padding.size(); ++i) { dims[i] += op->left_padding[i] + op->right_padding[i]; } @@ -1683,7 +1683,7 @@ void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) { CHECK_LE(op->strides.size(), num_input_axes) << "StridedSlice op with output \"" << op->outputs[0] << "\", requires no more than " << num_input_axes << " strides"; - for (int i = 0; i < op->strides.size(); i++) { + for (size_t i = 0; i < op->strides.size(); i++) { CHECK_NE(op->strides[i], 0) << "Strides must be non-zero. 
Axis " << i << " has stride=" << op->strides[i] << "."; } @@ -1814,7 +1814,7 @@ void ProcessTransposeOperator(Model* model, TransposeOperator* op) { << "Transpose permutation input " << op->inputs[1] << " must be same length as input dimensions"; std::vector* output_dims = output_array.mutable_shape()->mutable_dims(); - for (int i = 0; i < perm.size(); i++) { + for (size_t i = 0; i < perm.size(); i++) { int axis = perm[i]; CHECK_GE(axis, 0); CHECK_LT(axis, input_shape.dimensions_count()); @@ -1856,8 +1856,8 @@ void ProcessArgMinMaxOperator(Model* model, Op* op) { std::vector output_dims; output_dims.reserve(input_dims.size() - 1); - for (int i = 0; i < input_dims.size(); ++i) { - if (i != axis) { + for (size_t i = 0; i < input_dims.size(); ++i) { + if ( static_cast(i) != axis) { output_dims.push_back(input_dims[i]); } } @@ -1938,7 +1938,7 @@ void ProcessTileOperator(Model* model, TensorFlowTileOperator* op) { auto* mutable_dims = output_array.mutable_shape()->mutable_dims(); mutable_dims->resize(multiples.size()); - for (int i = 0; i < mutable_dims->size(); ++i) { + for (size_t i = 0; i < mutable_dims->size(); ++i) { (*mutable_dims)[i] = input_shape.dims(i) * multiples[i]; } } @@ -2010,8 +2010,8 @@ void ProcessUnpackOperator(Model* model, UnpackOperator* op) { std::vector output_dims; output_dims.reserve(input_dims.size() - 1); - for (int i = 0; i < input_dims.size(); ++i) { - if (i != op->axis) { + for (size_t i = 0; i < input_dims.size(); ++i) { + if ( static_cast(i) != op->axis) { output_dims.push_back(input_dims[i]); } } @@ -2399,7 +2399,7 @@ void ProcessScatterNdOperator(Model* model, ScatterNdOperator* op) { if (unsupported_op->output_shapes.size() < op->outputs.size()) { return ::tensorflow::Status::OK(); } - for (int i = 0; i < op->outputs.size(); ++i) { + for (size_t i = 0; i < op->outputs.size(); ++i) { const string& output = op->outputs[i]; model->GetArray(output).copy_shape(unsupported_op->output_shapes.at(i)); } diff --git a/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc b/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc index 6eccda04c18..1cb3a300127 100644 --- a/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc @@ -31,12 +31,12 @@ bool TransformsToIdentity(std::vector const& perm1, // perm1 is the order of the indices after first transpose. When perm1 is // reordered according to perm2, if the result is simple increasing sequence // i.e., range(0, perm1.size()), then the two transposes cancel each other. 
- for (int i = 0; i < perm1.size(); ++i) { - if (perm1[i] < 0 || perm1[i] >= perm1.size() || perm2[i] < 0 || - perm2[i] >= perm1.size()) { + for (size_t i = 0; i < perm1.size(); ++i) { + if (perm1[i] < 0 || perm1[i] >= static_cast(perm1.size()) || perm2[i] < 0 || + perm2[i] >= static_cast(perm1.size())) { return false; } - if (perm1[perm2[i]] != i) { + if (perm1[perm2[i]] != static_cast(i)) { return false; } } @@ -46,7 +46,7 @@ bool TransformsToIdentity(std::vector const& perm1, void ReplaceOpInputsWith(Model* model, const string& lookfor, const string& replacewith) { for (const auto& op : model->operators) { - for (int i = 0; i < op->inputs.size(); ++i) { + for (size_t i = 0; i < op->inputs.size(); ++i) { if (op->inputs[i] == lookfor) { op->inputs[i] = replacewith; } diff --git a/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc b/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc index bd529bd9ecd..eeb8751bf86 100644 --- a/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc +++ b/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc @@ -82,7 +82,7 @@ bool RemoveTrivialPassthroughOp(GraphTransformation* transformation, // We call 'main input' the unique nonconstant input array if there is one, // or else the 0-th input. int count_nonconstant_input_arrays = 0; - for (int i = 0; i < passthru_op->inputs.size(); i++) { + for (size_t i = 0; i < passthru_op->inputs.size(); i++) { if (!model->GetArray(passthru_op->inputs[i]).buffer) { count_nonconstant_input_arrays++; if (count_nonconstant_input_arrays == 1) { diff --git a/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc b/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc index 17a5e9a1d6a..38edff76d55 100644 --- a/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc +++ b/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc @@ -127,9 +127,9 @@ bool IsMoveOperator(OperatorType optype) { move_op->outputs[0] = output_name; } else { // The intermediate array is now the output array. - for (int i = 0; i < model->operators.size(); i++) { + for (size_t i = 0; i < model->operators.size(); i++) { Operator* consumer = model->operators[i].get(); - for (int j = 0; j < consumer->inputs.size(); j++) { + for (size_t j = 0; j < consumer->inputs.size(); j++) { if (consumer->inputs[j] == output_name) { consumer->inputs[j] = intermediate_name; } diff --git a/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc b/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc index 0fbcf9f73b1..b2d184cdc31 100644 --- a/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc @@ -60,7 +60,7 @@ std::vector ComputeNewPerm(std::vector input_dims, std::vector perm) { // These are the major axis of the input. std::vector input_indices; - for (int i = 0; i < input_dims.size(); i++) { + for (size_t i = 0; i < input_dims.size(); i++) { if (input_dims[i] != 1) { input_indices.push_back(i); } @@ -69,7 +69,7 @@ std::vector ComputeNewPerm(std::vector input_dims, // This maps which indices of the input produced the intermediate indices for // non-unary dimensions. 
std::unordered_map intermediate_to_input_indices_map; - for (int i = 0; i < intermediate_dims.size(); i++) { + for (size_t i = 0; i < intermediate_dims.size(); i++) { if (intermediate_dims[i] != 1) { intermediate_to_input_indices_map[i] = input_indices[intermediate_to_input_indices_map.size()]; @@ -80,14 +80,14 @@ std::vector ComputeNewPerm(std::vector input_dims, // major indices. std::vector new_perm; new_perm.reserve(input_dims.size()); - for (int i = 0; i < perm.size(); i++) { + for (size_t i = 0; i < perm.size(); i++) { if (intermediate_dims[perm[i]] == 1) continue; new_perm.push_back(intermediate_to_input_indices_map[perm[i]]); } // Fill the rest of the transpose in with the ones. - for (int index = 0; index < input_dims.size(); index++) { + for (size_t index = 0; index < input_dims.size(); index++) { if (input_dims[index] == 1) { new_perm.push_back(index); } @@ -193,9 +193,9 @@ std::vector ComputeNewPerm(std::vector input_dims, DeleteArrayIfUnused(intermediate_name, model); } else { // The intermediate array is now the output array. - for (int i = 0; i < model->operators.size(); i++) { + for (size_t i = 0; i < model->operators.size(); i++) { Operator* consumer = model->operators[i].get(); - for (int j = 0; j < consumer->inputs.size(); j++) { + for (size_t j = 0; j < consumer->inputs.size(); j++) { if (consumer->inputs[j] == output_name) { consumer->inputs[j] = intermediate_name; } diff --git a/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc b/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc index 6e5815ee94d..545c53fb31a 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc @@ -124,11 +124,11 @@ namespace toco { const auto& offset_float_data = offset_array.GetBuffer().data; - CHECK(mul_float_data.size() == buffer_size); - CHECK(add_float_data.size() == buffer_size); - CHECK(mean_float_data.size() == buffer_size); - CHECK(multiplier_float_data.size() == buffer_size); - CHECK(offset_float_data.size() == buffer_size); + CHECK(static_cast(mul_float_data.size()) == buffer_size); + CHECK(static_cast(add_float_data.size()) == buffer_size); + CHECK(static_cast(mean_float_data.size()) == buffer_size); + CHECK(static_cast(multiplier_float_data.size()) == buffer_size); + CHECK(static_cast(offset_float_data.size()) == buffer_size); for (int i = 0; i < buffer_size; i++) { mul_float_data[i] = multiplier_float_data[i]; diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc index 7c9aa025f64..20e805a29e0 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc @@ -64,7 +64,7 @@ void CopyTensorSegments(const std::vector& input_arrays, // Copy the data from input_arrays to concatenated_array_buffer. 
T* dest_ptr = concatenated_array_buffer.data(); for (int s = 0; s < total_copy_steps; s++) { - for (int i = 0; i < input_arrays.size(); i++) { + for (size_t i = 0; i < input_arrays.size(); i++) { std::copy(src_ptr[i], src_ptr[i] + array_copy_size[i], dest_ptr); src_ptr[i] += array_copy_size[i]; dest_ptr += array_copy_size[i]; diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc index 0df35509d3d..c6dc093ba00 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc @@ -36,7 +36,7 @@ void Pack(Model* model, PackOperator const& op) { // Pack inputs into buffer CHECK_EQ(op.axis, 0) << "Packing only supported along first axis"; int dst_offset = 0; - for (int i = 0; i < op.inputs.size(); i++) { + for (size_t i = 0; i < op.inputs.size(); i++) { // Append array data to output for each input array const auto& input_array = model->GetArray(op.inputs[i]); int input_size = RequiredBufferSizeForShape(input_array.shape()); diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc index fd71fb1873a..34a1a1ce899 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc @@ -50,7 +50,7 @@ bool Slice(SliceOperator const& op, Array const& input_array, CHECK_LE(size.size(), 4); std::vector begin = op.begin; std::vector end; - for (int i = 0; i < begin.size(); ++i) { + for (size_t i = 0; i < begin.size(); ++i) { int dim_size = size[i]; if (dim_size == -1) { // -1 means the rest of the dimension. diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc index 7ceffe6307e..a822f7b79e3 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc @@ -40,7 +40,7 @@ void Transpose(Model* model, const Array& input_array, CHECK(input_shape.dimensions_count() == output_shape.dimensions_count()); const int dim = input_shape.dimensions_count(); CHECK_LE(dim, 4); - CHECK(perm.size() >= dim); + CHECK(static_cast(perm.size()) >= dim); for (int i = 0; i < dim; i++) { CHECK(perm[i] >= 0 && perm[i] < dim); CHECK(input_shape.dims(perm[i]) == output_shape.dims(i)); diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc index 197e17eee16..4d6cd188729 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc @@ -62,7 +62,7 @@ void ReduceGeneric(bool keep_dims, const std::vector& axes, } std::vector output_indices(input_shape.dimensions_count()); - for (int input_offset = 0; input_offset < input.size(); ++input_offset) { + for (size_t input_offset = 0; input_offset < input.size(); ++input_offset) { std::vector input_indices = ReverseOffset(input_shape, input_offset); // Calculate the output location by squashing input indices to 0 // in reduced axes. 
@@ -319,7 +319,7 @@ bool CopyMinMaxFromFirstInput(const Operator& op, Model* model) { } else if (unary_op->type == OperatorType::kRelu6 || unary_op->type == OperatorType::kRelu1 || unary_op->type == OperatorType::kRelu) { - for (size_t i = 0; i < output_buffer_size; ++i) { + for (int i = 0; i < output_buffer_size; ++i) { const float value = (*input_float_data)[i]; float new_value = 0.0f; switch (unary_op->type) { diff --git a/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc b/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc index 1f7035c21e2..84d5922aae8 100644 --- a/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc +++ b/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc @@ -57,10 +57,10 @@ namespace toco { // Split up the DynamicStitch inputs into the indices and data. std::vector stitch_indices_inputs; std::vector stitch_data_inputs; - for (size_t i = 0; i < stitch_op->num_partitions; ++i) { + for (int i = 0; i < stitch_op->num_partitions; ++i) { stitch_indices_inputs.push_back(stitch_op->inputs[i]); } - for (size_t i = stitch_op->num_partitions; i < stitch_op->num_partitions * 2; + for (int i = stitch_op->num_partitions; i < stitch_op->num_partitions * 2; ++i) { stitch_data_inputs.push_back(stitch_op->inputs[i]); } diff --git a/tensorflow/lite/toco/model_cmdline_flags.cc b/tensorflow/lite/toco/model_cmdline_flags.cc index 2434481272f..351884fbf1e 100644 --- a/tensorflow/lite/toco/model_cmdline_flags.cc +++ b/tensorflow/lite/toco/model_cmdline_flags.cc @@ -263,7 +263,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector mean_values = absl::StrSplit(parsed_model_flags.mean_values.value(), ','); - QCHECK(mean_values.size() == model_flags->input_arrays_size()); + QCHECK(static_cast(mean_values.size()) == model_flags->input_arrays_size()); for (size_t i = 0; i < mean_values.size(); ++i) { char* last = nullptr; model_flags->mutable_input_arrays(i)->set_mean_value( @@ -280,7 +280,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector std_values = absl::StrSplit(parsed_model_flags.std_values.value(), ','); - QCHECK(std_values.size() == model_flags->input_arrays_size()); + QCHECK( static_cast(std_values.size()) == model_flags->input_arrays_size()); for (size_t i = 0; i < std_values.size(); ++i) { char* last = nullptr; model_flags->mutable_input_arrays(i)->set_std_value( @@ -298,7 +298,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector input_data_types = absl::StrSplit(parsed_model_flags.input_data_types.value(), ','); - QCHECK(input_data_types.size() == model_flags->input_arrays_size()); + QCHECK(static_cast(input_data_types.size()) == model_flags->input_arrays_size()); for (size_t i = 0; i < input_data_types.size(); ++i) { IODataType type; QCHECK(IODataType_Parse(input_data_types[i], &type)); @@ -321,7 +321,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector input_shapes = absl::StrSplit(parsed_model_flags.input_shapes.value(), ':'); - QCHECK(input_shapes.size() == model_flags->input_arrays_size()); + QCHECK(static_cast(input_shapes.size()) == model_flags->input_arrays_size()); for (size_t i = 0; i < input_shapes.size(); ++i) { auto* shape = model_flags->mutable_input_arrays(i)->mutable_shape(); shape->clear_dims(); diff --git a/tensorflow/lite/toco/toco_cmdline_flags.cc b/tensorflow/lite/toco/toco_cmdline_flags.cc index c133db8f2a4..9697a1ecbbd 
100644 --- a/tensorflow/lite/toco/toco_cmdline_flags.cc +++ b/tensorflow/lite/toco/toco_cmdline_flags.cc @@ -320,7 +320,7 @@ void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags, std::vector input_types = absl::StrSplit(parsed_toco_flags.input_types.value(), ','); QCHECK(!input_types.empty()); - for (int i = 1; i < input_types.size(); i++) { + for (size_t i = 1; i < input_types.size(); i++) { QCHECK_EQ(input_types[i], input_types[0]); } toco::IODataType input_type; From ec0217f4ac194741cf6c566f01747a5eb771edc6 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 11:29:41 -0400 Subject: [PATCH 02/19] Update quantization_config.cc --- .../compiler/mlir/lite/quantization/quantization_config.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc index c4cf6e71cf3..634d212409e 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc @@ -60,7 +60,7 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_maxs; if (!max_values.empty()) { std::vector node_maxs_str = absl::StrSplit(max_values, ','); - for (size_t i = 0; i < node_maxs_str.size(); i++) { + for (int i : llvm::seq(node_maxs_str.size())) { double value; if (!absl::SimpleAtod(node_maxs_str[i], &value)) { llvm::errs() << "Unexpected mins: " << node_maxs_str[i] << "\n"; From 02090cac6a7f1958920155522d912de0b2769301 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 12:20:43 -0400 Subject: [PATCH 03/19] Update import_quant_stats_pass.cc --- .../compiler/mlir/lite/quantization/import_quant_stats_pass.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc index 5419a0d5e1b..e00a088c38c 100644 --- a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc +++ b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc @@ -158,7 +158,7 @@ void ImportQuantStatsPass::ImportAsStatsOps(OpBuilder b, Operation *op, InsertStatsOpAtResult(b, op->getResult(index), layer_stats, axis_stats, axis); } else { - for (int i = 0; i < static_cast(op->getNumResults()); ++i) { + for (int i = 0, e = op->getNumResults(); i < e; ++i) { if (IsQuantizableResult(op, i)) { InsertStatsOpAtResult(b, op->getResult(i), layer_stats, axis_stats, axis); From 0052918f2b9332e7eabe4b2ababbbdb464889cec Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 12:23:40 -0400 Subject: [PATCH 04/19] Update quantization_config.cc --- .../compiler/mlir/lite/quantization/quantization_config.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc index 634d212409e..b299fa8f4c2 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc @@ -48,7 +48,7 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_mins; if (!min_values.empty()) { std::vector node_mins_str = absl::StrSplit(min_values, ','); - for (size_t i = 0; i < node_mins_str.size(); i++) { + for (int i : llvm::seq(node_mins_str.size())) { double value; if 
(!absl::SimpleAtod(node_mins_str[i], &value)) { return true; From 09f45f4f5edba341b21c89266431c7dd9e950af8 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 12:23:47 -0400 Subject: [PATCH 05/19] Update quantization_config.cc From 2f515039033a27b6253ae788e79c7ead32265007 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 14:36:49 -0400 Subject: [PATCH 06/19] Update quantization_driver.cc --- .../compiler/mlir/lite/quantization/quantization_driver.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc index fc11604ef8a..a9f4eb78431 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc @@ -294,7 +294,7 @@ class QuantizationDriver { return; if (current_op == op) llvm::errs() << "===>>>"; llvm::errs() << op->getName() << " : ("; - for (size_t i = 0; i < op->getNumOperands(); ++i) { + for (int i = 0, e = op->getNumOperands(); i < e; ++i) { if (auto params = GetOperandQuantState(op, i).params) params.print(llvm::errs()); else @@ -303,7 +303,7 @@ class QuantizationDriver { llvm::errs() << ","; } llvm::errs() << ") -> ("; - for (size_t i = 0; i < op->getNumResults(); ++i) { + for (int i = 0, e = op->getNumResults(); i < e; ++i) { if (auto params = GetResultQuantState(op, i).params) params.print(llvm::errs()); else From 5ac4a4a3ea522b2d0b6b7fa0058e7bc1bb5ba6d3 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 14:47:27 -0400 Subject: [PATCH 07/19] Update quantization_utils.cc --- .../compiler/mlir/lite/quantization/quantization_utils.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc index b9ca5329519..57b24eb8772 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc @@ -368,7 +368,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias( scales[index_scale.index()] *= index_scale.value(); } } else if (auto type = op_type.dyn_cast()) { - for (size_t index = 0; index != axis_size; ++index) { + for (int index = 0, e = axis_size; index != e; ++index) { scales[index] *= type.getScale(); } } From 5394d892605fd158ad6a9c366e88e95b675a0227 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 14:48:54 -0400 Subject: [PATCH 08/19] Update dump_mlir_util.cc --- tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc b/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc index b5a6c922707..febf2bc096d 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc +++ b/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc @@ -41,7 +41,7 @@ std::string MakeUniqueFilename(string name) { static NameCounts& instance = *new NameCounts; // Remove illegal characters from `name`. - for (size_t i = 0; i < name.size(); ++i) { + for (int i = 0, e = name.size(); i < e; ++i) { char ch = name[i]; if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' 
|| ch == '\\') { From 1bccc9e1959fca38ce94a3cb1cfe7be1b6d4050c Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 14:52:29 -0400 Subject: [PATCH 09/19] Update chlo_ops.cc --- tensorflow/compiler/mlir/xla/ir/chlo_ops.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc b/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc index f5b895f0c76..3408f3ed0cc 100644 --- a/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc +++ b/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc @@ -49,7 +49,7 @@ static Type GetBroadcastType(Type x, Type y, Type element_type, if (shape_x.size() == shape_y.size()) { llvm::SmallVector out_shape(shape_x.size()); - for (size_t i = 0; i < shape_x.size(); i++) { + for (int i = 0, e = shape_x.size(); i < e; i++) { auto x_val = shape_x[i]; auto y_val = shape_y[i]; if (x_val == -1 || y_val == -1) { From 340053608bd1ac4168dc1be35e019cc1ac9d595a Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 14:58:56 -0400 Subject: [PATCH 10/19] Update hlo_ops.cc --- tensorflow/compiler/mlir/xla/ir/hlo_ops.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc index 569e45912a2..7f313b56925 100644 --- a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc +++ b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc @@ -143,7 +143,7 @@ DenseIntElementsAttr BuildConvPaddingAttrs( int rank = padding_low.size(); SmallVector padding; - for (unsigned i = 0; i < static_cast(rank); ++i) { + for (unsigned i = 0, e = rank; i < e; ++i) { padding.push_back(GetPaddingValue(padding_attr, {i, 0}) + padding_low[i]); padding.push_back(GetPaddingValue(padding_attr, {i, 1}) + padding_high[i]); } @@ -853,7 +853,7 @@ static Attribute foldConcatenateHelper(ConcatenateOp* op, auto shape = type.getShape(); size_t top_size = 1; - for (size_t i = 0; i < axis; i++) { + for (int i = 0, e = axis; i < e; i++) { top_size = top_size * shape[i]; } @@ -1118,7 +1118,7 @@ static LogicalResult Verify(MapOp op) { // increasing. 
auto values = op.dimensions().getValues(); auto dimensions = std::vector{values.begin(), values.end()}; - for (int i = 0; static_cast(i) < dimensions.size(); ++i) { + for (int i = 0, e = dimensions.size(); i < e; ++i) { if (dimensions[i] != i) return op.emitOpError() << "requires monotonically increasing dimension " "numbers, but got: " From 9f535c3290cc3a8bdc503a4c48d3b0640b9f4798 Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Sun, 14 Jun 2020 15:22:51 -0400 Subject: [PATCH 11/19] Update quantization_config.cc --- .../mlir/lite/quantization/quantization_config.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc index b299fa8f4c2..cdff93502f2 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc @@ -48,9 +48,9 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_mins; if (!min_values.empty()) { std::vector<std::string> node_mins_str = absl::StrSplit(min_values, ','); - for (int i : llvm::seq(node_mins_str.size())) { + for (const std::string& node_min : node_mins_str) { double value; - if (!absl::SimpleAtod(node_mins_str[i], &value)) { + if (!absl::SimpleAtod(node_min, &value)) { return true; } node_mins.push_back(value); @@ -60,9 +60,9 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_maxs; if (!max_values.empty()) { std::vector<std::string> node_maxs_str = absl::StrSplit(max_values, ','); - for (int i : llvm::seq(node_maxs_str.size())) { + for (const std::string& node_max : node_maxs_str) { double value; - if (!absl::SimpleAtod(node_maxs_str[i], &value)) { - llvm::errs() << "Unexpected mins: " << node_maxs_str[i] << "\n"; + if (!absl::SimpleAtod(node_max, &value)) { + llvm::errs() << "Unexpected mins: " << node_max << "\n"; return true; } From 7f3de617db0c4442ac0877cbdcf6261bbe734087 Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Mon, 15 Jun 2020 21:18:56 +0000 Subject: [PATCH 12/19] segregating changes 2 --- tensorflow/core/framework/tensor_shape.cc | 2 +- tensorflow/core/kernels/batch_kernels.cc | 6 ++-- .../core/kernels/data/prefetch_autotuner.cc | 4 +-- tensorflow/core/kernels/quantization_utils.h | 2 +- tensorflow/core/lib/io/inputbuffer.cc | 6 ++-- tensorflow/core/lib/io/random_inputstream.cc | 2 +- .../core/lib/io/snappy/snappy_inputbuffer.cc | 2 +- .../core/lib/io/snappy/snappy_outputbuffer.cc | 6 ++-- tensorflow/core/lib/io/zlib_outputbuffer.cc | 6 ++-- tensorflow/core/platform/env.cc | 2 +- tensorflow/core/platform/file_system.cc | 2 +- .../core/platform/file_system_helper.cc | 2 +- tensorflow/core/platform/s3/s3_file_system.cc | 2 +- tensorflow/core/platform/status.cc | 4 +-- .../profiler/internal/parse_annotation.cc | 2 +- .../core/profiler/utils/derived_timeline.cc | 2 +- .../core/profiler/utils/derived_timeline.h | 2 +- .../core/profiler/utils/xplane_utils.cc | 2 +- tensorflow/core/util/bcast.h | 4 +-- .../convert_trivial_tile_to_concat.cc | 2 +- .../convert_trivial_transpose_to_reshape.cc | 2 +- .../toco/graph_transformations/dequantize.cc | 2 +- .../graph_transformations/drop_fake_quant.cc | 2 +- ...int8_weights_safe_for_fast_int8_kernels.cc | 2 +- .../fuse_broadcast_into_following_binary.cc | 2 +- .../group_bidirectional_sequence_ops.cc | 4 +-- .../graph_transformations/hardcode_min_max.cc | 2 +- .../identify_nearest_upsample.cc | 2 +- .../merge_reshape_into_preceding_transpose.cc | 4 +-- .../propagate_array_data_types.cc | 2 +-
.../propagate_fake_quant_num_bits.cc | 2 +- .../propagate_fixed_sizes.cc | 28 +++++++++---------- .../remove_successive_transpose.cc | 10 +++---- .../remove_trivial_passthrough.cc | 2 +- .../reorder_elementwise_unary.cc | 4 +-- .../reorder_reshape_transpose.cc | 12 ++++---- .../resolve_batch_normalization.cc | 10 +++---- .../resolve_constant_concatenation.cc | 2 +- .../resolve_constant_pack.cc | 2 +- .../resolve_constant_slice.cc | 2 +- .../resolve_constant_transpose.cc | 2 +- .../resolve_constant_unary.cc | 4 +-- .../unpartition_embedding_lookup.cc | 4 +-- tensorflow/lite/toco/model_cmdline_flags.cc | 8 +++--- tensorflow/lite/toco/toco_cmdline_flags.cc | 2 +- 45 files changed, 89 insertions(+), 91 deletions(-) diff --git a/tensorflow/core/framework/tensor_shape.cc b/tensorflow/core/framework/tensor_shape.cc index 79d0cc0822d..f4b440e9cd1 100644 --- a/tensorflow/core/framework/tensor_shape.cc +++ b/tensorflow/core/framework/tensor_shape.cc @@ -187,7 +187,7 @@ void TensorShapeBase::InitDims(gtl::ArraySlice dim_sizes) { "bad overflow check"); bool large_size = false; for (auto s : dim_sizes) { - if (static_cast(s) > static_cast(kMaxSmall)) { + if (s > kMaxSmall) { large_size = true; break; } diff --git a/tensorflow/core/kernels/batch_kernels.cc b/tensorflow/core/kernels/batch_kernels.cc index ee271f1a123..151f2367c95 100644 --- a/tensorflow/core/kernels/batch_kernels.cc +++ b/tensorflow/core/kernels/batch_kernels.cc @@ -486,18 +486,18 @@ class BatchResource : public ResourceBase { std::map> split_tensors; DCHECK_EQ(batch->task(0).context->num_outputs(), combined_outputs.size()); - if (static_cast(combined_outputs.size()) != batch->task(0).context->num_outputs()) { + if (combined_outputs.size() != batch->task(0).context->num_outputs()) { return errors::Internal("Wrong number of batched output tensors"); } // Generate 'split_tensors' and populate the context outputs. 
- for (size_t i = 0; i < combined_outputs.size(); ++i) { + for (int i = 0; i < combined_outputs.size(); ++i) { const Tensor& output_tensor = combined_outputs[i]; if (output_tensor.shape().dims() == 0) { return errors::FailedPrecondition( "Batched output tensor has 0 dimensions"); } - if (output_tensor.shape().dim_size(0) != static_cast(batch->size() + padding_size)) { + if (output_tensor.shape().dim_size(0) != batch->size() + padding_size) { return errors::FailedPrecondition( "Batched output tensor's 0th dimension does not equal the sum of " "the 0th dimension sizes of the input tensors"); diff --git a/tensorflow/core/kernels/data/prefetch_autotuner.cc b/tensorflow/core/kernels/data/prefetch_autotuner.cc index a3fd9919d6b..a3bb1acc352 100644 --- a/tensorflow/core/kernels/data/prefetch_autotuner.cc +++ b/tensorflow/core/kernels/data/prefetch_autotuner.cc @@ -40,13 +40,13 @@ void PrefetchAutotuner::RecordConsumption(size_t current_buffer_size) { case Mode::kDisabled: return; case Mode::kUpswing: - if (static_cast(current_buffer_size) == buffer_limit_) { + if (current_buffer_size == buffer_limit_) { mode_ = Mode::kDownswing; } return; case Mode::kDownswing: if (current_buffer_size == 0) { - if (buffer_limit_ >= static_cast(kBufferLimitThreshold)) { + if (buffer_limit_ >= kBufferLimitThreshold) { buffer_limit_ += kBufferLimitThreshold; } else { buffer_limit_ *= 2; diff --git a/tensorflow/core/kernels/quantization_utils.h b/tensorflow/core/kernels/quantization_utils.h index 06c901967b0..315616f3fb3 100644 --- a/tensorflow/core/kernels/quantization_utils.h +++ b/tensorflow/core/kernels/quantization_utils.h @@ -268,7 +268,7 @@ inline void RequantizeManyInNewRangeReference(const qint32* input, int64 count, // that could be easily adapted for a SIMD implementation. It should also be // possible to perform all the calculations in 32-bit rather than 64, but // that's not been implemented yet. - for (size_t index = 0; static_cast(index) < count; ++index) { + for (size_t index = 0; index < count; ++index) { const int64 input_value = static_cast(input[index]); const int64 fp_value = ((input_value * range_scale_fp) >> 32) + input_offset_fp; diff --git a/tensorflow/core/lib/io/inputbuffer.cc b/tensorflow/core/lib/io/inputbuffer.cc index d005ee11d78..2b138b825e4 100644 --- a/tensorflow/core/lib/io/inputbuffer.cc +++ b/tensorflow/core/lib/io/inputbuffer.cc @@ -85,7 +85,7 @@ Status InputBuffer::ReadNBytes(int64 bytes_to_read, string* result) { result->resize(bytes_to_read); size_t bytes_read = 0; Status status = ReadNBytes(bytes_to_read, &(*result)[0], &bytes_read); - if (static_cast(bytes_read) < bytes_to_read) result->resize(bytes_read); + if (bytes_read < bytes_to_read) result->resize(bytes_read); return status; } @@ -204,7 +204,7 @@ Status InputBuffer::Hint(int64 bytes_to_read) { } // The internal buffer is too small. Do nothing. 
- if (bytes_to_read > static_cast(size_)) { + if (bytes_to_read > size_) { return Status::OK(); } @@ -230,7 +230,7 @@ Status InputBuffer::Hint(int64 bytes_to_read) { limit_ += data.size(); file_pos_ += data.size(); - if (errors::IsOutOfRange(s) && data.size() == static_cast(bytes_to_read)) { + if (errors::IsOutOfRange(s) && data.size() == bytes_to_read) { return Status::OK(); } else { return s; diff --git a/tensorflow/core/lib/io/random_inputstream.cc b/tensorflow/core/lib/io/random_inputstream.cc index bd0054ce753..10f734a5bae 100644 --- a/tensorflow/core/lib/io/random_inputstream.cc +++ b/tensorflow/core/lib/io/random_inputstream.cc @@ -92,7 +92,7 @@ Status RandomAccessInputStream::SkipNBytes(int64 bytes_to_skip) { } else { return s; } - if (data.size() < static_cast(bytes_to_read)) { + if (data.size() < bytes_to_read) { return errors::OutOfRange("reached end of file"); } bytes_to_skip -= bytes_to_read; diff --git a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc index 53939f2d8a3..a331d4173cf 100644 --- a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc @@ -134,7 +134,7 @@ Status SnappyInputBuffer::ReadCompressedBlockLength(uint32* length) { } size_t readable = std::min(bytes_to_read, avail_in_); - for (size_t i = 0; i < readable; i++) { + for (int i = 0; i < readable; i++) { // The "unsigned char" type cast is intentional to avoid implicit type // casting of the signed char to unsigned int during bitwise OR which // causes weird overflow errors. diff --git a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc index fe3a53c6c25..563503a1319 100644 --- a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc @@ -76,7 +76,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { // If there is sufficient free space in input_buffer_ to fit data we // add it there and return. - if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -87,7 +87,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered()); // input_buffer_ should be empty at this point. 
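As a side note on the inputbuffer/snappy/zlib hunks above and below: when a signed byte count meets an unsigned buffer size, the cast should go to whichever side can represent both values. A small self-contained sketch (function and variable names are made up for illustration):

#include <cstdint>
#include <cstdio>
#include <string>

// The unsigned size is cast to int64_t: safe as long as the buffer is
// smaller than 2^63 bytes, and a negative request stays negative.
// Casting the signed side to size_t instead would turn a negative
// bytes_to_read into a huge unsigned value and flip the result.
bool FitsInBuffer(int64_t bytes_to_read, const std::string& buffer) {
  return bytes_to_read <= static_cast<int64_t>(buffer.size());
}

int main() {
  std::string buf(16, '\0');
  std::printf("%d\n", FitsInBuffer(8, buf));   // 1
  std::printf("%d\n", FitsInBuffer(-1, buf));  // 1 (still well defined)
  std::printf("%d\n", FitsInBuffer(32, buf));  // 0
  return 0;
}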
- if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -144,7 +144,7 @@ void SnappyOutputBuffer::AddToInputBuffer(StringPiece data) { const int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (static_cast(bytes_to_write) > free_tail_bytes) { + if (bytes_to_write > free_tail_bytes) { memmove(input_buffer_.get(), next_in_, avail_in_); next_in_ = input_buffer_.get(); } diff --git a/tensorflow/core/lib/io/zlib_outputbuffer.cc b/tensorflow/core/lib/io/zlib_outputbuffer.cc index d475d0eaa5c..5840ca60242 100644 --- a/tensorflow/core/lib/io/zlib_outputbuffer.cc +++ b/tensorflow/core/lib/io/zlib_outputbuffer.cc @@ -98,7 +98,7 @@ void ZlibOutputBuffer::AddToInputBuffer(StringPiece data) { int32 unread_bytes = z_stream_->avail_in; int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (static_cast(bytes_to_write) > free_tail_bytes) { + if (bytes_to_write > free_tail_bytes) { memmove(z_stream_input_.get(), z_stream_->next_in, z_stream_->avail_in); z_stream_->next_in = z_stream_input_.get(); } @@ -154,7 +154,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { size_t bytes_to_write = data.size(); - if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -162,7 +162,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered(zlib_options_.flush_mode)); // At this point input stream should be empty. - if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc index 05d95ba0425..b29cad05459 100644 --- a/tensorflow/core/platform/env.cc +++ b/tensorflow/core/platform/env.cc @@ -214,7 +214,7 @@ bool Env::FilesExist(const std::vector& files, } if (fs_status) { result &= fs_result; - for (size_t i = 0; i < itr.second.size(); ++i) { + for (int i = 0; i < itr.second.size(); ++i) { per_file_status[itr.second[i]] = fs_status->at(i); } } else if (!fs_result) { diff --git a/tensorflow/core/platform/file_system.cc b/tensorflow/core/platform/file_system.cc index c9657e2339f..9e96ceedbdc 100644 --- a/tensorflow/core/platform/file_system.cc +++ b/tensorflow/core/platform/file_system.cc @@ -308,7 +308,7 @@ StringPiece FileSystem::Basename(StringPiece path) const { StringPiece FileSystem::Extension(StringPiece path) const { StringPiece basename = this->Basename(path); - size_t pos = basename.rfind('.'); + int pos = basename.rfind('.'); if (pos == StringPiece::npos) { return StringPiece(path.data() + path.size(), 0); } else { diff --git a/tensorflow/core/platform/file_system_helper.cc b/tensorflow/core/platform/file_system_helper.cc index 909752389e1..64b175c4d17 100644 --- a/tensorflow/core/platform/file_system_helper.cc +++ b/tensorflow/core/platform/file_system_helper.cc @@ -103,7 +103,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, children_dir_status[i] = fs->IsDirectory(child_path); } }); - for (size_t i = 0; i < children.size(); ++i) { + for (int i = 0; i < children.size(); ++i) { const string child_path = io::JoinPath(current_dir, children[i]); // If the IsDirectory call was cancelled we bail. 
if (children_dir_status[i].code() == tensorflow::error::CANCELLED) { diff --git a/tensorflow/core/platform/s3/s3_file_system.cc b/tensorflow/core/platform/s3/s3_file_system.cc index 45d648abcc0..1726c9fbc6c 100644 --- a/tensorflow/core/platform/s3/s3_file_system.cc +++ b/tensorflow/core/platform/s3/s3_file_system.cc @@ -906,7 +906,7 @@ Status S3FileSystem::MultiPartCopy(const Aws::String& source, // wait on the mutex until notify is called // then check the finished parts as there could be false notifications multi_part_copy_cv.wait(lock, [&finishedPartStates, num_parts] { - return static_cast(finishedPartStates.size()) == num_parts; + return finishedPartStates.size() == num_parts; }); } // check if there was any error for any part diff --git a/tensorflow/core/platform/status.cc b/tensorflow/core/platform/status.cc index e303c18091c..756b8314148 100644 --- a/tensorflow/core/platform/status.cc +++ b/tensorflow/core/platform/status.cc @@ -74,9 +74,7 @@ class StatusLogSink : public TFLogSink { mutex_lock lock(mu_); messages_.emplace_back(entry.ToString()); - if (messages_.size() > static_cast(num_messages_)){ - messages_.pop_front(); - } + if (messages_.size() > num_messages_) messages_.pop_front(); } private: diff --git a/tensorflow/core/profiler/internal/parse_annotation.cc b/tensorflow/core/profiler/internal/parse_annotation.cc index a4cdc09739d..32c26befa3d 100644 --- a/tensorflow/core/profiler/internal/parse_annotation.cc +++ b/tensorflow/core/profiler/internal/parse_annotation.cc @@ -50,7 +50,7 @@ std::vector SplitNameAndMetadata( std::vector SplitPairs(absl::string_view metadata) { std::vector key_value_pairs; std::stack quotes; - size_t start = 0, end = 0; + int start = 0, end = 0; for (; end < metadata.size(); ++end) { char ch = metadata[end]; switch (ch) { diff --git a/tensorflow/core/profiler/utils/derived_timeline.cc b/tensorflow/core/profiler/utils/derived_timeline.cc index 3d03fc22c16..112c0977763 100644 --- a/tensorflow/core/profiler/utils/derived_timeline.cc +++ b/tensorflow/core/profiler/utils/derived_timeline.cc @@ -130,7 +130,7 @@ void DerivedXLineBuilder::ExpandOrAddLevelEvent(const XEvent& event, } void DerivedXLineBuilder::ResetLastEvents(int level) { - for (int i = level; i < static_cast(last_event_by_level_.size()); ++i) { + for (int i = level; i < last_event_by_level_.size(); ++i) { last_event_by_level_[i] = absl::nullopt; } if (level == 0) ResetDependentLines(); diff --git a/tensorflow/core/profiler/utils/derived_timeline.h b/tensorflow/core/profiler/utils/derived_timeline.h index 92489399b8f..cd4da7996c5 100644 --- a/tensorflow/core/profiler/utils/derived_timeline.h +++ b/tensorflow/core/profiler/utils/derived_timeline.h @@ -37,7 +37,7 @@ class DerivedXLineBuilder { std::vector dependent_lines); void ExpandOrAddEvents(const std::vector& event_per_level) { - for (size_t level = 0; level < event_per_level.size(); ++level) { + for (int level = 0; level < event_per_level.size(); ++level) { ExpandOrAddLevelEvent(event_per_level[level], level); } } diff --git a/tensorflow/core/profiler/utils/xplane_utils.cc b/tensorflow/core/profiler/utils/xplane_utils.cc index 1fe476ce79c..7f5221c5391 100644 --- a/tensorflow/core/profiler/utils/xplane_utils.cc +++ b/tensorflow/core/profiler/utils/xplane_utils.cc @@ -266,7 +266,7 @@ void SortXSpace(XSpace* space) { // smaller than these value. 
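Related to the status.cc change above: comparing a container size against a signed limit only needs a cast on one side, and the safe direction depends on whether the signed value can be negative. A standalone sketch under that assumption (the names loosely mirror the patch but are illustrative):

#include <cassert>
#include <cstddef>
#include <deque>
#include <string>

// num_messages comes from a signed flag; deque::size() is unsigned.
// Asserting non-negativity first makes the static_cast to size_t safe.
void TrimTo(std::deque<std::string>& messages, int num_messages) {
  assert(num_messages >= 0);
  while (messages.size() > static_cast<size_t>(num_messages)) {
    messages.pop_front();
  }
}

int main() {
  std::deque<std::string> m = {"a", "b", "c", "d"};
  TrimTo(m, 2);  // keeps the two newest entries, {"c", "d"}
  return m.size() == 2 ? 0 : 1;
}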
void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) { for (XLine& line : *plane->mutable_lines()) { - if (line.timestamp_ns() >= static_cast(start_time_ns)) { + if (line.timestamp_ns() >= start_time_ns) { line.set_timestamp_ns(line.timestamp_ns() - start_time_ns); } } diff --git a/tensorflow/core/util/bcast.h b/tensorflow/core/util/bcast.h index 075de84964e..7bb8ea18ad3 100644 --- a/tensorflow/core/util/bcast.h +++ b/tensorflow/core/util/bcast.h @@ -139,7 +139,7 @@ BCastList::BCastList(const BCastList::Vec (&x)[N], if (x[i] != x[0]) { all_equal = false; } - if (static_cast(x[i].size()) > largest_rank) { + if (x[i].size() > largest_rank) { largest_rank = x[i].size(); } } @@ -176,7 +176,7 @@ BCastList::BCastList(const BCastList::Vec (&x)[N], // 1-extend and align all vectors. for (int i = 0; i < N; ++i) { - if (static_cast(copy[i].size()) < largest_rank) { + if (copy[i].size() < largest_rank) { copy[i].resize(largest_rank, 1); } } diff --git a/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc b/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc index c19ccf676c9..46288d2a1ed 100644 --- a/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc +++ b/tensorflow/lite/toco/graph_transformations/convert_trivial_tile_to_concat.cc @@ -52,7 +52,7 @@ namespace toco { // It then just becomes a concat along that dimension. int non_one_dims = 0; int concat_axis = 0; - for (size_t i = 0; i < multiples.size(); ++i) { + for (int i = 0; i < multiples.size(); ++i) { if (multiples[i] != 1) { ++non_one_dims; concat_axis = i; diff --git a/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc b/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc index fa8a69a1e7a..2b5aaea2b23 100644 --- a/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc +++ b/tensorflow/lite/toco/graph_transformations/convert_trivial_transpose_to_reshape.cc @@ -31,7 +31,7 @@ bool TransposeAffectsMemoryOrder(std::vector perm, // just the shape) then the flat buffer representation shouldn't change. 
std::vector old_major_index_ordering; std::vector new_major_index_ordering; - for (int i = 0; static_cast(i) < in_shape.size(); i++) { + for (int i = 0; i < in_shape.size(); i++) { if (in_shape[i] != 1) { old_major_index_ordering.push_back(i); } diff --git a/tensorflow/lite/toco/graph_transformations/dequantize.cc b/tensorflow/lite/toco/graph_transformations/dequantize.cc index c87c305a70d..cc5dddbb40e 100644 --- a/tensorflow/lite/toco/graph_transformations/dequantize.cc +++ b/tensorflow/lite/toco/graph_transformations/dequantize.cc @@ -35,7 +35,7 @@ void DequantizeBuffer(Array* array) { auto& new_data = array->GetMutableBuffer().data; new_data.resize(old_data.size()); const auto& qparams = array->GetQuantizationParams(); - for (size_t i = 0; i < old_data.size(); i++) { + for (int i = 0; i < old_data.size(); i++) { new_data[i] = qparams.scale * (old_data[i] - qparams.zero_point); } } diff --git a/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc b/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc index 3a0b4d0103f..bb8679bced8 100644 --- a/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc +++ b/tensorflow/lite/toco/graph_transformations/drop_fake_quant.cc @@ -45,7 +45,7 @@ namespace toco { } // Drop min/max inputs - for (size_t i = 1; i < fakequant_op->inputs.size(); i++) { + for (int i = 1; i < fakequant_op->inputs.size(); i++) { if (CountOpsWithInput(*model, fakequant_op->inputs[i]) == 1) { model->EraseArray(fakequant_op->inputs[i]); } diff --git a/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc b/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc index ce4574cdfbf..918bb489995 100644 --- a/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc +++ b/tensorflow/lite/toco/graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc @@ -166,7 +166,7 @@ namespace toco { int index_of_previous_bad_value = 0; bool changed = false; - for (size_t i = 0; i < buffer_data.size(); i++) { + for (int i = 0; i < buffer_data.size(); i++) { if (buffer_data[i] == 0) { count_bad++; if (count_bad > 1) { diff --git a/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc b/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc index 2c5c2cbb5f1..ba3e277f676 100644 --- a/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc +++ b/tensorflow/lite/toco/graph_transformations/fuse_broadcast_into_following_binary.cc @@ -34,7 +34,7 @@ bool IsBroadcastingOp(const Model& model, Operator* op) { // Concatenation of identical inputs is usually a broadcast. if (op->type == OperatorType::kConcatenation) { // Verify that all inputs are the same. - for (size_t i = 1; i < op->inputs.size(); ++i) { + for (int i = 1; i < op->inputs.size(); ++i) { if (op->inputs[i] != op->inputs[0]) { return false; } diff --git a/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc b/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc index a6d95ec43b1..fa252b1a61b 100644 --- a/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc +++ b/tensorflow/lite/toco/graph_transformations/group_bidirectional_sequence_ops.cc @@ -125,7 +125,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid( return false; // Make sure the inputs datatype matches. 
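For the many toco loops in this series that flip between int and size_t indices, both spellings behave the same for in-range values; the difference is only which comparison the compiler accepts silently. A toy sketch with made-up inputs:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> inputs = {"tensor", "min", "max"};  // hypothetical
  // A size_t index matches the type of inputs.size(), so the condition is
  // unsigned vs. unsigned and -Wsign-compare has nothing to flag.
  for (size_t i = 1; i < inputs.size(); ++i) {
    std::printf("extra input: %s\n", inputs[i].c_str());
  }
  // Trade-off: unsigned indices wrap below zero (a backwards loop testing
  // i >= 0 never terminates), which is why other call sites keep an int
  // index and cast or hoist the bound instead.
  return 0;
}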
- for (size_t i = 0; i < fw_sequence_op->inputs.size(); ++i) { + for (int i = 0; i < fw_sequence_op->inputs.size(); ++i) { const auto& fw_input_array_name = fw_sequence_op->inputs[i]; const auto& bw_input_array_name = bw_sequence_op->inputs[i]; if (model.HasArray(fw_input_array_name) && @@ -137,7 +137,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid( } // Make sure the outputs datatype matches. - for (size_t i = 0; i < fw_sequence_op->outputs.size(); ++i) { + for (int i = 0; i < fw_sequence_op->outputs.size(); ++i) { const auto& fw_output_array_name = fw_sequence_op->outputs[i]; const auto& bw_output_array_name = bw_sequence_op->outputs[i]; if (model.HasArray(fw_output_array_name) && diff --git a/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc b/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc index 4250668bcf5..171d522daa7 100644 --- a/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc +++ b/tensorflow/lite/toco/graph_transformations/hardcode_min_max.cc @@ -405,7 +405,7 @@ bool HardcodeMinMaxForPack(Model* model, Operator* op) { } const auto& first_input_minmax = first_input_array.GetMinMax(); - for (size_t i = 1; i < op->inputs.size(); i++) { + for (int i = 1; i < op->inputs.size(); i++) { const auto& input_array = model->GetArray(op->inputs[i]); if (!input_array.minmax) { return false; diff --git a/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc b/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc index 08894c93a5b..2ab6692a3a8 100644 --- a/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc +++ b/tensorflow/lite/toco/graph_transformations/identify_nearest_upsample.cc @@ -199,7 +199,7 @@ std::vector>::iterator FindOperator( shape_array.data_type = ArrayDataType::kInt32; auto& shape_buffer = shape_array.GetMutableBuffer(); // This is what imagined as the original shape. - for (size_t i = 0; i < imagined_original_shape.size(); ++i) { + for (int i = 0; i < imagined_original_shape.size(); ++i) { shape_buffer.data.push_back(imagined_original_shape.at(i)); } diff --git a/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc b/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc index a76ae1a0635..80170fe8bcb 100644 --- a/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/merge_reshape_into_preceding_transpose.cc @@ -70,7 +70,7 @@ std::vector ReshapeToTranspose(const Model& model, std::vector not_one_indices; // Separate into one indices and not one indices. - for (size_t i = 0; i < in_shape.size(); i++) { + for (int i = 0; i < in_shape.size(); i++) { if (in_shape[i] == 1) { one_indices.push_back(i); } else { @@ -167,7 +167,7 @@ std::vector ReshapeToTranspose(const Model& model, // Combine the permutations. 
const auto& transpose_perm = transpose_op->perm; - for (size_t i = 0; i < merged_perm.size(); i++) { + for (int i = 0; i < merged_perm.size(); i++) { merged_perm[i] = transpose_perm[merged_perm[i]]; } diff --git a/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc b/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc index 2f316934311..49d59de860b 100644 --- a/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc +++ b/tensorflow/lite/toco/graph_transformations/propagate_array_data_types.cc @@ -170,7 +170,7 @@ void SetDataTypeForAllOutputs(Model* model, Operator* op, if (unsupported_op->output_data_types.size() < op->outputs.size()) { return ::tensorflow::Status::OK(); } - for (size_t i = 0; i < op->outputs.size(); ++i) { + for (int i = 0; i < op->outputs.size(); ++i) { const string& output = op->outputs[i]; const ArrayDataType data_type = unsupported_op->output_data_types[i]; model->GetArray(output).data_type = data_type; diff --git a/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc b/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc index 94779f54af2..1ed618879c1 100644 --- a/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc +++ b/tensorflow/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc @@ -149,7 +149,7 @@ bool RecursivelyBackwardPropagateDataType(GraphTransformation* transformation, ArrayDataType new_data_type, const MinMax& new_minmax) { bool did_change = false; - for (size_t input_index = 0; input_index < op->inputs.size(); ++input_index) { + for (int input_index = 0; input_index < op->inputs.size(); ++input_index) { const auto& input = op->inputs[input_index]; auto& input_array = model->GetArray(input); diff --git a/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc index 520cd8b495a..006e624eb7a 100644 --- a/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc +++ b/tensorflow/lite/toco/graph_transformations/propagate_fixed_sizes.cc @@ -431,7 +431,7 @@ void ProcessTensorFlowReshapeOperator(Model* model, bool has_wildcard = false; int wildcard_index = 0; int product_non_wildcard_dims = 1; - for (size_t i = 0; i < shape_data.size(); i++) { + for (int i = 0; i < shape_data.size(); i++) { if (shape_data[i] == -1) { CHECK(!has_wildcard); has_wildcard = true; @@ -574,7 +574,7 @@ void ProcessTensorFlowReductionOperator(Model* model, Operator* op) { std::set true_indices; const auto& reduction_indices = reduction_indices_array.GetBuffer().data; - for (size_t i = 0; i < reduction_indices.size(); ++i) { + for (int i = 0; i < reduction_indices.size(); ++i) { const int32 reduction_index = reduction_indices[i]; if (reduction_index < -input_rank || reduction_index >= input_rank) { CHECK(false) << "Invalid reduction dimension " << reduction_index @@ -627,7 +627,7 @@ void ProcessSliceOperator(Model* model, SliceOperator* op) { CHECK_EQ(op->begin.size(), op->size.size()); std::vector output_dims; - for (size_t i = 0; i < op->begin.size(); ++i) { + for (int i = 0; i < op->begin.size(); ++i) { int size = op->size[i]; if (size == -1) { size = input_array.shape().dims(i) - op->begin[i]; @@ -883,7 +883,7 @@ void ProcessTensorFlowSplitVOperator(Model* model, CHECK_EQ(op->outputs.size(), op->num_split); - for (size_t i = 0; i < op->outputs.size(); ++i) { + for (int i = 0; i < op->outputs.size(); ++i) { const auto& output = op->outputs[i]; Shape 
output_shape = input_shape; (*output_shape.mutable_dims())[axis] = size_splits_vector.at(i); @@ -1514,7 +1514,7 @@ void ProcessPadOperator(Model* model, PadOperator* op) { std::vector& dims = *output_shape.mutable_dims(); CHECK_EQ(op->left_padding.size(), dims.size()); - for (size_t i = 0; i < op->left_padding.size(); ++i) { + for (int i = 0; i < op->left_padding.size(); ++i) { dims[i] += op->left_padding[i] + op->right_padding[i]; } @@ -1540,7 +1540,7 @@ void ProcessPadV2Operator(Model* model, PadV2Operator* op) { std::vector& dims = *output_shape.mutable_dims(); CHECK_EQ(op->left_padding.size(), dims.size()); - for (size_t i = 0; i < op->left_padding.size(); ++i) { + for (int i = 0; i < op->left_padding.size(); ++i) { dims[i] += op->left_padding[i] + op->right_padding[i]; } @@ -1683,7 +1683,7 @@ void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) { CHECK_LE(op->strides.size(), num_input_axes) << "StridedSlice op with output \"" << op->outputs[0] << "\", requires no more than " << num_input_axes << " strides"; - for (size_t i = 0; i < op->strides.size(); i++) { + for (int i = 0; i < op->strides.size(); i++) { CHECK_NE(op->strides[i], 0) << "Strides must be non-zero. Axis " << i << " has stride=" << op->strides[i] << "."; } @@ -1814,7 +1814,7 @@ void ProcessTransposeOperator(Model* model, TransposeOperator* op) { << "Transpose permutation input " << op->inputs[1] << " must be same length as input dimensions"; std::vector* output_dims = output_array.mutable_shape()->mutable_dims(); - for (size_t i = 0; i < perm.size(); i++) { + for (int i = 0; i < perm.size(); i++) { int axis = perm[i]; CHECK_GE(axis, 0); CHECK_LT(axis, input_shape.dimensions_count()); @@ -1856,8 +1856,8 @@ void ProcessArgMinMaxOperator(Model* model, Op* op) { std::vector output_dims; output_dims.reserve(input_dims.size() - 1); - for (size_t i = 0; i < input_dims.size(); ++i) { - if ( static_cast(i) != axis) { + for (int i = 0; i < input_dims.size(); ++i) { + if (i != axis) { output_dims.push_back(input_dims[i]); } } @@ -1938,7 +1938,7 @@ void ProcessTileOperator(Model* model, TensorFlowTileOperator* op) { auto* mutable_dims = output_array.mutable_shape()->mutable_dims(); mutable_dims->resize(multiples.size()); - for (size_t i = 0; i < mutable_dims->size(); ++i) { + for (int i = 0; i < mutable_dims->size(); ++i) { (*mutable_dims)[i] = input_shape.dims(i) * multiples[i]; } } @@ -2010,8 +2010,8 @@ void ProcessUnpackOperator(Model* model, UnpackOperator* op) { std::vector output_dims; output_dims.reserve(input_dims.size() - 1); - for (size_t i = 0; i < input_dims.size(); ++i) { - if ( static_cast(i) != op->axis) { + for (int i = 0; i < input_dims.size(); ++i) { + if (i != op->axis) { output_dims.push_back(input_dims[i]); } } @@ -2399,7 +2399,7 @@ void ProcessScatterNdOperator(Model* model, ScatterNdOperator* op) { if (unsupported_op->output_shapes.size() < op->outputs.size()) { return ::tensorflow::Status::OK(); } - for (size_t i = 0; i < op->outputs.size(); ++i) { + for (int i = 0; i < op->outputs.size(); ++i) { const string& output = op->outputs[i]; model->GetArray(output).copy_shape(unsupported_op->output_shapes.at(i)); } diff --git a/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc b/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc index 1cb3a300127..6eccda04c18 100644 --- a/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/remove_successive_transpose.cc @@ -31,12 +31,12 @@ bool 
TransformsToIdentity(std::vector const& perm1, // perm1 is the order of the indices after first transpose. When perm1 is // reordered according to perm2, if the result is simple increasing sequence // i.e., range(0, perm1.size()), then the two transposes cancel each other. - for (size_t i = 0; i < perm1.size(); ++i) { - if (perm1[i] < 0 || perm1[i] >= static_cast(perm1.size()) || perm2[i] < 0 || - perm2[i] >= static_cast(perm1.size())) { + for (int i = 0; i < perm1.size(); ++i) { + if (perm1[i] < 0 || perm1[i] >= perm1.size() || perm2[i] < 0 || + perm2[i] >= perm1.size()) { return false; } - if (perm1[perm2[i]] != static_cast(i)) { + if (perm1[perm2[i]] != i) { return false; } } @@ -46,7 +46,7 @@ bool TransformsToIdentity(std::vector const& perm1, void ReplaceOpInputsWith(Model* model, const string& lookfor, const string& replacewith) { for (const auto& op : model->operators) { - for (size_t i = 0; i < op->inputs.size(); ++i) { + for (int i = 0; i < op->inputs.size(); ++i) { if (op->inputs[i] == lookfor) { op->inputs[i] = replacewith; } diff --git a/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc b/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc index eeb8751bf86..bd529bd9ecd 100644 --- a/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc +++ b/tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.cc @@ -82,7 +82,7 @@ bool RemoveTrivialPassthroughOp(GraphTransformation* transformation, // We call 'main input' the unique nonconstant input array if there is one, // or else the 0-th input. int count_nonconstant_input_arrays = 0; - for (size_t i = 0; i < passthru_op->inputs.size(); i++) { + for (int i = 0; i < passthru_op->inputs.size(); i++) { if (!model->GetArray(passthru_op->inputs[i]).buffer) { count_nonconstant_input_arrays++; if (count_nonconstant_input_arrays == 1) { diff --git a/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc b/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc index 38edff76d55..17a5e9a1d6a 100644 --- a/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc +++ b/tensorflow/lite/toco/graph_transformations/reorder_elementwise_unary.cc @@ -127,9 +127,9 @@ bool IsMoveOperator(OperatorType optype) { move_op->outputs[0] = output_name; } else { // The intermediate array is now the output array. - for (size_t i = 0; i < model->operators.size(); i++) { + for (int i = 0; i < model->operators.size(); i++) { Operator* consumer = model->operators[i].get(); - for (size_t j = 0; j < consumer->inputs.size(); j++) { + for (int j = 0; j < consumer->inputs.size(); j++) { if (consumer->inputs[j] == output_name) { consumer->inputs[j] = intermediate_name; } diff --git a/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc b/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc index b2d184cdc31..0fbcf9f73b1 100644 --- a/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/reorder_reshape_transpose.cc @@ -60,7 +60,7 @@ std::vector ComputeNewPerm(std::vector input_dims, std::vector perm) { // These are the major axis of the input. 
std::vector input_indices; - for (size_t i = 0; i < input_dims.size(); i++) { + for (int i = 0; i < input_dims.size(); i++) { if (input_dims[i] != 1) { input_indices.push_back(i); } @@ -69,7 +69,7 @@ std::vector ComputeNewPerm(std::vector input_dims, // This maps which indices of the input produced the intermediate indices for // non-unary dimensions. std::unordered_map intermediate_to_input_indices_map; - for (size_t i = 0; i < intermediate_dims.size(); i++) { + for (int i = 0; i < intermediate_dims.size(); i++) { if (intermediate_dims[i] != 1) { intermediate_to_input_indices_map[i] = input_indices[intermediate_to_input_indices_map.size()]; @@ -80,14 +80,14 @@ std::vector ComputeNewPerm(std::vector input_dims, // major indices. std::vector new_perm; new_perm.reserve(input_dims.size()); - for (size_t i = 0; i < perm.size(); i++) { + for (int i = 0; i < perm.size(); i++) { if (intermediate_dims[perm[i]] == 1) continue; new_perm.push_back(intermediate_to_input_indices_map[perm[i]]); } // Fill the rest of the transpose in with the ones. - for (size_t index = 0; index < input_dims.size(); index++) { + for (int index = 0; index < input_dims.size(); index++) { if (input_dims[index] == 1) { new_perm.push_back(index); } @@ -193,9 +193,9 @@ std::vector ComputeNewPerm(std::vector input_dims, DeleteArrayIfUnused(intermediate_name, model); } else { // The intermediate array is now the output array. - for (size_t i = 0; i < model->operators.size(); i++) { + for (int i = 0; i < model->operators.size(); i++) { Operator* consumer = model->operators[i].get(); - for (size_t j = 0; j < consumer->inputs.size(); j++) { + for (int j = 0; j < consumer->inputs.size(); j++) { if (consumer->inputs[j] == output_name) { consumer->inputs[j] = intermediate_name; } diff --git a/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc b/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc index 545c53fb31a..6e5815ee94d 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_batch_normalization.cc @@ -124,11 +124,11 @@ namespace toco { const auto& offset_float_data = offset_array.GetBuffer().data; - CHECK(static_cast(mul_float_data.size()) == buffer_size); - CHECK(static_cast(add_float_data.size()) == buffer_size); - CHECK(static_cast(mean_float_data.size()) == buffer_size); - CHECK(static_cast(multiplier_float_data.size()) == buffer_size); - CHECK(static_cast(offset_float_data.size()) == buffer_size); + CHECK(mul_float_data.size() == buffer_size); + CHECK(add_float_data.size() == buffer_size); + CHECK(mean_float_data.size() == buffer_size); + CHECK(multiplier_float_data.size() == buffer_size); + CHECK(offset_float_data.size() == buffer_size); for (int i = 0; i < buffer_size; i++) { mul_float_data[i] = multiplier_float_data[i]; diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc index 20e805a29e0..7c9aa025f64 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc @@ -64,7 +64,7 @@ void CopyTensorSegments(const std::vector& input_arrays, // Copy the data from input_arrays to concatenated_array_buffer. 
T* dest_ptr = concatenated_array_buffer.data(); for (int s = 0; s < total_copy_steps; s++) { - for (size_t i = 0; i < input_arrays.size(); i++) { + for (int i = 0; i < input_arrays.size(); i++) { std::copy(src_ptr[i], src_ptr[i] + array_copy_size[i], dest_ptr); src_ptr[i] += array_copy_size[i]; dest_ptr += array_copy_size[i]; diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc index c6dc093ba00..0df35509d3d 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_pack.cc @@ -36,7 +36,7 @@ void Pack(Model* model, PackOperator const& op) { // Pack inputs into buffer CHECK_EQ(op.axis, 0) << "Packing only supported along first axis"; int dst_offset = 0; - for (size_t i = 0; i < op.inputs.size(); i++) { + for (int i = 0; i < op.inputs.size(); i++) { // Append array data to output for each input array const auto& input_array = model->GetArray(op.inputs[i]); int input_size = RequiredBufferSizeForShape(input_array.shape()); diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc index 34a1a1ce899..fd71fb1873a 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_slice.cc @@ -50,7 +50,7 @@ bool Slice(SliceOperator const& op, Array const& input_array, CHECK_LE(size.size(), 4); std::vector begin = op.begin; std::vector end; - for (size_t i = 0; i < begin.size(); ++i) { + for (int i = 0; i < begin.size(); ++i) { int dim_size = size[i]; if (dim_size == -1) { // -1 means the rest of the dimension. diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc index a822f7b79e3..7ceffe6307e 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_transpose.cc @@ -40,7 +40,7 @@ void Transpose(Model* model, const Array& input_array, CHECK(input_shape.dimensions_count() == output_shape.dimensions_count()); const int dim = input_shape.dimensions_count(); CHECK_LE(dim, 4); - CHECK(static_cast(perm.size()) >= dim); + CHECK(perm.size() >= dim); for (int i = 0; i < dim; i++) { CHECK(perm[i] >= 0 && perm[i] < dim); CHECK(input_shape.dims(perm[i]) == output_shape.dims(i)); diff --git a/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc b/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc index 4d6cd188729..197e17eee16 100644 --- a/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc +++ b/tensorflow/lite/toco/graph_transformations/resolve_constant_unary.cc @@ -62,7 +62,7 @@ void ReduceGeneric(bool keep_dims, const std::vector& axes, } std::vector output_indices(input_shape.dimensions_count()); - for (size_t input_offset = 0; input_offset < input.size(); ++input_offset) { + for (int input_offset = 0; input_offset < input.size(); ++input_offset) { std::vector input_indices = ReverseOffset(input_shape, input_offset); // Calculate the output location by squashing input indices to 0 // in reduced axes. 
@@ -319,7 +319,7 @@ bool CopyMinMaxFromFirstInput(const Operator& op, Model* model) { } else if (unary_op->type == OperatorType::kRelu6 || unary_op->type == OperatorType::kRelu1 || unary_op->type == OperatorType::kRelu) { - for (int i = 0; i < output_buffer_size; ++i) { + for (size_t i = 0; i < output_buffer_size; ++i) { const float value = (*input_float_data)[i]; float new_value = 0.0f; switch (unary_op->type) { diff --git a/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc b/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc index 84d5922aae8..1f7035c21e2 100644 --- a/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc +++ b/tensorflow/lite/toco/graph_transformations/unpartition_embedding_lookup.cc @@ -57,10 +57,10 @@ namespace toco { // Split up the DynamicStitch inputs into the indices and data. std::vector stitch_indices_inputs; std::vector stitch_data_inputs; - for (int i = 0; i < stitch_op->num_partitions; ++i) { + for (size_t i = 0; i < stitch_op->num_partitions; ++i) { stitch_indices_inputs.push_back(stitch_op->inputs[i]); } - for (int i = stitch_op->num_partitions; i < stitch_op->num_partitions * 2; + for (size_t i = stitch_op->num_partitions; i < stitch_op->num_partitions * 2; ++i) { stitch_data_inputs.push_back(stitch_op->inputs[i]); } diff --git a/tensorflow/lite/toco/model_cmdline_flags.cc b/tensorflow/lite/toco/model_cmdline_flags.cc index 351884fbf1e..2434481272f 100644 --- a/tensorflow/lite/toco/model_cmdline_flags.cc +++ b/tensorflow/lite/toco/model_cmdline_flags.cc @@ -263,7 +263,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector mean_values = absl::StrSplit(parsed_model_flags.mean_values.value(), ','); - QCHECK(static_cast(mean_values.size()) == model_flags->input_arrays_size()); + QCHECK(mean_values.size() == model_flags->input_arrays_size()); for (size_t i = 0; i < mean_values.size(); ++i) { char* last = nullptr; model_flags->mutable_input_arrays(i)->set_mean_value( @@ -280,7 +280,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector std_values = absl::StrSplit(parsed_model_flags.std_values.value(), ','); - QCHECK( static_cast(std_values.size()) == model_flags->input_arrays_size()); + QCHECK(std_values.size() == model_flags->input_arrays_size()); for (size_t i = 0; i < std_values.size(); ++i) { char* last = nullptr; model_flags->mutable_input_arrays(i)->set_std_value( @@ -298,7 +298,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector input_data_types = absl::StrSplit(parsed_model_flags.input_data_types.value(), ','); - QCHECK(static_cast(input_data_types.size()) == model_flags->input_arrays_size()); + QCHECK(input_data_types.size() == model_flags->input_arrays_size()); for (size_t i = 0; i < input_data_types.size(); ++i) { IODataType type; QCHECK(IODataType_Parse(input_data_types[i], &type)); @@ -321,7 +321,7 @@ void ReadModelFlagsFromCommandLineFlags( QCHECK(uses_multi_input_flags); std::vector input_shapes = absl::StrSplit(parsed_model_flags.input_shapes.value(), ':'); - QCHECK(static_cast(input_shapes.size()) == model_flags->input_arrays_size()); + QCHECK(input_shapes.size() == model_flags->input_arrays_size()); for (size_t i = 0; i < input_shapes.size(); ++i) { auto* shape = model_flags->mutable_input_arrays(i)->mutable_shape(); shape->clear_dims(); diff --git a/tensorflow/lite/toco/toco_cmdline_flags.cc b/tensorflow/lite/toco/toco_cmdline_flags.cc index 9697a1ecbbd..c133db8f2a4 
100644 --- a/tensorflow/lite/toco/toco_cmdline_flags.cc +++ b/tensorflow/lite/toco/toco_cmdline_flags.cc @@ -320,7 +320,7 @@ void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags, std::vector input_types = absl::StrSplit(parsed_toco_flags.input_types.value(), ','); QCHECK(!input_types.empty()); - for (size_t i = 1; i < input_types.size(); i++) { + for (int i = 1; i < input_types.size(); i++) { QCHECK_EQ(input_types[i], input_types[0]); } toco::IODataType input_type; From a2442ea4077e61a564ab598ac983f4160d9546be Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Mon, 15 Jun 2020 21:24:02 +0000 Subject: [PATCH 13/19] segragrating changes --- tensorflow/core/public/version.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 3724f06ba4b..9db20363349 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -108,7 +108,7 @@ limitations under the License. #define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 #define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 -#define TF_GRAPH_DEF_VERSION 409 // Updated: 2020/5/22 +#define TF_GRAPH_DEF_VERSION 408 // Updated: 2020/5/21 // Checkpoint compatibility versions (the versions field in SavedSliceMeta). // From 4c004feb3e9b08961d2e3e17639b30104800efd5 Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Mon, 15 Jun 2020 21:37:05 +0000 Subject: [PATCH 14/19] segragation attempt 4 --- tensorflow/core/framework/tensor_shape.cc | 2 +- tensorflow/core/lib/io/random_inputstream.cc | 2 +- tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc | 2 +- tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc | 6 +++--- tensorflow/core/lib/io/zlib_outputbuffer.cc | 6 +++--- tensorflow/core/platform/env.cc | 2 +- tensorflow/core/platform/file_system.cc | 2 +- tensorflow/core/platform/file_system_helper.cc | 2 +- tensorflow/core/platform/status.cc | 4 +++- tensorflow/core/profiler/internal/parse_annotation.cc | 2 +- tensorflow/core/public/version.h | 2 +- 11 files changed, 17 insertions(+), 15 deletions(-) diff --git a/tensorflow/core/framework/tensor_shape.cc b/tensorflow/core/framework/tensor_shape.cc index f4b440e9cd1..8040a316a45 100644 --- a/tensorflow/core/framework/tensor_shape.cc +++ b/tensorflow/core/framework/tensor_shape.cc @@ -182,7 +182,7 @@ void TensorShapeBase::InitDims(gtl::ArraySlice dim_sizes) { // Allow sizes that are under kint64max^0.25 so that 4-way multiplication // below cannot overflow. 
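On the tensor_shape.cc hunk that follows: the signedness of kMaxSmall decides how the `s > kMaxSmall` comparison treats a negative dimension size. A self-contained sketch of the same check, using standard-library names in place of TensorFlow's (kint64max becomes INT64_MAX):

#include <cstdint>
#include <cstdio>

static const int64_t kMaxSmall = 0xd744;
// 0xd744^4 is just below INT64_MAX, which is what the patch's
// static_assert verifies so the 4-way product cannot overflow.
static_assert(kMaxSmall * kMaxSmall * kMaxSmall * kMaxSmall <= INT64_MAX,
              "bad overflow check");

int main() {
  int64_t s = -1;  // e.g. an unknown/negative dimension
  // With a signed constant this is false; if kMaxSmall were uint64_t, s
  // would convert to 2^64 - 1 and the check would wrongly report a large
  // size.
  std::printf("%d\n", s > kMaxSmall);  // prints 0
  return 0;
}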
- static const uint64 kMaxSmall = 0xd744; + static const int64 kMaxSmall = 0xd744; static_assert(kMaxSmall * kMaxSmall * kMaxSmall * kMaxSmall <= kint64max, "bad overflow check"); bool large_size = false; diff --git a/tensorflow/core/lib/io/random_inputstream.cc b/tensorflow/core/lib/io/random_inputstream.cc index 10f734a5bae..bd0054ce753 100644 --- a/tensorflow/core/lib/io/random_inputstream.cc +++ b/tensorflow/core/lib/io/random_inputstream.cc @@ -92,7 +92,7 @@ Status RandomAccessInputStream::SkipNBytes(int64 bytes_to_skip) { } else { return s; } - if (data.size() < bytes_to_read) { + if (data.size() < static_cast(bytes_to_read)) { return errors::OutOfRange("reached end of file"); } bytes_to_skip -= bytes_to_read; diff --git a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc index a331d4173cf..53939f2d8a3 100644 --- a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc @@ -134,7 +134,7 @@ Status SnappyInputBuffer::ReadCompressedBlockLength(uint32* length) { } size_t readable = std::min(bytes_to_read, avail_in_); - for (int i = 0; i < readable; i++) { + for (size_t i = 0; i < readable; i++) { // The "unsigned char" type cast is intentional to avoid implicit type // casting of the signed char to unsigned int during bitwise OR which // causes weird overflow errors. diff --git a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc index 563503a1319..fe3a53c6c25 100644 --- a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc @@ -76,7 +76,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { // If there is sufficient free space in input_buffer_ to fit data we // add it there and return. - if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -87,7 +87,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered()); // input_buffer_ should be empty at this point. 
- if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -144,7 +144,7 @@ void SnappyOutputBuffer::AddToInputBuffer(StringPiece data) { const int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (bytes_to_write > free_tail_bytes) { + if (static_cast(bytes_to_write) > free_tail_bytes) { memmove(input_buffer_.get(), next_in_, avail_in_); next_in_ = input_buffer_.get(); } diff --git a/tensorflow/core/lib/io/zlib_outputbuffer.cc b/tensorflow/core/lib/io/zlib_outputbuffer.cc index 5840ca60242..d475d0eaa5c 100644 --- a/tensorflow/core/lib/io/zlib_outputbuffer.cc +++ b/tensorflow/core/lib/io/zlib_outputbuffer.cc @@ -98,7 +98,7 @@ void ZlibOutputBuffer::AddToInputBuffer(StringPiece data) { int32 unread_bytes = z_stream_->avail_in; int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (bytes_to_write > free_tail_bytes) { + if (static_cast(bytes_to_write) > free_tail_bytes) { memmove(z_stream_input_.get(), z_stream_->next_in, z_stream_->avail_in); z_stream_->next_in = z_stream_input_.get(); } @@ -154,7 +154,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { size_t bytes_to_write = data.size(); - if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -162,7 +162,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered(zlib_options_.flush_mode)); // At this point input stream should be empty. - if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc index b29cad05459..05d95ba0425 100644 --- a/tensorflow/core/platform/env.cc +++ b/tensorflow/core/platform/env.cc @@ -214,7 +214,7 @@ bool Env::FilesExist(const std::vector& files, } if (fs_status) { result &= fs_result; - for (int i = 0; i < itr.second.size(); ++i) { + for (size_t i = 0; i < itr.second.size(); ++i) { per_file_status[itr.second[i]] = fs_status->at(i); } } else if (!fs_result) { diff --git a/tensorflow/core/platform/file_system.cc b/tensorflow/core/platform/file_system.cc index 9e96ceedbdc..c9657e2339f 100644 --- a/tensorflow/core/platform/file_system.cc +++ b/tensorflow/core/platform/file_system.cc @@ -308,7 +308,7 @@ StringPiece FileSystem::Basename(StringPiece path) const { StringPiece FileSystem::Extension(StringPiece path) const { StringPiece basename = this->Basename(path); - int pos = basename.rfind('.'); + size_t pos = basename.rfind('.'); if (pos == StringPiece::npos) { return StringPiece(path.data() + path.size(), 0); } else { diff --git a/tensorflow/core/platform/file_system_helper.cc b/tensorflow/core/platform/file_system_helper.cc index 64b175c4d17..909752389e1 100644 --- a/tensorflow/core/platform/file_system_helper.cc +++ b/tensorflow/core/platform/file_system_helper.cc @@ -103,7 +103,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, children_dir_status[i] = fs->IsDirectory(child_path); } }); - for (int i = 0; i < children.size(); ++i) { + for (size_t i = 0; i < children.size(); ++i) { const string child_path = io::JoinPath(current_dir, children[i]); // If the IsDirectory call was cancelled we bail. 
if (children_dir_status[i].code() == tensorflow::error::CANCELLED) { diff --git a/tensorflow/core/platform/status.cc b/tensorflow/core/platform/status.cc index 756b8314148..e303c18091c 100644 --- a/tensorflow/core/platform/status.cc +++ b/tensorflow/core/platform/status.cc @@ -74,7 +74,9 @@ class StatusLogSink : public TFLogSink { mutex_lock lock(mu_); messages_.emplace_back(entry.ToString()); - if (messages_.size() > num_messages_) messages_.pop_front(); + if (messages_.size() > static_cast(num_messages_)){ + messages_.pop_front(); + } } private: diff --git a/tensorflow/core/profiler/internal/parse_annotation.cc b/tensorflow/core/profiler/internal/parse_annotation.cc index 32c26befa3d..a4cdc09739d 100644 --- a/tensorflow/core/profiler/internal/parse_annotation.cc +++ b/tensorflow/core/profiler/internal/parse_annotation.cc @@ -50,7 +50,7 @@ std::vector SplitNameAndMetadata( std::vector SplitPairs(absl::string_view metadata) { std::vector key_value_pairs; std::stack quotes; - int start = 0, end = 0; + size_t start = 0, end = 0; for (; end < metadata.size(); ++end) { char ch = metadata[end]; switch (ch) { diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h index 9db20363349..3724f06ba4b 100644 --- a/tensorflow/core/public/version.h +++ b/tensorflow/core/public/version.h @@ -108,7 +108,7 @@ limitations under the License. #define TF_GRAPH_DEF_VERSION_MIN_PRODUCER 0 #define TF_GRAPH_DEF_VERSION_MIN_CONSUMER 0 -#define TF_GRAPH_DEF_VERSION 408 // Updated: 2020/5/21 +#define TF_GRAPH_DEF_VERSION 409 // Updated: 2020/5/22 // Checkpoint compatibility versions (the versions field in SavedSliceMeta). // From 0096d0a19b5543b368a5d2426cb2810931913272 Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Mon, 15 Jun 2020 21:49:18 +0000 Subject: [PATCH 15/19] final segratation --- tensorflow/compiler/xla/window_util.cc | 2 +- tensorflow/core/framework/tensor_shape.cc | 2 +- tensorflow/core/lib/io/random_inputstream.cc | 2 +- tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc | 2 +- tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc | 6 +++--- tensorflow/core/lib/io/zlib_outputbuffer.cc | 6 +++--- tensorflow/core/platform/env.cc | 2 +- tensorflow/core/platform/file_system.cc | 2 +- tensorflow/core/platform/file_system_helper.cc | 2 +- tensorflow/core/platform/status.cc | 4 +--- tensorflow/core/profiler/internal/parse_annotation.cc | 2 +- 11 files changed, 15 insertions(+), 17 deletions(-) diff --git a/tensorflow/compiler/xla/window_util.cc b/tensorflow/compiler/xla/window_util.cc index e33d0b6d1dc..a58179c3ee0 100644 --- a/tensorflow/compiler/xla/window_util.cc +++ b/tensorflow/compiler/xla/window_util.cc @@ -42,7 +42,7 @@ Window MakeWindow(absl::Span sizes, absl::Span strides) { Window window; CHECK_EQ(sizes.size(), strides.size()); - for (auto nb = 0; static_cast(nb) < sizes.size(); ++nb) { + for (auto nb = 0; nb < sizes.size(); ++nb) { auto* dimension = window.add_dimensions(); dimension->set_size(sizes[nb]); dimension->set_stride(strides[nb]); diff --git a/tensorflow/core/framework/tensor_shape.cc b/tensorflow/core/framework/tensor_shape.cc index 8040a316a45..f4b440e9cd1 100644 --- a/tensorflow/core/framework/tensor_shape.cc +++ b/tensorflow/core/framework/tensor_shape.cc @@ -182,7 +182,7 @@ void TensorShapeBase::InitDims(gtl::ArraySlice dim_sizes) { // Allow sizes that are under kint64max^0.25 so that 4-way multiplication // below cannot overflow. 
- static const int64 kMaxSmall = 0xd744; + static const uint64 kMaxSmall = 0xd744; static_assert(kMaxSmall * kMaxSmall * kMaxSmall * kMaxSmall <= kint64max, "bad overflow check"); bool large_size = false; diff --git a/tensorflow/core/lib/io/random_inputstream.cc b/tensorflow/core/lib/io/random_inputstream.cc index bd0054ce753..10f734a5bae 100644 --- a/tensorflow/core/lib/io/random_inputstream.cc +++ b/tensorflow/core/lib/io/random_inputstream.cc @@ -92,7 +92,7 @@ Status RandomAccessInputStream::SkipNBytes(int64 bytes_to_skip) { } else { return s; } - if (data.size() < static_cast(bytes_to_read)) { + if (data.size() < bytes_to_read) { return errors::OutOfRange("reached end of file"); } bytes_to_skip -= bytes_to_read; diff --git a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc index 53939f2d8a3..a331d4173cf 100644 --- a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc @@ -134,7 +134,7 @@ Status SnappyInputBuffer::ReadCompressedBlockLength(uint32* length) { } size_t readable = std::min(bytes_to_read, avail_in_); - for (size_t i = 0; i < readable; i++) { + for (int i = 0; i < readable; i++) { // The "unsigned char" type cast is intentional to avoid implicit type // casting of the signed char to unsigned int during bitwise OR which // causes weird overflow errors. diff --git a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc index fe3a53c6c25..563503a1319 100644 --- a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc @@ -76,7 +76,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { // If there is sufficient free space in input_buffer_ to fit data we // add it there and return. - if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -87,7 +87,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered()); // input_buffer_ should be empty at this point. 
- if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -144,7 +144,7 @@ void SnappyOutputBuffer::AddToInputBuffer(StringPiece data) { const int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (static_cast(bytes_to_write) > free_tail_bytes) { + if (bytes_to_write > free_tail_bytes) { memmove(input_buffer_.get(), next_in_, avail_in_); next_in_ = input_buffer_.get(); } diff --git a/tensorflow/core/lib/io/zlib_outputbuffer.cc b/tensorflow/core/lib/io/zlib_outputbuffer.cc index d475d0eaa5c..5840ca60242 100644 --- a/tensorflow/core/lib/io/zlib_outputbuffer.cc +++ b/tensorflow/core/lib/io/zlib_outputbuffer.cc @@ -98,7 +98,7 @@ void ZlibOutputBuffer::AddToInputBuffer(StringPiece data) { int32 unread_bytes = z_stream_->avail_in; int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (static_cast(bytes_to_write) > free_tail_bytes) { + if (bytes_to_write > free_tail_bytes) { memmove(z_stream_input_.get(), z_stream_->next_in, z_stream_->avail_in); z_stream_->next_in = z_stream_input_.get(); } @@ -154,7 +154,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { size_t bytes_to_write = data.size(); - if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -162,7 +162,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered(zlib_options_.flush_mode)); // At this point input stream should be empty. - if (static_cast(bytes_to_write) <= AvailableInputSpace()) { + if (bytes_to_write <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc index 05d95ba0425..b29cad05459 100644 --- a/tensorflow/core/platform/env.cc +++ b/tensorflow/core/platform/env.cc @@ -214,7 +214,7 @@ bool Env::FilesExist(const std::vector& files, } if (fs_status) { result &= fs_result; - for (size_t i = 0; i < itr.second.size(); ++i) { + for (int i = 0; i < itr.second.size(); ++i) { per_file_status[itr.second[i]] = fs_status->at(i); } } else if (!fs_result) { diff --git a/tensorflow/core/platform/file_system.cc b/tensorflow/core/platform/file_system.cc index c9657e2339f..9e96ceedbdc 100644 --- a/tensorflow/core/platform/file_system.cc +++ b/tensorflow/core/platform/file_system.cc @@ -308,7 +308,7 @@ StringPiece FileSystem::Basename(StringPiece path) const { StringPiece FileSystem::Extension(StringPiece path) const { StringPiece basename = this->Basename(path); - size_t pos = basename.rfind('.'); + int pos = basename.rfind('.'); if (pos == StringPiece::npos) { return StringPiece(path.data() + path.size(), 0); } else { diff --git a/tensorflow/core/platform/file_system_helper.cc b/tensorflow/core/platform/file_system_helper.cc index 909752389e1..64b175c4d17 100644 --- a/tensorflow/core/platform/file_system_helper.cc +++ b/tensorflow/core/platform/file_system_helper.cc @@ -103,7 +103,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, children_dir_status[i] = fs->IsDirectory(child_path); } }); - for (size_t i = 0; i < children.size(); ++i) { + for (int i = 0; i < children.size(); ++i) { const string child_path = io::JoinPath(current_dir, children[i]); // If the IsDirectory call was cancelled we bail. 
if (children_dir_status[i].code() == tensorflow::error::CANCELLED) { diff --git a/tensorflow/core/platform/status.cc b/tensorflow/core/platform/status.cc index e303c18091c..756b8314148 100644 --- a/tensorflow/core/platform/status.cc +++ b/tensorflow/core/platform/status.cc @@ -74,9 +74,7 @@ class StatusLogSink : public TFLogSink { mutex_lock lock(mu_); messages_.emplace_back(entry.ToString()); - if (messages_.size() > static_cast(num_messages_)){ - messages_.pop_front(); - } + if (messages_.size() > num_messages_) messages_.pop_front(); } private: diff --git a/tensorflow/core/profiler/internal/parse_annotation.cc b/tensorflow/core/profiler/internal/parse_annotation.cc index a4cdc09739d..32c26befa3d 100644 --- a/tensorflow/core/profiler/internal/parse_annotation.cc +++ b/tensorflow/core/profiler/internal/parse_annotation.cc @@ -50,7 +50,7 @@ std::vector SplitNameAndMetadata( std::vector SplitPairs(absl::string_view metadata) { std::vector key_value_pairs; std::stack quotes; - size_t start = 0, end = 0; + int start = 0, end = 0; for (; end < metadata.size(); ++end) { char ch = metadata[end]; switch (ch) { From caf465347e8e9520f1b810809f88f0ea229ba835 Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Mon, 15 Jun 2020 23:39:29 +0000 Subject: [PATCH 16/19] segregation attempt 5 --- tensorflow/core/framework/tensor_shape.cc | 2 +- tensorflow/core/lib/io/inputbuffer.cc | 6 +++--- tensorflow/core/lib/io/random_inputstream.cc | 2 +- tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc | 2 +- tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc | 6 +++--- tensorflow/core/lib/io/zlib_outputbuffer.cc | 6 +++--- tensorflow/core/platform/env.cc | 2 +- tensorflow/core/platform/file_system.cc | 2 +- tensorflow/core/platform/file_system_helper.cc | 2 +- tensorflow/core/platform/status.cc | 4 +++- tensorflow/core/profiler/internal/parse_annotation.cc | 2 +- 11 files changed, 19 insertions(+), 17 deletions(-) diff --git a/tensorflow/core/framework/tensor_shape.cc b/tensorflow/core/framework/tensor_shape.cc index f4b440e9cd1..79d0cc0822d 100644 --- a/tensorflow/core/framework/tensor_shape.cc +++ b/tensorflow/core/framework/tensor_shape.cc @@ -187,7 +187,7 @@ void TensorShapeBase::InitDims(gtl::ArraySlice dim_sizes) { "bad overflow check"); bool large_size = false; for (auto s : dim_sizes) { - if (s > kMaxSmall) { + if (static_cast(s) > static_cast(kMaxSmall)) { large_size = true; break; } diff --git a/tensorflow/core/lib/io/inputbuffer.cc b/tensorflow/core/lib/io/inputbuffer.cc index 2b138b825e4..d005ee11d78 100644 --- a/tensorflow/core/lib/io/inputbuffer.cc +++ b/tensorflow/core/lib/io/inputbuffer.cc @@ -85,7 +85,7 @@ Status InputBuffer::ReadNBytes(int64 bytes_to_read, string* result) { result->resize(bytes_to_read); size_t bytes_read = 0; Status status = ReadNBytes(bytes_to_read, &(*result)[0], &bytes_read); - if (bytes_read < bytes_to_read) result->resize(bytes_read); + if (static_cast(bytes_read) < bytes_to_read) result->resize(bytes_read); return status; } @@ -204,7 +204,7 @@ Status InputBuffer::Hint(int64 bytes_to_read) { } // The internal buffer is too small. Do nothing. 
- if (bytes_to_read > size_) { + if (bytes_to_read > static_cast(size_)) { return Status::OK(); } @@ -230,7 +230,7 @@ Status InputBuffer::Hint(int64 bytes_to_read) { limit_ += data.size(); file_pos_ += data.size(); - if (errors::IsOutOfRange(s) && data.size() == bytes_to_read) { + if (errors::IsOutOfRange(s) && data.size() == static_cast(bytes_to_read)) { return Status::OK(); } else { return s; diff --git a/tensorflow/core/lib/io/random_inputstream.cc b/tensorflow/core/lib/io/random_inputstream.cc index 10f734a5bae..bd0054ce753 100644 --- a/tensorflow/core/lib/io/random_inputstream.cc +++ b/tensorflow/core/lib/io/random_inputstream.cc @@ -92,7 +92,7 @@ Status RandomAccessInputStream::SkipNBytes(int64 bytes_to_skip) { } else { return s; } - if (data.size() < bytes_to_read) { + if (data.size() < static_cast(bytes_to_read)) { return errors::OutOfRange("reached end of file"); } bytes_to_skip -= bytes_to_read; diff --git a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc index a331d4173cf..53939f2d8a3 100644 --- a/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_inputbuffer.cc @@ -134,7 +134,7 @@ Status SnappyInputBuffer::ReadCompressedBlockLength(uint32* length) { } size_t readable = std::min(bytes_to_read, avail_in_); - for (int i = 0; i < readable; i++) { + for (size_t i = 0; i < readable; i++) { // The "unsigned char" type cast is intentional to avoid implicit type // casting of the signed char to unsigned int during bitwise OR which // causes weird overflow errors. diff --git a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc index 563503a1319..fe3a53c6c25 100644 --- a/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc +++ b/tensorflow/core/lib/io/snappy/snappy_outputbuffer.cc @@ -76,7 +76,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { // If there is sufficient free space in input_buffer_ to fit data we // add it there and return. - if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -87,7 +87,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered()); // input_buffer_ should be empty at this point. 
- if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -144,7 +144,7 @@ void SnappyOutputBuffer::AddToInputBuffer(StringPiece data) { const int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (bytes_to_write > free_tail_bytes) { + if (static_cast(bytes_to_write) > free_tail_bytes) { memmove(input_buffer_.get(), next_in_, avail_in_); next_in_ = input_buffer_.get(); } diff --git a/tensorflow/core/lib/io/zlib_outputbuffer.cc b/tensorflow/core/lib/io/zlib_outputbuffer.cc index 5840ca60242..d475d0eaa5c 100644 --- a/tensorflow/core/lib/io/zlib_outputbuffer.cc +++ b/tensorflow/core/lib/io/zlib_outputbuffer.cc @@ -98,7 +98,7 @@ void ZlibOutputBuffer::AddToInputBuffer(StringPiece data) { int32 unread_bytes = z_stream_->avail_in; int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes); - if (bytes_to_write > free_tail_bytes) { + if (static_cast(bytes_to_write) > free_tail_bytes) { memmove(z_stream_input_.get(), z_stream_->next_in, z_stream_->avail_in); z_stream_->next_in = z_stream_input_.get(); } @@ -154,7 +154,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { size_t bytes_to_write = data.size(); - if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } @@ -162,7 +162,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) { TF_RETURN_IF_ERROR(DeflateBuffered(zlib_options_.flush_mode)); // At this point input stream should be empty. - if (bytes_to_write <= AvailableInputSpace()) { + if (static_cast(bytes_to_write) <= AvailableInputSpace()) { AddToInputBuffer(data); return Status::OK(); } diff --git a/tensorflow/core/platform/env.cc b/tensorflow/core/platform/env.cc index b29cad05459..05d95ba0425 100644 --- a/tensorflow/core/platform/env.cc +++ b/tensorflow/core/platform/env.cc @@ -214,7 +214,7 @@ bool Env::FilesExist(const std::vector& files, } if (fs_status) { result &= fs_result; - for (int i = 0; i < itr.second.size(); ++i) { + for (size_t i = 0; i < itr.second.size(); ++i) { per_file_status[itr.second[i]] = fs_status->at(i); } } else if (!fs_result) { diff --git a/tensorflow/core/platform/file_system.cc b/tensorflow/core/platform/file_system.cc index 9e96ceedbdc..c9657e2339f 100644 --- a/tensorflow/core/platform/file_system.cc +++ b/tensorflow/core/platform/file_system.cc @@ -308,7 +308,7 @@ StringPiece FileSystem::Basename(StringPiece path) const { StringPiece FileSystem::Extension(StringPiece path) const { StringPiece basename = this->Basename(path); - int pos = basename.rfind('.'); + size_t pos = basename.rfind('.'); if (pos == StringPiece::npos) { return StringPiece(path.data() + path.size(), 0); } else { diff --git a/tensorflow/core/platform/file_system_helper.cc b/tensorflow/core/platform/file_system_helper.cc index 64b175c4d17..909752389e1 100644 --- a/tensorflow/core/platform/file_system_helper.cc +++ b/tensorflow/core/platform/file_system_helper.cc @@ -103,7 +103,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, children_dir_status[i] = fs->IsDirectory(child_path); } }); - for (int i = 0; i < children.size(); ++i) { + for (size_t i = 0; i < children.size(); ++i) { const string child_path = io::JoinPath(current_dir, children[i]); // If the IsDirectory call was cancelled we bail. 
if (children_dir_status[i].code() == tensorflow::error::CANCELLED) { diff --git a/tensorflow/core/platform/status.cc b/tensorflow/core/platform/status.cc index 756b8314148..e303c18091c 100644 --- a/tensorflow/core/platform/status.cc +++ b/tensorflow/core/platform/status.cc @@ -74,7 +74,9 @@ class StatusLogSink : public TFLogSink { mutex_lock lock(mu_); messages_.emplace_back(entry.ToString()); - if (messages_.size() > num_messages_) messages_.pop_front(); + if (messages_.size() > static_cast(num_messages_)){ + messages_.pop_front(); + } } private: diff --git a/tensorflow/core/profiler/internal/parse_annotation.cc b/tensorflow/core/profiler/internal/parse_annotation.cc index 32c26befa3d..a4cdc09739d 100644 --- a/tensorflow/core/profiler/internal/parse_annotation.cc +++ b/tensorflow/core/profiler/internal/parse_annotation.cc @@ -50,7 +50,7 @@ std::vector SplitNameAndMetadata( std::vector SplitPairs(absl::string_view metadata) { std::vector key_value_pairs; std::stack quotes; - int start = 0, end = 0; + size_t start = 0, end = 0; for (; end < metadata.size(); ++end) { char ch = metadata[end]; switch (ch) { From e0b43845d711e9dc520b9b6716ff89c3b4cd631f Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Tue, 16 Jun 2020 03:51:57 +0000 Subject: [PATCH 17/19] Sign compare warning fixes batch 2 --- .../core/grappler/costs/graph_memory.cc | 5 +- .../core/grappler/costs/graph_properties.cc | 59 +++++++++++-------- .../grappler/costs/op_level_cost_estimator.cc | 8 +-- .../core/grappler/costs/virtual_scheduler.cc | 14 +++-- .../optimizers/common_subgraph_elimination.cc | 2 +- .../grappler/optimizers/debug_stripper.cc | 2 +- .../grappler/optimizers/function_optimizer.cc | 10 ++-- .../core/grappler/optimizers/model_pruner.cc | 3 +- .../optimizers/pin_to_host_optimizer.cc | 3 +- .../grappler/optimizers/shape_optimizer.cc | 3 +- tensorflow/core/grappler/utils.cc | 2 +- tensorflow/core/grappler/utils/graph_view.cc | 37 +++++++----- .../core/grappler/utils/graph_view_internal.h | 34 +++++++---- .../core/grappler/utils/topological_sort.cc | 4 +- .../sql/sqlite_query_connection.cc | 2 +- tensorflow/python/grappler/model_analyzer.cc | 4 +- 16 files changed, 113 insertions(+), 79 deletions(-) diff --git a/tensorflow/core/grappler/costs/graph_memory.cc b/tensorflow/core/grappler/costs/graph_memory.cc index 020e8cf1d1f..768a025b0e6 100644 --- a/tensorflow/core/grappler/costs/graph_memory.cc +++ b/tensorflow/core/grappler/costs/graph_memory.cc @@ -255,7 +255,8 @@ void GraphMemory::InferFromTrace(const StepStats& timeline) { std::unordered_set live_at_peak; size_t current = 0; std::unordered_set currently_live; - for (int i = 0; i < events.size(); ++i) { + int events_size = events.size(); + for (int i = 0; i < events_size; ++i) { const auto& event = events[i]; if (event.allocated) { @@ -271,7 +272,7 @@ void GraphMemory::InferFromTrace(const StepStats& timeline) { current -= event.tensor->memory_used; currently_live.erase(event.tensor); } - if (i + 1 == events.size() || + if (i + 1 == events_size || event.timestamp != events[i + 1].timestamp) { if (current > peak) { peak = current; diff --git a/tensorflow/core/grappler/costs/graph_properties.cc b/tensorflow/core/grappler/costs/graph_properties.cc index ee691e7a081..0c14607e9e2 100644 --- a/tensorflow/core/grappler/costs/graph_properties.cc +++ b/tensorflow/core/grappler/costs/graph_properties.cc @@ -363,7 +363,7 @@ void VerboseLogUnknownDimensionSources( std::vector ReplaceUnknownDimFromConstWithUnknownDim( InferenceContext* ic, const std::vector& shapes) { 
std::vector converted_shapes(shapes.size()); - for (int i = 0; i < shapes.size(); i++) { + for (int i = 0, shapes_size = shapes.size(); i < shapes_size; i++) { const auto& shape = shapes[i]; if (!ic->RankKnown(shape)) { converted_shapes[i] = shape; @@ -502,7 +502,7 @@ class TopoQueue { const std::vector& topo_order) const { absl::flat_hash_map map; map.reserve(topo_order.size()); - for (int i = 0; i < topo_order.size(); ++i) { + for (int i = 0, topo_order_size = topo_order.size(); i < topo_order_size; ++i) { map.emplace(topo_order[i], i); } return map; @@ -680,14 +680,16 @@ class SymbolicShapeRefiner { ", shape: ", ic->DebugString(ic->input(i)), ", tensor: "); Tensor t1; - if (input_tensor_protos.size() > i && + int input_tensor_protos_size = input_tensor_protos.size(); + if (input_tensor_protos_size > i && input_tensor_protos.at(i) != nullptr && t1.FromProto(*input_tensor_protos.at(i))) { absl::StrAppend(&output, t1.DebugString(), ", tensor_as_shape: "); } else { absl::StrAppend(&output, " null, tensor_as_shape: "); } - if (input_tensors_as_shapes_to_propagate.size() > i) { + int input_tensors_as_shapes_to_propagate_size = input_tensors_as_shapes_to_propagate.size(); + if (input_tensors_as_shapes_to_propagate_size > i) { absl::StrAppend( &output, StringifyShapeHandle(input_tensors_as_shapes_to_propagate.at(i)), @@ -702,14 +704,16 @@ class SymbolicShapeRefiner { ", shape: ", ic->DebugString(ic->output(i)), ", tensor: "); Tensor t2; - if (output_tensor_protos.size() > i && + int output_tensor_protos_size = output_tensor_protos.size(); + if (output_tensor_protos_size > i && output_tensor_protos.at(i) != nullptr && t2.FromProto(*output_tensor_protos.at(i))) { absl::StrAppend(&output, t2.DebugString(), ", tensor_as_shape: "); } else { absl::StrAppend(&output, " null, tensor_as_shape: "); } - if (output_tensors_as_shapes.size() > i) { + int output_tensors_as_shapes_size = output_tensors_as_shapes.size(); + if (output_tensors_as_shapes_size > i) { absl::StrAppend(&output, StringifyShapeHandle(output_tensors_as_shapes.at(i)), "\n"); @@ -779,7 +783,7 @@ class SymbolicShapeRefiner { MutableGraphView gv(&grappler_function_item.graph); // Forward shapes from function input nodes to argument nodes. 
- for (int i = 0; i < grappler_function_item.inputs().size(); ++i) { + for (int i = 0, iter_limit = grappler_function_item.inputs().size(); i < iter_limit; ++i) { auto& fun_input = grappler_function_item.input(i); NodeDef* fun_node = gv.GetNode(fun_input.node_name); const TensorId input_tensor = ParseTensorName(function_node->input(i)); @@ -858,13 +862,13 @@ class SymbolicShapeRefiner { if (IsConstant(*input_node)) { TF_CHECK_OK( ReplaceInputWithConst(*input_node, i, &grappler_function_item)); - } else if (ctx->input_tensor_protos.size() > i && + } else if (static_cast(ctx->input_tensor_protos.size()) > i && ctx->input_tensor_protos[i] != nullptr) { NodeDef const_input_node = MakeConstNodeDefFromTensorProto( ic, *ctx->input_tensor_protos[i], ctx->input_types[i]); TF_CHECK_OK(ReplaceInputWithConst(const_input_node, i, &grappler_function_item)); - } else if (ic->input_tensors_as_shapes().size() > i && + } else if (static_cast(ic->input_tensors_as_shapes().size()) > i && IsShapeFullyDefinedIntegerVectorOrScalar( ic, ic->input(i), ic->input_tensors_as_shapes()[i], ctx->input_types[i])) { @@ -912,7 +916,8 @@ class SymbolicShapeRefiner { } auto output_properties = gp.GetOutputProperties(retnode->name()); - if (out_tensor.index() >= output_properties.size()) { + int output_properties_size = output_properties.size(); + if (out_tensor.index() >= output_properties_size) { return errors::InvalidArgument( out_tensor.ToString(), " has invalid position ", out_tensor.index(), " (output_properties.size() = ", output_properties.size(), ")."); @@ -975,12 +980,12 @@ class SymbolicShapeRefiner { // NodeContext: // output_tensor_protos to input_tensor_protos and input_tensors, and // output_tensors_as_shapes to input_tensors_as_shapes. - if (src_ctx->output_tensors_as_shapes.size() > src_output) { + if (static_cast(src_ctx->output_tensors_as_shapes.size()) > src_output) { ctx->input_tensors_as_shapes_to_propagate[dst_input] = src_ctx->output_tensors_as_shapes[src_output]; } - if (src_ctx->output_tensor_protos.size() > src_output) { + if (static_cast(src_ctx->output_tensor_protos.size()) > src_output) { const auto* tensor_proto = src_ctx->output_tensor_protos[src_output]; if (tensor_proto != nullptr) { ctx->input_tensor_protos[dst_input] = tensor_proto; @@ -1233,7 +1238,7 @@ class SymbolicShapeRefiner { if (st1.size() != st2.size()) { return false; } - for (int i = 0; i < st1.size(); ++i) { + for (int i = 0, st1_size = st1.size(); i < st1_size; ++i) { const ShapeAndType& s1 = st1[i]; const ShapeAndType& s2 = st2[i]; if (s1.dtype != s2.dtype) { @@ -1268,13 +1273,13 @@ class SymbolicShapeRefiner { return Status::OK(); } - if (grappler_function_item.inputs().size() > function_node->input_size()) { + if (static_cast(grappler_function_item.inputs().size()) > function_node->input_size()) { return errors::FailedPrecondition( "Function input size should be smaller than node input size."); } - for (int i = grappler_function_item.inputs().size(); - i < function_node->input_size(); ++i) { + for (int i = grappler_function_item.inputs().size(), iter_limit = function_node->input_size(); + i < iter_limit; ++i) { const string& input = function_node->input(i); if (!IsControlInput(input)) { return errors::FailedPrecondition( @@ -1357,18 +1362,20 @@ class SymbolicShapeRefiner { // Returns true if all the output tensors have known values. 
bool AllOutputValuesKnown(NodeContext* c) { InferenceContext* ic = c->inference_context.get(); - if (c->output_tensors_as_shapes.size() < ic->num_outputs() && - c->output_tensor_protos.size() < ic->num_outputs()) { + int c_output_tensors_as_shapes_size = c->output_tensors_as_shapes.size(); + int c_output_tensor_protos_size = c->output_tensor_protos.size(); + if (c_output_tensors_as_shapes_size < ic->num_outputs() && + c_output_tensor_protos_size < ic->num_outputs()) { return false; } else { // Checks if we can get output value via either output_tensor_proto or // output_tensors_as_shapes. for (int i = 0; i < ic->num_outputs(); i++) { - if (c->output_tensor_protos.size() > i && + if (c_output_tensor_protos_size > i && c->output_tensor_protos[i] != nullptr) { continue; } - if (c->output_tensors_as_shapes.size() > i && + if (c_output_tensors_as_shapes_size > i && ic->FullyDefined(c->output_tensors_as_shapes[i])) { bool no_unknown_dim_from_const = true; for (int32 j = 0; j < ic->Rank(c->output_tensors_as_shapes[i]); ++j) { @@ -1539,7 +1546,7 @@ class SymbolicShapeRefiner { &resource_mgr_, &outputs)); c->output_tensors_as_shapes.resize(outputs.size()); c->output_tensor_protos.resize(outputs.size(), nullptr); - for (int k = 0; k < outputs.size(); k++) { + for (int k = 0, outputs_size = outputs.size(); k < outputs_size; k++) { const auto& t = outputs[k]; // Override output shape. ShapeHandle output_shape; @@ -2297,7 +2304,7 @@ Status GraphProperties::UpdateEnqueue( // TODO(bsteiner): handle EnqueueMany as well. std::vector shapes_and_types; - for (int i = 1; i < ctx->input_types.size(); ++i) { + for (int i = 1, iter_limit = ctx->input_types.size(); i < iter_limit; ++i) { GraphView::InputPort inp(enqueue_node, i); GraphView::OutputPort fanin = shape_refiner->graph().GetRegularFanin(inp); InferenceContext* in = shape_refiner->GetContext(fanin.node); @@ -2490,10 +2497,10 @@ Status GraphProperties::InferStatically(bool assume_valid_feeds, const TensorProto& raw_val = fanin.node->attr().at("value").tensor(); *input_properties[i].mutable_value() = raw_val; - } else if (ctx->input_tensor_protos.size() > i && + } else if (static_cast(ctx->input_tensor_protos.size()) > i && ctx->input_tensor_protos[i] != nullptr) { *input_properties[i].mutable_value() = *ctx->input_tensor_protos[i]; - } else if (ic->input_tensors_as_shapes().size() > i && + } else if (static_cast(ic->input_tensors_as_shapes().size()) > i && IsShapeFullyDefinedIntegerVectorOrScalar( ic, ic->input(i), ic->input_tensors_as_shapes()[i], ctx->input_types[i])) { @@ -2525,11 +2532,11 @@ Status GraphProperties::InferStatically(bool assume_valid_feeds, // TODO(rmlarsen): Eliminate this copy. 
const TensorProto& raw_val = node.attr().at("value").tensor(); *output_properties[i].mutable_value() = raw_val; - } else if (ctx->output_tensor_protos.size() > i && + } else if (static_cast(ctx->output_tensor_protos.size()) > i && ctx->output_tensor_protos[i] != nullptr) { *output_properties[i].mutable_value() = *ctx->output_tensor_protos[i]; - } else if (converted_output_tensors_as_shapes.size() > i && + } else if (static_cast(converted_output_tensors_as_shapes.size()) > i && IsShapeFullyDefinedIntegerVectorOrScalar( ic, ic->output(i), converted_output_tensors_as_shapes[i], diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc index b8b62cbd6e5..a62359025be 100644 --- a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc +++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc @@ -1470,8 +1470,8 @@ Costs OpLevelCostEstimator::PredictEinsum(const OpContext& op_context) const { (a_input.shape().dim_size() < matrix_rank) || (b_input.shape().dim_size() < matrix_rank); - if (a_input_str.size() != a_input_shape.dim_size() || - b_input_str.size() != b_input_shape.dim_size()) { + if (a_input_str.size() != static_cast(a_input_shape.dim_size()) || + b_input_str.size() != static_cast(b_input_shape.dim_size())) { VLOG(1) << "Missing accurate estimator for op: " << op_info.op() << ", equation subscripts don't match tensor rank."; return PredictCostOfAnUnknownOp(op_context); @@ -1513,7 +1513,7 @@ Costs OpLevelCostEstimator::PredictEinsum(const OpContext& op_context) const { n_dim.set_size(1); k_dim.set_size(1); - for (int i_idx = 0; i_idx < a_input_str.size(); ++i_idx) { + for (int i_idx = 0, a_input_str_size = a_input_str.size(); i_idx < a_input_str_size; ++i_idx) { if (b_input_str.find(a_input_str[i_idx]) == std::string::npos) { if (rhs_str.find(a_input_str[i_idx]) == std::string::npos) { VLOG(1) << "Missing accurate estimator for op: " << op_info.op(); @@ -1533,7 +1533,7 @@ Costs OpLevelCostEstimator::PredictEinsum(const OpContext& op_context) const { *(a_matrix_shape->add_dim()) = a_input_shape.dim(i_idx); *(b_matrix_shape->add_dim()) = a_input_shape.dim(i_idx); } - for (int i_idx = 0; i_idx < b_input_str.size(); ++i_idx) { + for (int i_idx = 0, b_input_str_size = b_input_str.size(); i_idx < b_input_str_size; ++i_idx) { if (a_input_str.find(b_input_str[i_idx]) == std::string::npos) { if (rhs_str.find(b_input_str[i_idx]) == std::string::npos) { VLOG(1) << "Missing accurate estimator for op: " << op_info.op(); diff --git a/tensorflow/core/grappler/costs/virtual_scheduler.cc b/tensorflow/core/grappler/costs/virtual_scheduler.cc index 5339b00627e..b20fad8b41c 100644 --- a/tensorflow/core/grappler/costs/virtual_scheduler.cc +++ b/tensorflow/core/grappler/costs/virtual_scheduler.cc @@ -522,8 +522,8 @@ Status SchedulerState::Init(const GrapplerItem* item, if (IsPersistent(*curr_node)) { auto& device_state = device_[curr_node_device]; - for (int port_num = 0; - port_num < curr_node_state.output_properties.size(); ++port_num) { + for (int port_num = 0, port_num_iter_limit = curr_node_state.output_properties.size(); + port_num < port_num_iter_limit; ++port_num) { device_state.persistent_nodes.insert( std::make_pair(curr_node, port_num)); } @@ -795,7 +795,8 @@ void SchedulerState::GetOutputNodes(const NodeDef* node, // Execute a node as soon as all its inputs are ready. Merge nodes are // special since they run as soon as one of their inputs becomes // available. 
- if (output_state.num_inputs_ready == output_state.inputs.size() || + int output_state_inputs_size = output_state.inputs.size(); + if (output_state.num_inputs_ready == output_state_inputs_size || IsMerge(*output_node)) { // This output node is now ready. output_state.time_ready = curr_time; @@ -900,8 +901,9 @@ std::vector SchedulerState::MarkNodeExecuted( auto port = input_port.second; auto& input_state = node_map_[input]; input_state.num_outputs_executed[port]++; - if (input_state.num_outputs_executed[port] == - input_state.outputs[port].size() && + int input_state_outputs_size_ = input_state.outputs[port].size(); + if (input_state.num_outputs_executed[port] == input_state_outputs_size_ + && !IsPersistent(*input)) { // All the outputs are executed; no reference to this output port of // input node. @@ -1119,7 +1121,7 @@ void SchedulerState::GenerateRunMetadata(RunMetadata* metadata) { const NodeState& nodestate = node_map_.at(node_def); NodeExecStats* node_stats = device_stepstats->add_node_stats(); uint64 total_output_size = 0; - for (int slot = 0; slot < nodestate.output_properties.size(); slot++) { + for (int slot = 0, slot_iter_limit = nodestate.output_properties.size(); slot < slot_iter_limit; slot++) { const auto& properties = nodestate.output_properties[slot]; NodeOutput* no = node_stats->add_output(); no->set_slot(slot); diff --git a/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc b/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc index af323e913a7..4f385797f20 100644 --- a/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc +++ b/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc @@ -73,7 +73,7 @@ class UniqueNodes { if (it == memoized_signatures_.end()) return; std::vector& candidates = rep_[it->second]; - for (int i = 0; i < candidates.size(); ++i) { + for (int i = 0, candidates_size = candidates.size(); i < candidates_size; ++i) { if (candidates[i] == node) { std::swap(candidates[i], candidates[candidates.size() - 1]); candidates.resize(candidates.size() - 1); diff --git a/tensorflow/core/grappler/optimizers/debug_stripper.cc b/tensorflow/core/grappler/optimizers/debug_stripper.cc index d4b3bf395c3..de62e8fe6b9 100644 --- a/tensorflow/core/grappler/optimizers/debug_stripper.cc +++ b/tensorflow/core/grappler/optimizers/debug_stripper.cc @@ -63,7 +63,7 @@ Status DebugStripper::Optimize(Cluster* cluster, const GrapplerItem& item, node.mutable_attr()->swap(new_attr); // As Identity op only takes one input, mark redundant inputs as control // input. 
- for (size_t i = 1; i < node.input_size(); ++i) { + for (int i = 1, node_input_size = node.input_size(); i < node_input_size; ++i) { if (!IsControlInput(node.input(i))) { *node.mutable_input(i) = AsControlDependency(NodeName(node.input(i))); } diff --git a/tensorflow/core/grappler/optimizers/function_optimizer.cc b/tensorflow/core/grappler/optimizers/function_optimizer.cc index ed3af955c13..5c703b18a6d 100644 --- a/tensorflow/core/grappler/optimizers/function_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/function_optimizer.cc @@ -438,8 +438,8 @@ bool HasUnusedOutputs(const NodeDef& func_node, const FunctionDef& func, int num_outputs = func.signature().output_arg_size(); const absl::flat_hash_set active_outputs = GetActiveOutputs(func_node, ctx, /*size_hind*/ num_outputs); - - return active_outputs.size() != num_outputs; + int active_outputs_size = active_outputs.size(); + return active_outputs_size != num_outputs; } // Return pruned FunctionDefLibrary with functions that are reachable from @@ -563,7 +563,8 @@ void RemoveUnusedOutputsTypes(const FunctionSpecialization& specialization, if (tout == nullptr || !tout->has_list()) return; // Nothing to do if all outputs are active. - if (specialization.active_outputs.size() == tout->list().type_size()) return; + int specialization_active_outputs_size = specialization.active_outputs.size(); + if (specialization_active_outputs_size == tout->list().type_size()) return; // Clear input types for the specialized node. auto* attr = specialized_func_node->mutable_attr(); @@ -1142,7 +1143,8 @@ void AddFrameForwardingControlEdge(const std::vector& info, Node* caller, Graph* g) { // All nodes added to the graph by v2 control flow lowering and function // inlining are guaranteed to have control edges to nested function calls. - if (caller->id() >= info.size()) return; + int info_size = info.size(); + if (caller->id() >= info_size ) return; // Check if a lowered node is executing inside a while loop. 
const Node* frame = info[caller->id()].frame; diff --git a/tensorflow/core/grappler/optimizers/model_pruner.cc b/tensorflow/core/grappler/optimizers/model_pruner.cc index 20db4360f73..634ef35ab21 100644 --- a/tensorflow/core/grappler/optimizers/model_pruner.cc +++ b/tensorflow/core/grappler/optimizers/model_pruner.cc @@ -401,9 +401,10 @@ Status SplitIdentityNInputs(GraphDef* graph, } const int num_non_control_inputs = NumNonControlInputs(*node); + int terminal_second_size = terminal.second.size(); if (node->attr().count("T") == 0 || node->attr().at("T").list().type_size() != num_non_control_inputs || - terminal.second.size() >= num_non_control_inputs) { + terminal_second_size >= num_non_control_inputs) { continue; } diff --git a/tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc b/tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc index ec16de1294b..35d0c5b0e40 100644 --- a/tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc @@ -107,7 +107,8 @@ Status IsNodeOutputPortHostFriendly(const GraphView& graph, /*include_tensor_values=*/false)); } const auto& output_properties = properties->GetOutputProperties(node.name()); - if (port_id >= output_properties.size()) { + int output_properties_size = output_properties.size(); + if (port_id >= output_properties_size) { LOG(WARNING) << "port_id=" << port_id << " but output_properties.size()=" << output_properties.size() << "\n" diff --git a/tensorflow/core/grappler/optimizers/shape_optimizer.cc b/tensorflow/core/grappler/optimizers/shape_optimizer.cc index 69de1cde4ca..656c1a1db1c 100644 --- a/tensorflow/core/grappler/optimizers/shape_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/shape_optimizer.cc @@ -99,7 +99,8 @@ Status ShapeOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item, } const auto& prop = properties.GetOutputProperties(reduce_indices.node->name()); - if (prop.size() <= reduce_indices.port_id) { + int prop_size = prop.size(); + if (prop_size <= reduce_indices.port_id) { continue; } const TensorShapeProto& reduction_indices_shape = diff --git a/tensorflow/core/grappler/utils.cc b/tensorflow/core/grappler/utils.cc index cd6b4855583..240a52d1c6b 100644 --- a/tensorflow/core/grappler/utils.cc +++ b/tensorflow/core/grappler/utils.cc @@ -357,7 +357,7 @@ void PermuteNodesInPlace(GraphDef* graph, std::vector* permutation, } permutation->swap(inv_perm); } - for (std::size_t n = 0; n + 1 < permutation->size(); ++n) { + for (int n = 0, permutation_size = permutation->size(); n + 1 < permutation_size; ++n) { while (n != (*permutation)[n]) { std::size_t r = (*permutation)[n]; graph->mutable_node()->SwapElements(n, r); diff --git a/tensorflow/core/grappler/utils/graph_view.cc b/tensorflow/core/grappler/utils/graph_view.cc index 5a9a1cd2abb..c19e600e869 100644 --- a/tensorflow/core/grappler/utils/graph_view.cc +++ b/tensorflow/core/grappler/utils/graph_view.cc @@ -63,7 +63,7 @@ bool NodeView::HasFanout(const FaninView& fanout) const { return false; } else if (fanout.index() == Graph::kControlSlot) { return view->fanins_set_.contains({this->node(), Graph::kControlSlot}); - } else if (fanout.index() >= view->regular_fanins_.size()) { + } else if (fanout.index() >= static_cast(view->regular_fanins_.size())) { return false; } return view->regular_fanins_[fanout.index()].node_index_ == node_index_; @@ -152,7 +152,8 @@ Status GraphView::CheckAndAddFaninsInternal(NodeView* node_view) { Graph::kControlSlot); has_observed_control = true; } else { - 
if (fanin_node_view.regular_fanouts_by_port_.size() < + int fanin_node_view_regular_fanouts_by_port_size = fanin_node_view.regular_fanouts_by_port_.size(); + if (fanin_node_view_regular_fanouts_by_port_size < fanin_id.index() + 1) { fanin_node_view.regular_fanouts_by_port_.resize(fanin_id.index() + 1); } @@ -197,7 +198,7 @@ bool MutableNodeView::HasFanout(const MutableFaninView& fanout) const { return false; } else if (fanout.index() == Graph::kControlSlot) { return view->fanins_count_.contains({this->node(), Graph::kControlSlot}); - } else if (fanout.index() >= view->regular_fanins_.size()) { + } else if (fanout.index() >= static_cast(view->regular_fanins_.size())) { return false; } return view->regular_fanins_[fanout.index()].node_index_ == node_index_; @@ -279,7 +280,8 @@ void Mutation::AddMutation( void Mutation::RemoveNode(MutableNodeView* node) { auto& update_index = node->update_index_; if (update_index != internal::kMissingIndex) { - if (update_index < updated_nodes_.size() - 1) { + int updated_nodes_size = updated_nodes_.size(); + if (update_index < updated_nodes_size - 1) { graph_view_->nodes_[updated_nodes_.back().node_index].update_index_ = update_index; std::swap(updated_nodes_[update_index], updated_nodes_.back()); @@ -574,7 +576,8 @@ void MutableGraphView::AddFaninsInternal( --last_pos; } } else { - if (fanin_node_view.regular_fanouts_by_port_.size() < + int fanin_node_view_regular_fanouts_by_port_size = fanin_node_view.regular_fanouts_by_port_.size(); + if (fanin_node_view_regular_fanouts_by_port_size < fanin_id.index() + 1) { fanin_node_view.regular_fanouts_by_port_.resize(fanin_id.index() + 1); } @@ -852,8 +855,8 @@ template void MutableGraphView::ReplaceNodeFanouts(MutableNodeView* node, T* fanouts) { node->num_regular_fanouts_ = fanouts->num_regular_fanouts_; node->regular_fanouts_by_port_ = std::move(fanouts->regular_fanouts_by_port_); - for (int i = 0; i < node->regular_fanouts_by_port_.size(); ++i) { - for (int j = 0; j < node->regular_fanouts_by_port_[i].size(); ++j) { + for (int i = 0, i_max = node->regular_fanouts_by_port_.size(); i < i_max; ++i) { + for (int j = 0, j_max = node->regular_fanouts_by_port_[i].size(); j < j_max; ++j) { auto& fanout = node->regular_fanouts_by_port_[i][j]; auto* fanout_node_view = fanout.node_view(); auto& fanout_fanin = fanout_node_view->regular_fanins_[fanout.index()]; @@ -868,7 +871,7 @@ void MutableGraphView::ReplaceNodeFanouts(MutableNodeView* node, T* fanouts) { } } node->controlled_fanouts_ = std::move(fanouts->controlled_fanouts_); - for (int i = 0; i < node->controlled_fanouts_.size(); ++i) { + for (int i = 0, i_max = node->controlled_fanouts_.size(); i < i_max; ++i) { auto& fanout = node->controlled_fanouts_[i]; auto* fanout_node_view = fanout.node_view(); auto& fanout_fanin = @@ -1017,7 +1020,8 @@ inline void MutableGraphView::RemoveRegularFaninFanoutInternal( {&graph_->node(fanin.node_index_), fanin.index()}); auto* fanin_node_view = fanin.node_view(); auto& fanouts = fanin_node_view->regular_fanouts_by_port_[fanin.index()]; - if (fanin.fanout_index_ < fanouts.size() - 1) { + int fanouts_size = fanouts.size(); + if (fanin.fanout_index_ < fanouts_size - 1) { // Swap fanout with last fanout in vector, and update it's associated fanin // index. 
MutableFaninView& last_fanout = fanouts.back(); @@ -1043,7 +1047,8 @@ inline void MutableGraphView::RemoveRegularFaninFanoutInternal( break; } } - if (last_fanout_index < fanin_node_view->regular_fanouts_by_port_.size()) { + int fanin_node_view_regular_fanouts_by_port_size = fanin_node_view->regular_fanouts_by_port_.size(); + if (last_fanout_index < fanin_node_view_regular_fanouts_by_port_size) { fanin_node_view->regular_fanouts_by_port_.resize(last_fanout_index); } } @@ -1052,7 +1057,8 @@ inline void MutableGraphView::AddRegularFaninInternal( MutableNodeView* node_view, const SafeTensorId& fanin_id) { MutableNodeView* fanin_node_view = GetNode(fanin_id.node()); // Resize fanouts to include new output port index. - if (fanin_node_view->regular_fanouts_by_port_.size() < fanin_id.index() + 1) { + int fanin_node_view_regular_fanouts_by_port_size = fanin_node_view->regular_fanouts_by_port_.size(); + if (fanin_node_view_regular_fanouts_by_port_size < fanin_id.index() + 1) { fanin_node_view->regular_fanouts_by_port_.resize(fanin_id.index() + 1); } @@ -1078,7 +1084,8 @@ inline void MutableGraphView::UpdateRegularFaninInternal( MutableNodeView* fanin_node_view = GetNode(fanin_id.node()); // Resize fanouts to include new output port index. - if (fanin_node_view->regular_fanouts_by_port_.size() < fanin_id.index() + 1) { + int fanin_node_view_regular_fanouts_by_port_size = fanin_node_view->regular_fanouts_by_port_.size(); + if (fanin_node_view_regular_fanouts_by_port_size < fanin_id.index() + 1) { fanin_node_view->regular_fanouts_by_port_.resize(fanin_id.index() + 1); } @@ -1110,8 +1117,9 @@ inline void MutableGraphView::RemoveControllingFaninFanoutInternal( // controlled fanout in controlling fanin with controlled fanout to be // removed. auto* control_to_remove_view = control_to_remove.node_view(); + int control_to_remove_view_controlled_fanouts_size = control_to_remove_view->controlled_fanouts_.size(); if (control_to_remove.fanout_index_ < - control_to_remove_view->controlled_fanouts_.size() - 1) { + control_to_remove_view_controlled_fanouts_size - 1) { auto& control_to_remove_view_last_control = control_to_remove_view->controlled_fanouts_.back(); control_to_remove_view_last_control.node_view() @@ -1137,7 +1145,8 @@ inline void MutableGraphView::RemoveControllingFaninInternal( RemoveControllingFaninFanoutInternal(node_view, control_index); // Swap last controlling fanin in node with controlling fanin to be removed. - if (control_index < node_view->controlling_fanins_.size() - 1) { + int node_view_controlling_fanins_size = node_view->controlling_fanins_.size(); + if (control_index < node_view_controlling_fanins_size - 1) { auto& last_control = node_view->controlling_fanins_.back(); auto* last_control_view = last_control.node_view(); last_control_view->controlled_fanouts_[last_control.fanout_index_] diff --git a/tensorflow/core/grappler/utils/graph_view_internal.h b/tensorflow/core/grappler/utils/graph_view_internal.h index d07f9f71640..9b142444d8a 100644 --- a/tensorflow/core/grappler/utils/graph_view_internal.h +++ b/tensorflow/core/grappler/utils/graph_view_internal.h @@ -172,7 +172,8 @@ class NodeViewInternal { // Returns a regular fanin based on input index. If no such fanin exist, a // missing fanin is returned, with no NodeView set and an index of -2. 
const FanoutViewT& GetRegularFanin(int i) const { - if (i < 0 || i >= regular_fanins_.size()) { + int regular_fanins_size = regular_fanins_.size(); + if (i < 0 || i >= regular_fanins_size) { return GetMissingFanin(); } return regular_fanins_[i]; @@ -191,7 +192,8 @@ class NodeViewInternal { // Returns a regular fanout(s) based on output index. If no such output index // exists, no fanouts will be returned. const std::vector& GetRegularFanout(int i) const { - if (i < 0 || i >= regular_fanouts_by_port_.size()) { + int regular_fanouts_by_port_size = regular_fanouts_by_port_.size(); + if (i < 0 || i >= regular_fanouts_by_port_size) { return GetMissingFanout(); } return regular_fanouts_by_port_[i]; @@ -289,14 +291,16 @@ class GraphViewInternal { // Finds node by index in the graph. If no such node exists in the graph, a // `nullptr` is returned. const NodeViewT* GetNode(int node_index) const { - if (node_index < 0 || node_index >= nodes_.size()) { + int nodes_size = nodes_.size(); + if (node_index < 0 || node_index >= nodes_size) { return nullptr; } return &nodes_[node_index]; } NodeViewT* GetNode(int node_index) { - if (node_index < 0 || node_index >= nodes_.size()) { + int nodes_size = nodes_.size(); + if (node_index < 0 || node_index >= nodes_size) { return nullptr; } return &nodes_[node_index]; @@ -444,13 +448,14 @@ inline bool UpdateDevice(NodeViewDiff* diff, template inline bool AddOrUpdateAtIndex(std::vector* v, int i, const U& value, const T& default_value) { - if (i > v->size()) { + int v_size = v->size(); + if (i > v_size) { // Resize to include `value`, filling the newly introduced gap with // `default_value` for later checks of validity (gaps in vector). v->reserve(i + 1); v->resize(i, default_value); v->push_back({value}); - } else if (i == v->size()) { + } else if (i == v_size) { // Vector is large enough, simply append `value` to the end. v->push_back({value}); } else { @@ -494,7 +499,8 @@ inline bool AddOrUpdateRegularFanin(NodeViewDiff* diff, int index, // index from beginning of regular fanins. const int relative_removal_index = num_regular_fanins - index - 1; // Check if at relative index fanin was already marked for removal. - if (relative_removal_index < diff->regular_inputs_to_remove.size() && + int diff_regular_inputs_to_remove_size = diff->regular_inputs_to_remove.size(); + if (relative_removal_index < diff_regular_inputs_to_remove_size && diff->regular_inputs_to_remove[relative_removal_index]) { // Unmark fanin for removal. diff->regular_inputs_to_remove[relative_removal_index] = false; @@ -543,7 +549,8 @@ inline bool RemoveRegularFanin(NodeViewDiff* diff, int index) { } else { // Relative index from end of regular fanins. const int relative_add_index = index - num_regular_fanins; - if (relative_add_index >= diff->regular_inputs_to_add.size() || + int diff_regular_inputs_to_add_size = diff->regular_inputs_to_add.size(); + if (relative_add_index >= diff_regular_inputs_to_add_size || IsEmptyTensorId(diff->regular_inputs_to_add[relative_add_index])) { // At relative index, appended regular fanin was already marked for // removal. 
@@ -671,7 +678,8 @@ inline bool IsWellFormed( const absl::flat_hash_map& updated_node_names) { ResizeByTrimmingEndForValue(&diff->regular_inputs_to_remove, false); ResizeByTrimmingEndForValue(&diff->regular_inputs_to_add, EmptyTensorId()); - if (diff->regular_inputs_to_add.size() != diff->num_regular_inputs_to_add) { + int diff_regular_inputs_to_add_size = diff->regular_inputs_to_add.size(); + if (diff_regular_inputs_to_add_size != diff->num_regular_inputs_to_add) { // Missing regular fanins in between appended fanins. return false; } else if (diff->num_regular_inputs_to_add > 0 && @@ -679,7 +687,7 @@ inline bool IsWellFormed( // Appending new fanins while removing existing fanins, resulting in missing // regular fanins in between. return false; - } else if (diff->regular_inputs_to_remove.size() != + } else if ( static_cast(diff->regular_inputs_to_remove.size()) != diff->num_regular_inputs_to_remove) { // Regular fanins exist in between removed fanins. return false; @@ -830,7 +838,8 @@ inline void AddOrUpdateRegularFanin(NewNode* new_node, int index, // remove existing fanins and updated/added fanins via AddOrUpdateRegularFanins. template inline void RemoveRegularFanin(NewNode* new_node, int index) { - if (index < 0 || index >= new_node->regular_fanins.size() || + int new_node_regular_fanins_size = new_node->regular_fanins.size(); + if (index < 0 || index >= new_node_regular_fanins_size || IsEmptyTensorId(new_node->regular_fanins[index])) { return; } @@ -874,7 +883,8 @@ inline bool IsWellFormed( NewNode* new_node, const absl::flat_hash_map& updated_node_names) { ResizeByTrimmingEndForValue(&new_node->regular_fanins, EmptyTensorId()); - if (new_node->regular_fanins.size() != new_node->num_regular_fanins) { + int new_node_regular_fanins_size = new_node->regular_fanins.size(); + if (new_node_regular_fanins_size != new_node->num_regular_fanins) { return false; } diff --git a/tensorflow/core/grappler/utils/topological_sort.cc b/tensorflow/core/grappler/utils/topological_sort.cc index e24a457593a..5ed292d1983 100644 --- a/tensorflow/core/grappler/utils/topological_sort.cc +++ b/tensorflow/core/grappler/utils/topological_sort.cc @@ -81,7 +81,7 @@ Status ComputeTopologicalOrder( int ready_node = (*ready_nodes)[front]; for (int fanout : graph_view.GetFanout(ready_node)) { ++num_ready_inputs[fanout]; - if (num_ready_inputs[fanout] == graph_view.GetFanin(fanout).size()) { + if (num_ready_inputs[fanout] == static_cast(graph_view.GetFanin(fanout).size())) { ready_nodes->push_back(fanout); ++back; } @@ -95,7 +95,7 @@ Status ComputeTopologicalOrder( "at node = " << graph.node(back).DebugString(); for (int i = 0; i < graph_view.num_nodes(); ++i) { - if (num_ready_inputs[i] != graph_view.GetFanin(i).size()) { + if (num_ready_inputs[i] != static_cast(graph_view.GetFanin(i).size())) { VLOG(1) << "Node not ready: " << graph.node(i).DebugString(); } } diff --git a/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc b/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc index e86cbc7684c..ada94be15bf 100644 --- a/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc +++ b/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc @@ -68,7 +68,7 @@ Status SqliteQueryConnection::GetNext(IteratorContext* ctx, Status SqliteQueryConnection::PrepareQuery() { TF_RETURN_IF_ERROR(db_->Prepare(query_, &stmt_)); - int column_count = stmt_.ColumnCount(); + size_t column_count = stmt_.ColumnCount(); if (column_count != output_types_.size()) { stmt_ = 
SqliteStatement(); return errors::InvalidArgument(tensorflow::strings::Printf( diff --git a/tensorflow/python/grappler/model_analyzer.cc b/tensorflow/python/grappler/model_analyzer.cc index 5a76cdd8fb2..250010c0fed 100644 --- a/tensorflow/python/grappler/model_analyzer.cc +++ b/tensorflow/python/grappler/model_analyzer.cc @@ -48,7 +48,7 @@ void ModelAnalyzer::PrintNodeInfo(const NodeDef* node, if (properties.HasOutputProperties(node->name())) { const std::vector& props = properties.GetOutputProperties(node->name()); - for (int i = 0; i < props.size(); ++i) { + for (int i = 0, props_size = props.size(); i < props_size; ++i) { const OpInfo::TensorProperties& prop = props[i]; os << "\t" << "output " << i << " (" << DataTypeString(prop.dtype()) @@ -88,7 +88,7 @@ void ModelAnalyzer::PrintNodeInfo(const NodeDef* node, } else if (properties.HasInputProperties(node->name())) { const std::vector& props = properties.GetInputProperties(node->name()); - for (int i = 0; i < props.size(); ++i) { + for (int i = 0, props_size = props.size(); i < props_size; ++i) { const OpInfo::TensorProperties& prop = props[i]; if (prop.has_value()) { os << "\t" From 52e1dba6b14da82ddd30344526e13557cf33cc32 Mon Sep 17 00:00:00 2001 From: Tare Gaskin Date: Tue, 16 Jun 2020 04:02:50 +0000 Subject: [PATCH 18/19] getting rid of pesky lingering commits --- .../mlir/lite/quantization/import_quant_stats_pass.cc | 4 ++-- .../mlir/lite/quantization/quantization_config.cc | 8 ++++---- .../mlir/lite/quantization/quantization_driver.cc | 4 ++-- .../mlir/lite/quantization/quantization_utils.cc | 10 +++++----- .../compiler/mlir/tensorflow/utils/dump_mlir_util.cc | 2 +- tensorflow/compiler/mlir/xla/ir/chlo_ops.cc | 2 +- tensorflow/compiler/mlir/xla/ir/hlo_ops.cc | 6 +++--- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc index e00a088c38c..d924a3e82ac 100644 --- a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc +++ b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc @@ -76,7 +76,7 @@ class ImportQuantStatsPass // If the index is out of range, this method returns false. Otherwise it // returns true if the value is a float tensor. 
bool IsQuantizableResult(Operation *op, int index) { - if (index < 0 || index >= static_cast(op->getNumResults())) return false; + if (index < 0 || index >= op->getNumResults()) return false; Value res = op->getResult(index); return res.getType().isa() && res.getType().cast().getElementType().isa(); @@ -158,7 +158,7 @@ void ImportQuantStatsPass::ImportAsStatsOps(OpBuilder b, Operation *op, InsertStatsOpAtResult(b, op->getResult(index), layer_stats, axis_stats, axis); } else { - for (int i = 0, e = op->getNumResults(); i < e; ++i) { + for (int i = 0; i < op->getNumResults(); ++i) { if (IsQuantizableResult(op, i)) { InsertStatsOpAtResult(b, op->getResult(i), layer_stats, axis_stats, axis); diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc index cdff93502f2..6b897bd5608 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_config.cc @@ -48,9 +48,9 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_mins; if (!min_values.empty()) { std::vector node_mins_str = absl::StrSplit(min_values, ','); - for (const std::string&node_min : node_mins_str.size()) { + for (int i = 0; i < node_mins_str.size(); i++) { double value; - if (!absl::SimpleAtod(node_min, &value)) { + if (!absl::SimpleAtod(node_mins_str[i], &value)) { return true; } node_mins.push_back(value); @@ -60,9 +60,9 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names, std::vector node_maxs; if (!max_values.empty()) { std::vector node_maxs_str = absl::StrSplit(max_values, ','); - for (const std::string&node_max : node_maxs_str.size()) { + for (int i = 0; i < node_maxs_str.size(); i++) { double value; - if (!absl::SimpleAtod(node_max, &value)) { + if (!absl::SimpleAtod(node_maxs_str[i], &value)) { llvm::errs() << "Unexpected mins: " << node_maxs_str[i] << "\n"; return true; } diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc index a9f4eb78431..2964a3e79f8 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc @@ -294,7 +294,7 @@ class QuantizationDriver { return; if (current_op == op) llvm::errs() << "===>>>"; llvm::errs() << op->getName() << " : ("; - for (int i = 0, e = op->getNumOperands(); i < e; ++i) { + for (auto i = 0; i < op->getNumOperands(); ++i) { if (auto params = GetOperandQuantState(op, i).params) params.print(llvm::errs()); else @@ -303,7 +303,7 @@ class QuantizationDriver { llvm::errs() << ","; } llvm::errs() << ") -> ("; - for (int i = 0, e = op->getNumResults(); i < e; ++i) { + for (auto i = 0; i < op->getNumResults(); ++i) { if (auto params = GetResultQuantState(op, i).params) params.print(llvm::errs()); else diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc index 57b24eb8772..3d50f280d0f 100644 --- a/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc +++ b/tensorflow/compiler/mlir/lite/quantization/quantization_utils.cc @@ -54,7 +54,7 @@ static Type GetQuantizedType(Builder builder, Type input_type, } else if (min.size() == max.size()) { auto shape = input_type.dyn_cast(); if (!shape || shape.getRank() <= quant_dim || - static_cast(min.size()) != shape.getDimSize(quant_dim)) { + min.size() != 
shape.getDimSize(quant_dim)) { return {}; } // TODO(b/141508873): the quantization dim is set to the last dimension. @@ -75,7 +75,7 @@ TypeAttr RescaleQuantizedType(Type input, Attribute factor) { if (auto qtype = ele_type.dyn_cast()) { ArrayRef scales = qtype.getScales(); // Broadcasting hasn't been implemented yet. - if (static_cast(scales.size()) != factor_values.getNumElements()) return {}; + if (scales.size() != factor_values.getNumElements()) return {}; SmallVector new_scales; new_scales.reserve(scales.size()); auto scales_iter = scales.begin(); @@ -269,7 +269,7 @@ Type GetUniformQuantizedPerAxisTypeForWeight(ElementsAttr attr, int quant_dim, bool narrow_range) { Builder builder(attr.getContext()); auto shape = attr.getType().cast().getShape(); - if (static_cast(shape.size()) <= quant_dim) return {}; + if (shape.size() <= quant_dim) return {}; // `symmetric` can only be used when it is `signed` and `narrow_range`. if (symmetric && (!is_signed || !narrow_range)) return {}; @@ -334,7 +334,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias( const std::vector& op_types) { if (op_types.empty()) return {}; - size_t axis_size = 1; + int axis_size = 1; int32_t quant_dim = -1; Type expressed_type; // Requires all the op types are valid UniformQuantizedTypes or @@ -368,7 +368,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias( scales[index_scale.index()] *= index_scale.value(); } } else if (auto type = op_type.dyn_cast()) { - for (int index = 0, e = axis_size; index != e; ++index) { + for (int index = 0; index != axis_size; ++index) { scales[index] *= type.getScale(); } } diff --git a/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc b/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc index febf2bc096d..797687ea658 100644 --- a/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc +++ b/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc @@ -41,7 +41,7 @@ std::string MakeUniqueFilename(string name) { static NameCounts& instance = *new NameCounts; // Remove illegal characters from `name`. - for (int i = 0, e = name.size(); i < e; ++i) { + for (int i = 0; i < name.size(); ++i) { char ch = name[i]; if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' 
|| ch == '\\') { diff --git a/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc b/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc index 3408f3ed0cc..26db4549a2a 100644 --- a/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc +++ b/tensorflow/compiler/mlir/xla/ir/chlo_ops.cc @@ -49,7 +49,7 @@ static Type GetBroadcastType(Type x, Type y, Type element_type, if (shape_x.size() == shape_y.size()) { llvm::SmallVector out_shape(shape_x.size()); - for (int i = 0, e = shape_x.size(); i < e; i++) { + for (int i = 0; i < shape_x.size(); i++) { auto x_val = shape_x[i]; auto y_val = shape_y[i]; if (x_val == -1 || y_val == -1) { diff --git a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc index 7f313b56925..d20f1713eba 100644 --- a/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc +++ b/tensorflow/compiler/mlir/xla/ir/hlo_ops.cc @@ -143,7 +143,7 @@ DenseIntElementsAttr BuildConvPaddingAttrs( int rank = padding_low.size(); SmallVector padding; - for (unsigned i = 0, e = rank; i < e; ++i) { + for (unsigned i = 0; i < rank; ++i) { padding.push_back(GetPaddingValue(padding_attr, {i, 0}) + padding_low[i]); padding.push_back(GetPaddingValue(padding_attr, {i, 1}) + padding_high[i]); } @@ -853,7 +853,7 @@ static Attribute foldConcatenateHelper(ConcatenateOp* op, auto shape = type.getShape(); size_t top_size = 1; - for (int i = 0, e = axis; i < e; i++) { + for (int i = 0; i < axis; i++) { top_size = top_size * shape[i]; } @@ -1118,7 +1118,7 @@ static LogicalResult Verify(MapOp op) { // increasing. auto values = op.dimensions().getValues(); auto dimensions = std::vector{values.begin(), values.end()}; - for (int i = 0, e = dimensions.size(); i < e; ++i) { + for (int i = 0; i < dimensions.size(); ++i) { if (dimensions[i] != i) return op.emitOpError() << "requires monotonically increasing dimension " "numbers, but got: " From 9338f4da0d648cb73339c202fafbbc9376bb3fcb Mon Sep 17 00:00:00 2001 From: tg-at-google Date: Tue, 16 Jun 2020 22:19:40 -0400 Subject: [PATCH 19/19] Update sqlite_query_connection.cc --- .../kernels/data/experimental/sql/sqlite_query_connection.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc b/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc index ada94be15bf..9a7eb125f95 100644 --- a/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc +++ b/tensorflow/core/kernels/data/experimental/sql/sqlite_query_connection.cc @@ -68,8 +68,8 @@ Status SqliteQueryConnection::GetNext(IteratorContext* ctx, Status SqliteQueryConnection::PrepareQuery() { TF_RETURN_IF_ERROR(db_->Prepare(query_, &stmt_)); - size_t column_count = stmt_.ColumnCount(); - if (column_count != output_types_.size()) { + int column_count = stmt_.ColumnCount(); + if (column_count != static_cast(output_types_.size())) { stmt_ = SqliteStatement(); return errors::InvalidArgument(tensorflow::strings::Printf( "The number of columns in query (%d) must match the number of "
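Most hunks in this series apply one of a few recurring idioms for silencing -Wsign-compare: changing the loop index's declared type (int vs. size_t), hoisting an unsigned container .size() into a local variable of the counter's type before the comparison, or inserting a static_cast on one side so both operands share a type. The sketch below is illustrative only and is not part of any patch above; the file name, function names, and the use of int64_t are assumptions made for the example, not taken from the TensorFlow sources.

// sign_compare_sketch.cc -- standalone illustration, not part of the patches above.
// Hypothetical compile check: g++ -std=c++11 -Wall -Wsign-compare -c sign_compare_sketch.cc
#include <cstdint>
#include <vector>

int64_t SumFirstN(const std::vector<int>& values, int64_t n) {
  int64_t sum = 0;
  // This form warns under -Wsign-compare: 'i' is signed while values.size()
  // is an unsigned size_t, so the comparison mixes signedness.
  //   for (int64_t i = 0; i < values.size() && i < n; ++i) sum += values[i];

  // Idiom: evaluate size() once into a signed local, then compare like types.
  for (int64_t i = 0, values_size = static_cast<int64_t>(values.size());
       i < values_size && i < n; ++i) {
    sum += values[i];
  }
  return sum;
}

bool FitsInBuffer(size_t bytes_to_write, int64_t available_space) {
  // Idiom: cast the unsigned operand at the comparison site.
  return static_cast<int64_t>(bytes_to_write) <= available_space;
}

Either idiom is behavior-preserving only when the unsigned value is known to fit in the signed type; otherwise the cast merely hides the warning rather than fixing a real range problem.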