Merge pull request #40371 from tg-at-google:sign-compare-warning-fixes-batch-1

PiperOrigin-RevId: 316929432
Change-Id: Iea886c60e7aee2308157f1e4a0a06a4ff01ca6ad
commit 9eed8b3cf5
Author: TensorFlower Gardener
Date:   2020-06-17 11:39:36 -07:00
7 changed files with 18 additions and 16 deletions
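
All seven files get the same class of fix: a signed `int` (or `auto`) index compared against an unsigned count such as `size()` or `getNumResults()`, which `-Wsign-compare` flags because the signed operand is implicitly converted to unsigned before the comparison. The batch uses two idioms: cast the unsigned count to the signed type at the comparison site, or hoist the bound into a signed local once. A minimal self-contained sketch of both (the `getNumResults` helper is a hypothetical stand-in, not code from the commit):

#include <vector>

// Stand-in for MLIR accessors such as op->getNumResults(), which return an
// unsigned type. Hypothetical helper, not part of the commit.
unsigned getNumResults(const std::vector<int>& results) {
  return static_cast<unsigned>(results.size());
}

bool IsValidIndex(const std::vector<int>& results, int index) {
  // This form warns under -Wsign-compare: `index` is signed, the count is
  // unsigned, so `index` would be converted to unsigned before comparing.
  //   if (index < 0 || index >= getNumResults(results)) return false;
  // Idiom 1: cast the unsigned count to the signed type (exact as long as
  // the count fits in int).
  return index >= 0 && index < static_cast<int>(getNumResults(results));
}

int SumResults(const std::vector<int>& results) {
  int sum = 0;
  // Idiom 2: hoist the bound into a signed local once; `i < e` is then a
  // signed-vs-signed comparison and the count is not re-read per iteration.
  for (int i = 0, e = getNumResults(results); i < e; ++i) sum += results[i];
  return sum;
}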


@@ -76,7 +76,8 @@ class ImportQuantStatsPass
   // If the index is out of range, this method returns false. Otherwise it
   // returns true if the value is a float tensor.
   bool IsQuantizableResult(Operation *op, int index) {
-    if (index < 0 || index >= op->getNumResults()) return false;
+    if (index < 0 || index >= static_cast<int>(op->getNumResults()))
+      return false;
     Value res = op->getResult(index);
     return res.getType().isa<ShapedType>() &&
            res.getType().cast<ShapedType>().getElementType().isa<FloatType>();
@@ -158,7 +159,7 @@ void ImportQuantStatsPass::ImportAsStatsOps(OpBuilder b, Operation *op,
     InsertStatsOpAtResult(b, op->getResult(index), layer_stats, axis_stats,
                           axis);
   } else {
-    for (int i = 0; i < op->getNumResults(); ++i) {
+    for (int i = 0, e = op->getNumResults(); i < e; ++i) {
       if (IsQuantizableResult(op, i)) {
         InsertStatsOpAtResult(b, op->getResult(i), layer_stats, axis_stats,
                               axis);


@@ -48,7 +48,7 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names,
   std::vector<llvm::Optional<double>> node_mins;
   if (!min_values.empty()) {
     std::vector<std::string> node_mins_str = absl::StrSplit(min_values, ',');
-    for (int i = 0; i < node_mins_str.size(); i++) {
+    for (int i = 0, e = node_mins_str.size(); i < e; i++) {
       double value;
       if (!absl::SimpleAtod(node_mins_str[i], &value)) {
         return true;
@@ -60,7 +60,7 @@ bool ParseInputNodeQuantSpecs(absl::string_view node_names,
   std::vector<llvm::Optional<double>> node_maxs;
   if (!max_values.empty()) {
     std::vector<std::string> node_maxs_str = absl::StrSplit(max_values, ',');
-    for (int i = 0; i < node_maxs_str.size(); i++) {
+    for (int i = 0, e = node_maxs_str.size(); i < e; i++) {
       double value;
       if (!absl::SimpleAtod(node_maxs_str[i], &value)) {
         llvm::errs() << "Unexpected mins: " << node_maxs_str[i] << "\n";
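
Since these two loops use `i` only to reach the split element, a range-based `for` would sidestep the signedness question entirely. A hedged sketch with a hypothetical `ParseMins` helper (names assumed, not from the commit), returning true on failure as ParseInputNodeQuantSpecs does:

#include <string>
#include <vector>

#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"

// Hypothetical helper, not from the commit: parses a comma-separated list of
// doubles, returning true on the first token that fails to parse.
bool ParseMins(absl::string_view min_values, std::vector<double>* node_mins) {
  std::vector<std::string> node_mins_str = absl::StrSplit(min_values, ',');
  for (const std::string& token : node_mins_str) {
    double value;
    if (!absl::SimpleAtod(token, &value)) return true;  // parse failure
    node_mins->push_back(value);
  }
  return false;
}

The commit keeps the indexed form instead, which is the smaller diff and leaves the surrounding error-reporting code untouched.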


@@ -294,7 +294,7 @@ class QuantizationDriver {
       return;
     if (current_op == op) llvm::errs() << "===>>>";
     llvm::errs() << op->getName() << " : (";
-    for (auto i = 0; i < op->getNumOperands(); ++i) {
+    for (int i = 0, e = op->getNumOperands(); i < e; ++i) {
       if (auto params = GetOperandQuantState(op, i).params)
         params.print(llvm::errs());
       else
@@ -303,7 +303,7 @@ class QuantizationDriver {
       llvm::errs() << ",";
     }
     llvm::errs() << ") -> (";
-    for (auto i = 0; i < op->getNumResults(); ++i) {
+    for (int i = 0, e = op->getNumResults(); i < e; ++i) {
       if (auto params = GetResultQuantState(op, i).params)
         params.print(llvm::errs());
       else


@@ -55,7 +55,7 @@ static Type GetQuantizedType(Builder builder, Type input_type,
   } else if (min.size() == max.size()) {
     auto shape = input_type.dyn_cast<ShapedType>();
     if (!shape || shape.getRank() <= quant_dim ||
-        min.size() != shape.getDimSize(quant_dim)) {
+        static_cast<int64_t>(min.size()) != shape.getDimSize(quant_dim)) {
       return {};
     }
     // TODO(b/141508873): the quantization dim is set to the last dimension.
@@ -76,7 +76,8 @@ TypeAttr RescaleQuantizedType(Type input, Attribute factor) {
   if (auto qtype = ele_type.dyn_cast<quant::UniformQuantizedPerAxisType>()) {
     ArrayRef<double> scales = qtype.getScales();
     // Broadcasting hasn't been implemented yet.
-    if (scales.size() != factor_values.getNumElements()) return {};
+    if (static_cast<int64_t>(scales.size()) != factor_values.getNumElements())
+      return {};
     SmallVector<double, 4> new_scales;
     new_scales.reserve(scales.size());
     auto scales_iter = scales.begin();
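
Both casts in this file widen `size_t` up to `int64_t` rather than narrowing the MLIR side down to `int`. A short sketch of why, assuming the right-hand side is a 64-bit count as MLIR's getNumElements() and getDimSize() appear to return here:

#include <cstdint>
#include <vector>

// Widening size_t to int64_t keeps the comparison exact for any size that
// fits in 63 bits; narrowing the 64-bit side to int could truncate instead.
bool SameLength(const std::vector<double>& scales, int64_t num_elements) {
  return static_cast<int64_t>(scales.size()) == num_elements;
}
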
@@ -270,7 +271,7 @@ Type GetUniformQuantizedPerAxisTypeForWeight(ElementsAttr attr, int quant_dim,
                                              bool narrow_range) {
   Builder builder(attr.getContext());
   auto shape = attr.getType().cast<ShapedType>().getShape();
-  if (shape.size() <= quant_dim) return {};
+  if (static_cast<int>(shape.size()) <= quant_dim) return {};
   // `symmetric` can only be used when it is `signed` and `narrow_range`.
   if (symmetric && (!is_signed || !narrow_range)) return {};
@@ -335,7 +336,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias(
     const std::vector<quant::QuantizedType>& op_types) {
   if (op_types.empty()) return {};
-  int axis_size = 1;
+  size_t axis_size = 1;
   int32_t quant_dim = -1;
   Type expressed_type;
   // Requires all the op types are valid UniformQuantizedTypes or
@@ -369,7 +370,7 @@ quant::QuantizedType GetUniformQuantizedTypeForBias(
       scales[index_scale.index()] *= index_scale.value();
     }
   } else if (auto type = op_type.dyn_cast<quant::UniformQuantizedType>()) {
-    for (int index = 0; index != axis_size; ++index) {
+    for (int index = 0, e = axis_size; index != e; ++index) {
      scales[index] *= type.getScale();
     }
   }
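
This file shows a third idiom: fix the declaration itself (`int axis_size` becomes `size_t axis_size`) so every later comparison against unsigned sizes matches types without a cast. A rough sketch of the pattern; the name and logic below are assumed for illustration, not the function's actual body:

#include <cstddef>
#include <vector>

// Hypothetical sketch: match the accumulator's type to the unsigned quantity
// it is compared with, instead of casting at each comparison site.
size_t WidestAxis(const std::vector<std::vector<double>>& per_op_scales) {
  size_t axis_size = 1;  // was `int`; scales.size() is size_t
  for (const std::vector<double>& scales : per_op_scales) {
    if (scales.size() > axis_size) axis_size = scales.size();
  }
  return axis_size;
}

The trade-off is visible in the second hunk: the signed index loop at line 369 then needs its own cached signed bound (`e = axis_size`) to keep `index != e` warning-free.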


@@ -41,7 +41,7 @@ std::string MakeUniqueFilename(string name) {
   static NameCounts& instance = *new NameCounts;
   // Remove illegal characters from `name`.
-  for (int i = 0; i < name.size(); ++i) {
+  for (int i = 0, e = name.size(); i < e; ++i) {
     char ch = name[i];
     if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' ||
         ch == '\\') {


@@ -49,7 +49,7 @@ static Type GetBroadcastType(Type x, Type y, Type element_type,
   if (shape_x.size() == shape_y.size()) {
     llvm::SmallVector<int64_t, 4> out_shape(shape_x.size());
-    for (int i = 0; i < shape_x.size(); i++) {
+    for (int i = 0, e = shape_x.size(); i < e; i++) {
       auto x_val = shape_x[i];
       auto y_val = shape_y[i];
       if (x_val == -1 || y_val == -1) {


@@ -143,7 +143,7 @@ DenseIntElementsAttr BuildConvPaddingAttrs(
   int rank = padding_low.size();
   SmallVector<int64_t, 8> padding;
-  for (unsigned i = 0; i < rank; ++i) {
+  for (unsigned i = 0, e = rank; i < e; ++i) {
     padding.push_back(GetPaddingValue(padding_attr, {i, 0}) + padding_low[i]);
     padding.push_back(GetPaddingValue(padding_attr, {i, 1}) + padding_high[i]);
   }
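
Here the induction variable stays `unsigned` (it feeds the `{i, 0}` index pairs), so the fix goes the other direction: hoist the signed `rank` into an unsigned local, converting it once at the loop header rather than implicitly at every `i < rank` test. A self-contained sketch with simplified bodies standing in for GetPaddingValue (hypothetical, not from the commit):

#include <cstdint>
#include <vector>

// `rank` comes from a container size, so it is non-negative and the
// int -> unsigned conversion at the loop header is safe.
std::vector<int64_t> InterleavePads(const std::vector<int64_t>& padding_low,
                                    const std::vector<int64_t>& padding_high) {
  int rank = static_cast<int>(padding_low.size());  // assumes equal sizes
  std::vector<int64_t> padding;
  padding.reserve(2 * rank);
  for (unsigned i = 0, e = rank; i < e; ++i) {
    padding.push_back(padding_low[i]);
    padding.push_back(padding_high[i]);
  }
  return padding;
}
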
@@ -891,7 +891,7 @@ static Attribute foldConcatenateHelper(ConcatenateOp* op,
   auto shape = type.getShape();
   size_t top_size = 1;
-  for (int i = 0; i < axis; i++) {
+  for (int i = 0, e = axis; i < e; i++) {
     top_size = top_size * shape[i];
   }
@@ -1169,7 +1169,7 @@ static LogicalResult Verify(MapOp op) {
   // increasing.
   auto values = op.dimensions().getValues<int64_t>();
   auto dimensions = std::vector<int64_t>{values.begin(), values.end()};
-  for (int i = 0; i < dimensions.size(); ++i) {
+  for (int i = 0, e = dimensions.size(); i < e; ++i) {
     if (dimensions[i] != i)
       return op.emitOpError() << "requires monotonically increasing dimension "
                                  "numbers, but got: "