Segregating changes 2
parent 5cbd0bcf41
commit 7f3de617db
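All of the hunks below touch the same family of -Wsign-compare adjustments: comparisons between a signed value (an int or int64 loop index, byte count, or flag count) and an unsigned one (a container's size_t size). A minimal standalone sketch of the warning and of the two remedies that alternate throughout this diff (the CountPositive* helpers and the values vector are made up for illustration and are not TensorFlow code):

```cpp
#include <cstddef>
#include <vector>

// Warns under -Wsign-compare:
//   for (int i = 0; i < values.size(); ++i)  // 'i' is signed, size() is unsigned

// Remedy 1: match the container's unsigned size type.
int CountPositiveUnsignedIndex(const std::vector<int>& values) {
  int count = 0;
  for (size_t i = 0; i < values.size(); ++i) {
    if (values[i] > 0) ++count;
  }
  return count;
}

// Remedy 2: keep the signed index and cast the size to the signed type once.
int CountPositiveSignedIndex(const std::vector<int>& values) {
  int count = 0;
  for (int i = 0; i < static_cast<int>(values.size()); ++i) {
    if (values[i] > 0) ++count;
  }
  return count;
}
```

Casting the size keeps a signed index for code that stores it into int fields, while switching the index to size_t avoids the cast entirely; the hunks below move between these two forms.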
@@ -187,7 +187,7 @@ void TensorShapeBase<Shape>::InitDims(gtl::ArraySlice<int64> dim_sizes) {
                     "bad overflow check");
   bool large_size = false;
   for (auto s : dim_sizes) {
-    if (static_cast<size_t>(s) > static_cast<size_t>(kMaxSmall)) {
+    if (s > kMaxSmall) {
       large_size = true;
       break;
     }
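A note on this first hunk: the two comparisons are not equivalent when a dimension size is negative. With both sides cast to size_t, a negative s wraps to a very large unsigned value and takes the large_size path; with the plain signed comparison it does not. A small self-contained sketch of that conversion behaviour (the kMaxSmall value below is illustrative only, not TensorFlow's actual constant):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kMaxSmall = 1 << 14;  // illustrative constant, not TF's value
  const int64_t s = -1;               // a negative "dimension size"

  // Signed comparison: -1 > 16384 is false, so s looks "small".
  std::printf("signed:   %d\n", s > kMaxSmall);

  // Unsigned comparison: size_t(-1) is huge, so s takes the large-size path.
  std::printf("unsigned: %d\n",
              static_cast<size_t>(s) > static_cast<size_t>(kMaxSmall));
  return 0;
}
```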
@@ -486,18 +486,18 @@ class BatchResource : public ResourceBase {
     std::map<string, std::vector<Tensor>> split_tensors;

     DCHECK_EQ(batch->task(0).context->num_outputs(), combined_outputs.size());
-    if (static_cast<int>(combined_outputs.size()) != batch->task(0).context->num_outputs()) {
+    if (combined_outputs.size() != batch->task(0).context->num_outputs()) {
       return errors::Internal("Wrong number of batched output tensors");
     }

     // Generate 'split_tensors' and populate the context outputs.
-    for (size_t i = 0; i < combined_outputs.size(); ++i) {
+    for (int i = 0; i < combined_outputs.size(); ++i) {
       const Tensor& output_tensor = combined_outputs[i];
       if (output_tensor.shape().dims() == 0) {
         return errors::FailedPrecondition(
             "Batched output tensor has 0 dimensions");
       }
-      if (output_tensor.shape().dim_size(0) != static_cast<long long int>(batch->size() + padding_size)) {
+      if (output_tensor.shape().dim_size(0) != batch->size() + padding_size) {
         return errors::FailedPrecondition(
             "Batched output tensor's 0th dimension does not equal the sum of "
             "the 0th dimension sizes of the input tensors");
@@ -40,13 +40,13 @@ void PrefetchAutotuner::RecordConsumption(size_t current_buffer_size) {
     case Mode::kDisabled:
       return;
     case Mode::kUpswing:
-      if (static_cast<tensorflow::int64>(current_buffer_size) == buffer_limit_) {
+      if (current_buffer_size == buffer_limit_) {
         mode_ = Mode::kDownswing;
       }
       return;
     case Mode::kDownswing:
       if (current_buffer_size == 0) {
-        if (buffer_limit_ >= static_cast<tensorflow::int64>(kBufferLimitThreshold)) {
+        if (buffer_limit_ >= kBufferLimitThreshold) {
          buffer_limit_ += kBufferLimitThreshold;
        } else {
          buffer_limit_ *= 2;
@@ -268,7 +268,7 @@ inline void RequantizeManyInNewRangeReference(const qint32* input, int64 count,
   // that could be easily adapted for a SIMD implementation. It should also be
   // possible to perform all the calculations in 32-bit rather than 64, but
   // that's not been implemented yet.
-  for (size_t index = 0; static_cast<tensorflow::int64>(index) < count; ++index) {
+  for (size_t index = 0; index < count; ++index) {
     const int64 input_value = static_cast<int64>(input[index]);
     const int64 fp_value =
         ((input_value * range_scale_fp) >> 32) + input_offset_fp;
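For context on the arithmetic in the loop above: range_scale_fp carries the scale factor in 32.32 fixed point, so multiplying and then shifting right by 32 applies the scale using only integer operations. A standalone sketch of the same pattern with made-up constants:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Represent 0.5 in 32.32 fixed point: 0.5 * 2^32.
  const int64_t range_scale_fp = static_cast<int64_t>(0.5 * 4294967296.0);
  const int64_t input_offset_fp = 3;  // arbitrary offset for the sketch

  const int64_t input_value = 10;
  // (10 * 0.5) + 3 = 8, computed entirely in integer arithmetic.
  const int64_t fp_value =
      ((input_value * range_scale_fp) >> 32) + input_offset_fp;
  std::printf("%lld\n", static_cast<long long>(fp_value));  // prints 8
  return 0;
}
```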
@@ -85,7 +85,7 @@ Status InputBuffer::ReadNBytes(int64 bytes_to_read, string* result) {
   result->resize(bytes_to_read);
   size_t bytes_read = 0;
   Status status = ReadNBytes(bytes_to_read, &(*result)[0], &bytes_read);
-  if (static_cast<int64>(bytes_read) < bytes_to_read) result->resize(bytes_read);
+  if (bytes_read < bytes_to_read) result->resize(bytes_read);
   return status;
 }

@@ -204,7 +204,7 @@ Status InputBuffer::Hint(int64 bytes_to_read) {
   }

   // The internal buffer is too small. Do nothing.
-  if (bytes_to_read > static_cast<int64>(size_)) {
+  if (bytes_to_read > size_) {
     return Status::OK();
   }

@@ -230,7 +230,7 @@ Status InputBuffer::Hint(int64 bytes_to_read) {
   limit_ += data.size();
   file_pos_ += data.size();

-  if (errors::IsOutOfRange(s) && data.size() == static_cast<size_t>(bytes_to_read)) {
+  if (errors::IsOutOfRange(s) && data.size() == bytes_to_read) {
     return Status::OK();
   } else {
     return s;
@@ -92,7 +92,7 @@ Status RandomAccessInputStream::SkipNBytes(int64 bytes_to_skip) {
     } else {
       return s;
     }
-    if (data.size() < static_cast<size_t>(bytes_to_read)) {
+    if (data.size() < bytes_to_read) {
       return errors::OutOfRange("reached end of file");
     }
     bytes_to_skip -= bytes_to_read;
@@ -134,7 +134,7 @@ Status SnappyInputBuffer::ReadCompressedBlockLength(uint32* length) {
   }
   size_t readable = std::min(bytes_to_read, avail_in_);

-  for (size_t i = 0; i < readable; i++) {
+  for (int i = 0; i < readable; i++) {
     // The "unsigned char" type cast is intentional to avoid implicit type
     // casting of the signed char to unsigned int during bitwise OR which
     // causes weird overflow errors.
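The comment kept as context in this hunk is worth illustrating: when bytes are read out of a char buffer and OR-ed into a wider length field, a byte at or above 0x80 sign-extends if it is not first cast to unsigned char. A small sketch of the difference (assuming a platform where plain char is signed):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Two length bytes as they might sit in a char buffer; 0x90 is negative
  // when plain char is signed.
  const char buffer[2] = {0x01, static_cast<char>(0x90)};

  uint32_t with_cast = 0, without_cast = 0;
  for (int i = 0; i < 2; ++i) {
    // Cast to unsigned char first, as the comment above recommends.
    with_cast = (with_cast << 8) | static_cast<unsigned char>(buffer[i]);
    // Without the cast, the 0x90 byte sign-extends to 0xffffff90 before the OR.
    without_cast = (without_cast << 8) | buffer[i];
  }
  std::printf("with cast:    0x%08x\n", static_cast<unsigned>(with_cast));     // 0x00000190
  std::printf("without cast: 0x%08x\n", static_cast<unsigned>(without_cast));  // 0xffffff90
  return 0;
}
```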
@@ -76,7 +76,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) {

   // If there is sufficient free space in input_buffer_ to fit data we
   // add it there and return.
-  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
+  if (bytes_to_write <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
@@ -87,7 +87,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) {
   TF_RETURN_IF_ERROR(DeflateBuffered());

   // input_buffer_ should be empty at this point.
-  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
+  if (bytes_to_write <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
@@ -144,7 +144,7 @@ void SnappyOutputBuffer::AddToInputBuffer(StringPiece data) {
   const int32 free_tail_bytes =
       input_buffer_capacity_ - (read_bytes + unread_bytes);

-  if (static_cast<int32>(bytes_to_write) > free_tail_bytes) {
+  if (bytes_to_write > free_tail_bytes) {
     memmove(input_buffer_.get(), next_in_, avail_in_);
     next_in_ = input_buffer_.get();
   }
@@ -98,7 +98,7 @@ void ZlibOutputBuffer::AddToInputBuffer(StringPiece data) {
   int32 unread_bytes = z_stream_->avail_in;
   int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes);

-  if (static_cast<int32>(bytes_to_write) > free_tail_bytes) {
+  if (bytes_to_write > free_tail_bytes) {
     memmove(z_stream_input_.get(), z_stream_->next_in, z_stream_->avail_in);
     z_stream_->next_in = z_stream_input_.get();
   }
@@ -154,7 +154,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) {

   size_t bytes_to_write = data.size();

-  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
+  if (bytes_to_write <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
@@ -162,7 +162,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) {
   TF_RETURN_IF_ERROR(DeflateBuffered(zlib_options_.flush_mode));

   // At this point input stream should be empty.
-  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
+  if (bytes_to_write <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
@@ -214,7 +214,7 @@ bool Env::FilesExist(const std::vector<string>& files,
     }
     if (fs_status) {
       result &= fs_result;
-      for (size_t i = 0; i < itr.second.size(); ++i) {
+      for (int i = 0; i < itr.second.size(); ++i) {
         per_file_status[itr.second[i]] = fs_status->at(i);
       }
     } else if (!fs_result) {
@@ -308,7 +308,7 @@ StringPiece FileSystem::Basename(StringPiece path) const {
 StringPiece FileSystem::Extension(StringPiece path) const {
   StringPiece basename = this->Basename(path);

-  size_t pos = basename.rfind('.');
+  int pos = basename.rfind('.');
   if (pos == StringPiece::npos) {
     return StringPiece(path.data() + path.size(), 0);
   } else {
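One observation on this hunk: rfind on a StringPiece returns an unsigned position type whose not-found value is npos, so holding the result in an int turns the pos == StringPiece::npos check into a mixed signed/unsigned comparison, exactly the kind of expression -Wsign-compare flags. A sketch with std::string_view standing in for StringPiece (an assumption for illustration, not the TensorFlow type itself):

```cpp
#include <cstdio>
#include <string_view>

int main() {
  std::string_view basename = "archive.tar.gz";

  // rfind returns an unsigned size_type; npos is its largest value, so the
  // comparison below is unsigned-vs-unsigned and warning-free.
  size_t pos = basename.rfind('.');
  if (pos == std::string_view::npos) {
    std::printf("no extension\n");
  } else {
    std::string_view ext = basename.substr(pos + 1);
    std::printf("extension: %.*s\n", static_cast<int>(ext.size()), ext.data());
  }

  // Storing the result in an int and comparing it against npos still works
  // through implicit conversion, but it is a signed/unsigned comparison.
  return 0;
}
```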
@@ -103,7 +103,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern,
           children_dir_status[i] = fs->IsDirectory(child_path);
         }
       });
-      for (size_t i = 0; i < children.size(); ++i) {
+      for (int i = 0; i < children.size(); ++i) {
         const string child_path = io::JoinPath(current_dir, children[i]);
         // If the IsDirectory call was cancelled we bail.
         if (children_dir_status[i].code() == tensorflow::error::CANCELLED) {
@@ -906,7 +906,7 @@ Status S3FileSystem::MultiPartCopy(const Aws::String& source,
       // wait on the mutex until notify is called
       // then check the finished parts as there could be false notifications
       multi_part_copy_cv.wait(lock, [&finishedPartStates, num_parts] {
-        return static_cast<const int>(finishedPartStates.size()) == num_parts;
+        return finishedPartStates.size() == num_parts;
       });
     }
     // check if there was any error for any part
@@ -74,9 +74,7 @@ class StatusLogSink : public TFLogSink {

     mutex_lock lock(mu_);
     messages_.emplace_back(entry.ToString());
-    if (messages_.size() > static_cast<size_t>(num_messages_)){
-      messages_.pop_front();
-    }
+    if (messages_.size() > num_messages_) messages_.pop_front();
   }

  private:
@@ -50,7 +50,7 @@ std::vector<absl::string_view> SplitNameAndMetadata(
 std::vector<absl::string_view> SplitPairs(absl::string_view metadata) {
   std::vector<absl::string_view> key_value_pairs;
   std::stack<char> quotes;
-  size_t start = 0, end = 0;
+  int start = 0, end = 0;
   for (; end < metadata.size(); ++end) {
     char ch = metadata[end];
     switch (ch) {
@@ -130,7 +130,7 @@ void DerivedXLineBuilder::ExpandOrAddLevelEvent(const XEvent& event,
 }

 void DerivedXLineBuilder::ResetLastEvents(int level) {
-  for (int i = level; i < static_cast<int>(last_event_by_level_.size()); ++i) {
+  for (int i = level; i < last_event_by_level_.size(); ++i) {
     last_event_by_level_[i] = absl::nullopt;
   }
   if (level == 0) ResetDependentLines();
@@ -37,7 +37,7 @@ class DerivedXLineBuilder {
       std::vector<DerivedXLineBuilder*> dependent_lines);

   void ExpandOrAddEvents(const std::vector<XEvent>& event_per_level) {
-    for (size_t level = 0; level < event_per_level.size(); ++level) {
+    for (int level = 0; level < event_per_level.size(); ++level) {
       ExpandOrAddLevelEvent(event_per_level[level], level);
     }
   }
@@ -266,7 +266,7 @@ void SortXSpace(XSpace* space) {
 // smaller than these value.
 void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) {
   for (XLine& line : *plane->mutable_lines()) {
-    if (line.timestamp_ns() >= static_cast<long int>(start_time_ns)) {
+    if (line.timestamp_ns() >= start_time_ns) {
       line.set_timestamp_ns(line.timestamp_ns() - start_time_ns);
     }
   }
@@ -139,7 +139,7 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],
     if (x[i] != x[0]) {
       all_equal = false;
     }
-    if (static_cast<int>(x[i].size()) > largest_rank) {
+    if (x[i].size() > largest_rank) {
      largest_rank = x[i].size();
    }
  }
@@ -176,7 +176,7 @@ BCastList<N>::BCastList(const BCastList::Vec (&x)[N],

   // 1-extend and align all vectors.
   for (int i = 0; i < N; ++i) {
-    if (static_cast<int>(copy[i].size()) < largest_rank) {
+    if (copy[i].size() < largest_rank) {
       copy[i].resize(largest_rank, 1);
     }
   }
@@ -52,7 +52,7 @@ namespace toco {
   // It then just becomes a concat along that dimension.
   int non_one_dims = 0;
   int concat_axis = 0;
-  for (size_t i = 0; i < multiples.size(); ++i) {
+  for (int i = 0; i < multiples.size(); ++i) {
     if (multiples[i] != 1) {
       ++non_one_dims;
       concat_axis = i;
@@ -31,7 +31,7 @@ bool TransposeAffectsMemoryOrder(std::vector<int> perm,
   // just the shape) then the flat buffer representation shouldn't change.
   std::vector<int> old_major_index_ordering;
   std::vector<int> new_major_index_ordering;
-  for (int i = 0; static_cast<size_t>(i) < in_shape.size(); i++) {
+  for (int i = 0; i < in_shape.size(); i++) {
     if (in_shape[i] != 1) {
       old_major_index_ordering.push_back(i);
     }
@@ -35,7 +35,7 @@ void DequantizeBuffer(Array* array) {
   auto& new_data = array->GetMutableBuffer<ArrayDataType::kFloat>().data;
   new_data.resize(old_data.size());
   const auto& qparams = array->GetQuantizationParams();
-  for (size_t i = 0; i < old_data.size(); i++) {
+  for (int i = 0; i < old_data.size(); i++) {
     new_data[i] = qparams.scale * (old_data[i] - qparams.zero_point);
   }
 }
@@ -45,7 +45,7 @@ namespace toco {
   }

   // Drop min/max inputs
-  for (size_t i = 1; i < fakequant_op->inputs.size(); i++) {
+  for (int i = 1; i < fakequant_op->inputs.size(); i++) {
     if (CountOpsWithInput(*model, fakequant_op->inputs[i]) == 1) {
       model->EraseArray(fakequant_op->inputs[i]);
     }
@@ -166,7 +166,7 @@ namespace toco {
   int index_of_previous_bad_value = 0;
   bool changed = false;

-  for (size_t i = 0; i < buffer_data.size(); i++) {
+  for (int i = 0; i < buffer_data.size(); i++) {
     if (buffer_data[i] == 0) {
       count_bad++;
       if (count_bad > 1) {
@@ -34,7 +34,7 @@ bool IsBroadcastingOp(const Model& model, Operator* op) {
   // Concatenation of identical inputs is usually a broadcast.
   if (op->type == OperatorType::kConcatenation) {
     // Verify that all inputs are the same.
-    for (size_t i = 1; i < op->inputs.size(); ++i) {
+    for (int i = 1; i < op->inputs.size(); ++i) {
       if (op->inputs[i] != op->inputs[0]) {
         return false;
       }
@@ -125,7 +125,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid(
     return false;

   // Make sure the inputs datatype matches.
-  for (size_t i = 0; i < fw_sequence_op->inputs.size(); ++i) {
+  for (int i = 0; i < fw_sequence_op->inputs.size(); ++i) {
     const auto& fw_input_array_name = fw_sequence_op->inputs[i];
     const auto& bw_input_array_name = bw_sequence_op->inputs[i];
     if (model.HasArray(fw_input_array_name) &&
@@ -137,7 +137,7 @@ bool CheckTwoUnidirectionalSequenceOpsAreValid(
   }

   // Make sure the outputs datatype matches.
-  for (size_t i = 0; i < fw_sequence_op->outputs.size(); ++i) {
+  for (int i = 0; i < fw_sequence_op->outputs.size(); ++i) {
     const auto& fw_output_array_name = fw_sequence_op->outputs[i];
     const auto& bw_output_array_name = bw_sequence_op->outputs[i];
     if (model.HasArray(fw_output_array_name) &&
@@ -405,7 +405,7 @@ bool HardcodeMinMaxForPack(Model* model, Operator* op) {
   }
   const auto& first_input_minmax = first_input_array.GetMinMax();

-  for (size_t i = 1; i < op->inputs.size(); i++) {
+  for (int i = 1; i < op->inputs.size(); i++) {
     const auto& input_array = model->GetArray(op->inputs[i]);
     if (!input_array.minmax) {
       return false;
@@ -199,7 +199,7 @@ std::vector<std::unique_ptr<Operator>>::iterator FindOperator(
   shape_array.data_type = ArrayDataType::kInt32;
   auto& shape_buffer = shape_array.GetMutableBuffer<ArrayDataType::kInt32>();
   // This is what imagined as the original shape.
-  for (size_t i = 0; i < imagined_original_shape.size(); ++i) {
+  for (int i = 0; i < imagined_original_shape.size(); ++i) {
     shape_buffer.data.push_back(imagined_original_shape.at(i));
   }

@@ -70,7 +70,7 @@ std::vector<int32> ReshapeToTranspose(const Model& model,
   std::vector<int> not_one_indices;

   // Separate into one indices and not one indices.
-  for (size_t i = 0; i < in_shape.size(); i++) {
+  for (int i = 0; i < in_shape.size(); i++) {
     if (in_shape[i] == 1) {
       one_indices.push_back(i);
     } else {
@@ -167,7 +167,7 @@ std::vector<int32> ReshapeToTranspose(const Model& model,

   // Combine the permutations.
   const auto& transpose_perm = transpose_op->perm;
-  for (size_t i = 0; i < merged_perm.size(); i++) {
+  for (int i = 0; i < merged_perm.size(); i++) {
     merged_perm[i] = transpose_perm[merged_perm[i]];
   }

@@ -170,7 +170,7 @@ void SetDataTypeForAllOutputs(Model* model, Operator* op,
   if (unsupported_op->output_data_types.size() < op->outputs.size()) {
     return ::tensorflow::Status::OK();
   }
-  for (size_t i = 0; i < op->outputs.size(); ++i) {
+  for (int i = 0; i < op->outputs.size(); ++i) {
     const string& output = op->outputs[i];
     const ArrayDataType data_type = unsupported_op->output_data_types[i];
     model->GetArray(output).data_type = data_type;
@@ -149,7 +149,7 @@ bool RecursivelyBackwardPropagateDataType(GraphTransformation* transformation,
                                           ArrayDataType new_data_type,
                                           const MinMax& new_minmax) {
   bool did_change = false;
-  for (size_t input_index = 0; input_index < op->inputs.size(); ++input_index) {
+  for (int input_index = 0; input_index < op->inputs.size(); ++input_index) {
     const auto& input = op->inputs[input_index];
     auto& input_array = model->GetArray(input);

@@ -431,7 +431,7 @@ void ProcessTensorFlowReshapeOperator(Model* model,
   bool has_wildcard = false;
   int wildcard_index = 0;
   int product_non_wildcard_dims = 1;
-  for (size_t i = 0; i < shape_data.size(); i++) {
+  for (int i = 0; i < shape_data.size(); i++) {
     if (shape_data[i] == -1) {
       CHECK(!has_wildcard);
       has_wildcard = true;
@@ -574,7 +574,7 @@ void ProcessTensorFlowReductionOperator(Model* model, Operator* op) {
     std::set<int32> true_indices;
     const auto& reduction_indices =
         reduction_indices_array.GetBuffer<ArrayDataType::kInt32>().data;
-    for (size_t i = 0; i < reduction_indices.size(); ++i) {
+    for (int i = 0; i < reduction_indices.size(); ++i) {
       const int32 reduction_index = reduction_indices[i];
       if (reduction_index < -input_rank || reduction_index >= input_rank) {
         CHECK(false) << "Invalid reduction dimension " << reduction_index
@@ -627,7 +627,7 @@ void ProcessSliceOperator(Model* model, SliceOperator* op) {
   CHECK_EQ(op->begin.size(), op->size.size());

   std::vector<int> output_dims;
-  for (size_t i = 0; i < op->begin.size(); ++i) {
+  for (int i = 0; i < op->begin.size(); ++i) {
     int size = op->size[i];
     if (size == -1) {
       size = input_array.shape().dims(i) - op->begin[i];
@@ -883,7 +883,7 @@ void ProcessTensorFlowSplitVOperator(Model* model,

   CHECK_EQ(op->outputs.size(), op->num_split);

-  for (size_t i = 0; i < op->outputs.size(); ++i) {
+  for (int i = 0; i < op->outputs.size(); ++i) {
     const auto& output = op->outputs[i];
     Shape output_shape = input_shape;
     (*output_shape.mutable_dims())[axis] = size_splits_vector.at(i);
@@ -1514,7 +1514,7 @@ void ProcessPadOperator(Model* model, PadOperator* op) {
   std::vector<int>& dims = *output_shape.mutable_dims();
   CHECK_EQ(op->left_padding.size(), dims.size());

-  for (size_t i = 0; i < op->left_padding.size(); ++i) {
+  for (int i = 0; i < op->left_padding.size(); ++i) {
     dims[i] += op->left_padding[i] + op->right_padding[i];
   }

@@ -1540,7 +1540,7 @@ void ProcessPadV2Operator(Model* model, PadV2Operator* op) {
   std::vector<int>& dims = *output_shape.mutable_dims();
   CHECK_EQ(op->left_padding.size(), dims.size());

-  for (size_t i = 0; i < op->left_padding.size(); ++i) {
+  for (int i = 0; i < op->left_padding.size(); ++i) {
     dims[i] += op->left_padding[i] + op->right_padding[i];
   }

@@ -1683,7 +1683,7 @@ void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) {
   CHECK_LE(op->strides.size(), num_input_axes)
       << "StridedSlice op with output \"" << op->outputs[0]
       << "\", requires no more than " << num_input_axes << " strides";
-  for (size_t i = 0; i < op->strides.size(); i++) {
+  for (int i = 0; i < op->strides.size(); i++) {
     CHECK_NE(op->strides[i], 0) << "Strides must be non-zero. Axis " << i
                                 << " has stride=" << op->strides[i] << ".";
   }
@@ -1814,7 +1814,7 @@ void ProcessTransposeOperator(Model* model, TransposeOperator* op) {
       << "Transpose permutation input " << op->inputs[1]
      << " must be same length as input dimensions";
   std::vector<int>* output_dims = output_array.mutable_shape()->mutable_dims();
-  for (size_t i = 0; i < perm.size(); i++) {
+  for (int i = 0; i < perm.size(); i++) {
     int axis = perm[i];
     CHECK_GE(axis, 0);
     CHECK_LT(axis, input_shape.dimensions_count());
@@ -1856,8 +1856,8 @@ void ProcessArgMinMaxOperator(Model* model, Op* op) {
   std::vector<int> output_dims;

   output_dims.reserve(input_dims.size() - 1);
-  for (size_t i = 0; i < input_dims.size(); ++i) {
-    if ( static_cast<int>(i) != axis) {
+  for (int i = 0; i < input_dims.size(); ++i) {
+    if (i != axis) {
       output_dims.push_back(input_dims[i]);
     }
   }
@@ -1938,7 +1938,7 @@ void ProcessTileOperator(Model* model, TensorFlowTileOperator* op) {

   auto* mutable_dims = output_array.mutable_shape()->mutable_dims();
   mutable_dims->resize(multiples.size());
-  for (size_t i = 0; i < mutable_dims->size(); ++i) {
+  for (int i = 0; i < mutable_dims->size(); ++i) {
     (*mutable_dims)[i] = input_shape.dims(i) * multiples[i];
   }
 }
@@ -2010,8 +2010,8 @@ void ProcessUnpackOperator(Model* model, UnpackOperator* op) {
   std::vector<int> output_dims;

   output_dims.reserve(input_dims.size() - 1);
-  for (size_t i = 0; i < input_dims.size(); ++i) {
-    if ( static_cast<int>(i) != op->axis) {
+  for (int i = 0; i < input_dims.size(); ++i) {
+    if (i != op->axis) {
       output_dims.push_back(input_dims[i]);
     }
   }
@@ -2399,7 +2399,7 @@ void ProcessScatterNdOperator(Model* model, ScatterNdOperator* op) {
   if (unsupported_op->output_shapes.size() < op->outputs.size()) {
     return ::tensorflow::Status::OK();
   }
-  for (size_t i = 0; i < op->outputs.size(); ++i) {
+  for (int i = 0; i < op->outputs.size(); ++i) {
     const string& output = op->outputs[i];
     model->GetArray(output).copy_shape(unsupported_op->output_shapes.at(i));
   }
@@ -31,12 +31,12 @@ bool TransformsToIdentity(std::vector<int> const& perm1,
   // perm1 is the order of the indices after first transpose. When perm1 is
   // reordered according to perm2, if the result is simple increasing sequence
   // i.e., range(0, perm1.size()), then the two transposes cancel each other.
-  for (size_t i = 0; i < perm1.size(); ++i) {
-    if (perm1[i] < 0 || perm1[i] >= static_cast<int>(perm1.size()) || perm2[i] < 0 ||
-        perm2[i] >= static_cast<int>(perm1.size())) {
+  for (int i = 0; i < perm1.size(); ++i) {
+    if (perm1[i] < 0 || perm1[i] >= perm1.size() || perm2[i] < 0 ||
+        perm2[i] >= perm1.size()) {
       return false;
     }
-    if (perm1[perm2[i]] != static_cast<int>(i)) {
+    if (perm1[perm2[i]] != i) {
       return false;
     }
   }
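The comment in the hunk above states the invariant being tested: two consecutive transposes cancel exactly when composing the permutations yields the identity, i.e. perm1[perm2[i]] == i for every i. A standalone restatement of that core check (the original function also range-checks every index first):

```cpp
#include <cstdio>
#include <vector>

// Applying perm2 after perm1 is a no-op exactly when perm1[perm2[i]] == i
// for every position i.
bool ComposesToIdentity(const std::vector<int>& perm1,
                        const std::vector<int>& perm2) {
  if (perm1.size() != perm2.size()) return false;
  for (size_t i = 0; i < perm1.size(); ++i) {
    if (perm1[perm2[i]] != static_cast<int>(i)) return false;
  }
  return true;
}

int main() {
  // {2,0,1} followed by {1,2,0} restores the original order.
  std::printf("%d\n", ComposesToIdentity({2, 0, 1}, {1, 2, 0}));  // 1
  // {2,0,1} applied twice does not.
  std::printf("%d\n", ComposesToIdentity({2, 0, 1}, {2, 0, 1}));  // 0
  return 0;
}
```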
@@ -46,7 +46,7 @@ bool TransformsToIdentity(std::vector<int> const& perm1,
 void ReplaceOpInputsWith(Model* model, const string& lookfor,
                          const string& replacewith) {
   for (const auto& op : model->operators) {
-    for (size_t i = 0; i < op->inputs.size(); ++i) {
+    for (int i = 0; i < op->inputs.size(); ++i) {
       if (op->inputs[i] == lookfor) {
         op->inputs[i] = replacewith;
       }
@@ -82,7 +82,7 @@ bool RemoveTrivialPassthroughOp(GraphTransformation* transformation,
   // We call 'main input' the unique nonconstant input array if there is one,
   // or else the 0-th input.
   int count_nonconstant_input_arrays = 0;
-  for (size_t i = 0; i < passthru_op->inputs.size(); i++) {
+  for (int i = 0; i < passthru_op->inputs.size(); i++) {
     if (!model->GetArray(passthru_op->inputs[i]).buffer) {
       count_nonconstant_input_arrays++;
       if (count_nonconstant_input_arrays == 1) {
@@ -127,9 +127,9 @@ bool IsMoveOperator(OperatorType optype) {
     move_op->outputs[0] = output_name;
   } else {
     // The intermediate array is now the output array.
-    for (size_t i = 0; i < model->operators.size(); i++) {
+    for (int i = 0; i < model->operators.size(); i++) {
       Operator* consumer = model->operators[i].get();
-      for (size_t j = 0; j < consumer->inputs.size(); j++) {
+      for (int j = 0; j < consumer->inputs.size(); j++) {
         if (consumer->inputs[j] == output_name) {
           consumer->inputs[j] = intermediate_name;
         }
@@ -60,7 +60,7 @@ std::vector<int> ComputeNewPerm(std::vector<int> input_dims,
                                 std::vector<int> perm) {
   // These are the major axis of the input.
   std::vector<int> input_indices;
-  for (size_t i = 0; i < input_dims.size(); i++) {
+  for (int i = 0; i < input_dims.size(); i++) {
     if (input_dims[i] != 1) {
       input_indices.push_back(i);
     }
@@ -69,7 +69,7 @@ std::vector<int> ComputeNewPerm(std::vector<int> input_dims,
   // This maps which indices of the input produced the intermediate indices for
   // non-unary dimensions.
   std::unordered_map<int, int> intermediate_to_input_indices_map;
-  for (size_t i = 0; i < intermediate_dims.size(); i++) {
+  for (int i = 0; i < intermediate_dims.size(); i++) {
     if (intermediate_dims[i] != 1) {
       intermediate_to_input_indices_map[i] =
           input_indices[intermediate_to_input_indices_map.size()];
@@ -80,14 +80,14 @@ std::vector<int> ComputeNewPerm(std::vector<int> input_dims,
   // major indices.
   std::vector<int> new_perm;
   new_perm.reserve(input_dims.size());
-  for (size_t i = 0; i < perm.size(); i++) {
+  for (int i = 0; i < perm.size(); i++) {
     if (intermediate_dims[perm[i]] == 1) continue;

     new_perm.push_back(intermediate_to_input_indices_map[perm[i]]);
   }

   // Fill the rest of the transpose in with the ones.
-  for (size_t index = 0; index < input_dims.size(); index++) {
+  for (int index = 0; index < input_dims.size(); index++) {
     if (input_dims[index] == 1) {
       new_perm.push_back(index);
     }
@@ -193,9 +193,9 @@ std::vector<int> ComputeNewPerm(std::vector<int> input_dims,
     DeleteArrayIfUnused(intermediate_name, model);
   } else {
     // The intermediate array is now the output array.
-    for (size_t i = 0; i < model->operators.size(); i++) {
+    for (int i = 0; i < model->operators.size(); i++) {
       Operator* consumer = model->operators[i].get();
-      for (size_t j = 0; j < consumer->inputs.size(); j++) {
+      for (int j = 0; j < consumer->inputs.size(); j++) {
         if (consumer->inputs[j] == output_name) {
           consumer->inputs[j] = intermediate_name;
         }
@@ -124,11 +124,11 @@ namespace toco {
   const auto& offset_float_data =
       offset_array.GetBuffer<ArrayDataType::kFloat>().data;

-  CHECK(static_cast<int>(mul_float_data.size()) == buffer_size);
-  CHECK(static_cast<int>(add_float_data.size()) == buffer_size);
-  CHECK(static_cast<int>(mean_float_data.size()) == buffer_size);
-  CHECK(static_cast<int>(multiplier_float_data.size()) == buffer_size);
-  CHECK(static_cast<int>(offset_float_data.size()) == buffer_size);
+  CHECK(mul_float_data.size() == buffer_size);
+  CHECK(add_float_data.size() == buffer_size);
+  CHECK(mean_float_data.size() == buffer_size);
+  CHECK(multiplier_float_data.size() == buffer_size);
+  CHECK(offset_float_data.size() == buffer_size);

   for (int i = 0; i < buffer_size; i++) {
     mul_float_data[i] = multiplier_float_data[i];
@@ -64,7 +64,7 @@ void CopyTensorSegments(const std::vector<Array*>& input_arrays,
   // Copy the data from input_arrays to concatenated_array_buffer.
   T* dest_ptr = concatenated_array_buffer.data();
   for (int s = 0; s < total_copy_steps; s++) {
-    for (size_t i = 0; i < input_arrays.size(); i++) {
+    for (int i = 0; i < input_arrays.size(); i++) {
       std::copy(src_ptr[i], src_ptr[i] + array_copy_size[i], dest_ptr);
       src_ptr[i] += array_copy_size[i];
       dest_ptr += array_copy_size[i];
@@ -36,7 +36,7 @@ void Pack(Model* model, PackOperator const& op) {
   // Pack inputs into buffer
   CHECK_EQ(op.axis, 0) << "Packing only supported along first axis";
   int dst_offset = 0;
-  for (size_t i = 0; i < op.inputs.size(); i++) {
+  for (int i = 0; i < op.inputs.size(); i++) {
     // Append array data to output for each input array
     const auto& input_array = model->GetArray(op.inputs[i]);
     int input_size = RequiredBufferSizeForShape(input_array.shape());
@@ -50,7 +50,7 @@ bool Slice(SliceOperator const& op, Array const& input_array,
   CHECK_LE(size.size(), 4);
   std::vector<int> begin = op.begin;
   std::vector<int> end;
-  for (size_t i = 0; i < begin.size(); ++i) {
+  for (int i = 0; i < begin.size(); ++i) {
     int dim_size = size[i];
     if (dim_size == -1) {
       // -1 means the rest of the dimension.
@@ -40,7 +40,7 @@ void Transpose(Model* model, const Array& input_array,
   CHECK(input_shape.dimensions_count() == output_shape.dimensions_count());
   const int dim = input_shape.dimensions_count();
   CHECK_LE(dim, 4);
-  CHECK(static_cast<int>(perm.size()) >= dim);
+  CHECK(perm.size() >= dim);
   for (int i = 0; i < dim; i++) {
     CHECK(perm[i] >= 0 && perm[i] < dim);
     CHECK(input_shape.dims(perm[i]) == output_shape.dims(i));
@@ -62,7 +62,7 @@ void ReduceGeneric(bool keep_dims, const std::vector<int>& axes,
   }

   std::vector<int> output_indices(input_shape.dimensions_count());
-  for (size_t input_offset = 0; input_offset < input.size(); ++input_offset) {
+  for (int input_offset = 0; input_offset < input.size(); ++input_offset) {
     std::vector<int> input_indices = ReverseOffset(input_shape, input_offset);
     // Calculate the output location by squashing input indices to 0
     // in reduced axes.
@@ -319,7 +319,7 @@ bool CopyMinMaxFromFirstInput(const Operator& op, Model* model) {
   } else if (unary_op->type == OperatorType::kRelu6 ||
              unary_op->type == OperatorType::kRelu1 ||
              unary_op->type == OperatorType::kRelu) {
-    for (int i = 0; i < output_buffer_size; ++i) {
+    for (size_t i = 0; i < output_buffer_size; ++i) {
       const float value = (*input_float_data)[i];
       float new_value = 0.0f;
       switch (unary_op->type) {
@@ -57,10 +57,10 @@ namespace toco {
   // Split up the DynamicStitch inputs into the indices and data.
   std::vector<string> stitch_indices_inputs;
   std::vector<string> stitch_data_inputs;
-  for (int i = 0; i < stitch_op->num_partitions; ++i) {
+  for (size_t i = 0; i < stitch_op->num_partitions; ++i) {
     stitch_indices_inputs.push_back(stitch_op->inputs[i]);
   }
-  for (int i = stitch_op->num_partitions; i < stitch_op->num_partitions * 2;
+  for (size_t i = stitch_op->num_partitions; i < stitch_op->num_partitions * 2;
        ++i) {
     stitch_data_inputs.push_back(stitch_op->inputs[i]);
   }
@@ -263,7 +263,7 @@ void ReadModelFlagsFromCommandLineFlags(
     QCHECK(uses_multi_input_flags);
     std::vector<string> mean_values =
         absl::StrSplit(parsed_model_flags.mean_values.value(), ',');
-    QCHECK(static_cast<int>(mean_values.size()) == model_flags->input_arrays_size());
+    QCHECK(mean_values.size() == model_flags->input_arrays_size());
     for (size_t i = 0; i < mean_values.size(); ++i) {
       char* last = nullptr;
       model_flags->mutable_input_arrays(i)->set_mean_value(
@@ -280,7 +280,7 @@ void ReadModelFlagsFromCommandLineFlags(
     QCHECK(uses_multi_input_flags);
     std::vector<string> std_values =
         absl::StrSplit(parsed_model_flags.std_values.value(), ',');
-    QCHECK( static_cast<int>(std_values.size()) == model_flags->input_arrays_size());
+    QCHECK(std_values.size() == model_flags->input_arrays_size());
     for (size_t i = 0; i < std_values.size(); ++i) {
       char* last = nullptr;
       model_flags->mutable_input_arrays(i)->set_std_value(
@@ -298,7 +298,7 @@ void ReadModelFlagsFromCommandLineFlags(
     QCHECK(uses_multi_input_flags);
     std::vector<string> input_data_types =
         absl::StrSplit(parsed_model_flags.input_data_types.value(), ',');
-    QCHECK(static_cast<int>(input_data_types.size()) == model_flags->input_arrays_size());
+    QCHECK(input_data_types.size() == model_flags->input_arrays_size());
     for (size_t i = 0; i < input_data_types.size(); ++i) {
       IODataType type;
       QCHECK(IODataType_Parse(input_data_types[i], &type));
@@ -321,7 +321,7 @@ void ReadModelFlagsFromCommandLineFlags(
     QCHECK(uses_multi_input_flags);
     std::vector<string> input_shapes =
        absl::StrSplit(parsed_model_flags.input_shapes.value(), ':');
-    QCHECK(static_cast<int>(input_shapes.size()) == model_flags->input_arrays_size());
+    QCHECK(input_shapes.size() == model_flags->input_arrays_size());
     for (size_t i = 0; i < input_shapes.size(); ++i) {
       auto* shape = model_flags->mutable_input_arrays(i)->mutable_shape();
       shape->clear_dims();
@@ -320,7 +320,7 @@ void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags,
   std::vector<string> input_types =
       absl::StrSplit(parsed_toco_flags.input_types.value(), ',');
   QCHECK(!input_types.empty());
-  for (size_t i = 1; i < input_types.size(); i++) {
+  for (int i = 1; i < input_types.size(); i++) {
     QCHECK_EQ(input_types[i], input_types[0]);
   }
   toco::IODataType input_type;