Merge pull request #40133 from tg-at-google:master
PiperOrigin-RevId: 315036791
Change-Id: I2a345bf39bf6eda7af7f4356781f3d83ff1ca502
commit 5b70031ebf
Changed directories:
  tensorflow/core/framework
  tensorflow/core/lib/io
  tensorflow/core/platform
  tensorflow/core/profiler/internal
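
All of the hunks in this commit fix the same class of compiler diagnostic: comparisons and loop bounds that mix signed and unsigned integer types, which GCC and Clang report under -Wsign-compare. A minimal standalone sketch (not TensorFlow code) of the warning and the two fixes used below, either changing the variable's type to match the bound, or casting the side that is known to be in range:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> v = {1, 2, 3};
      int64_t limit = 2;  // signed bound, known to be non-negative
      int sum = 0;

      // for (int i = 0; i < v.size(); ++i)   // -Wsign-compare: int vs size_t
      for (size_t i = 0; i < v.size(); ++i) sum += v[i];  // counter matches bound

      // if (v.size() <= limit)               // -Wsign-compare: size_t vs int64_t
      if (v.size() <= static_cast<size_t>(limit)) sum = 0;  // cast the signed side

      std::printf("%d\n", sum);  // v.size() > limit, so this prints 6
      return 0;
    }

Each hunk below is an instance of one of these two patterns.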
@@ -182,7 +182,7 @@ void TensorShapeBase<Shape>::InitDims(gtl::ArraySlice<int64> dim_sizes) {

   // Allow sizes that are under kint64max^0.25 so that 4-way multiplication
   // below cannot overflow.
-  static const uint64 kMaxSmall = 0xd744;
+  static const int64 kMaxSmall = 0xd744;
   static_assert(kMaxSmall * kMaxSmall * kMaxSmall * kMaxSmall <= kint64max,
                 "bad overflow check");
   bool large_size = false;
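The constant's signedness matters because kint64max is a signed int64: with a uint64 kMaxSmall, the static_assert compares unsigned against signed, and so do the per-dimension size checks that follow in InitDims. A compilable sketch of the idea, with kint64max defined locally to stand in for TensorFlow's constant of the same name:

    #include <cstdint>

    constexpr int64_t kint64max = INT64_MAX;  // stand-in for TF's kint64max

    // 0xd744 = 55108, and 55108^4 is just below INT64_MAX, so the 4-way
    // product cannot overflow. With int64 on both sides, the comparison is
    // signed-vs-signed and clean under -Wsign-compare.
    constexpr int64_t kMaxSmall = 0xd744;
    static_assert(kMaxSmall * kMaxSmall * kMaxSmall * kMaxSmall <= kint64max,
                  "bad overflow check");

    // Dimension sizes are signed int64, so this comparison also stays signed.
    bool IsSmall(int64_t dim) { return dim <= kMaxSmall; }

    int main() { return IsSmall(42) ? 0 : 1; }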
@@ -92,7 +92,7 @@ Status RandomAccessInputStream::SkipNBytes(int64 bytes_to_skip) {
     } else {
       return s;
     }
-    if (data.size() < bytes_to_read) {
+    if (data.size() < static_cast<size_t>(bytes_to_read)) {
       return errors::OutOfRange("reached end of file");
     }
     bytes_to_skip -= bytes_to_read;
@@ -134,7 +134,7 @@ Status SnappyInputBuffer::ReadCompressedBlockLength(uint32* length) {
   }
   size_t readable = std::min(bytes_to_read, avail_in_);

-  for (int i = 0; i < readable; i++) {
+  for (size_t i = 0; i < readable; i++) {
     // The "unsigned char" type cast is intentional to avoid implicit type
     // casting of the signed char to unsigned int during bitwise OR which
     // causes weird overflow errors.
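Here the counter type changes to match `readable`, a size_t. The preserved comment deserves a concrete illustration: when a block length is assembled byte by byte, a raw char that happens to be negative sign-extends during integer promotion, and the bitwise OR then sets all the high bits. A standalone sketch (the big-endian 4-byte length layout is assumed for illustration):

    #include <cstdint>
    #include <cstdio>

    uint32_t ReadLength(const char* buf) {
      uint32_t length = 0;
      for (size_t i = 0; i < 4; ++i) {
        // Cast through unsigned char: on signed-char platforms a raw buf[i]
        // of '\x80' sign-extends to -128, and ORing it in would produce
        // 0xffffff80 rather than the intended 0x00000180.
        length = (length << 8) | static_cast<unsigned char>(buf[i]);
      }
      return length;
    }

    int main() {
      const char bytes[] = {0x00, 0x00, 0x01, '\x80'};
      std::printf("%u\n", ReadLength(bytes));  // prints 384 (0x180)
    }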
@@ -76,7 +76,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) {

   // If there is sufficient free space in input_buffer_ to fit data we
   // add it there and return.
-  if (bytes_to_write <= AvailableInputSpace()) {
+  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
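`bytes_to_write` is `data.size()`, a size_t, while AvailableInputSpace() returns int32, so the original comparison mixed signedness; the same cast recurs in the next four hunks. Note the narrowing direction: casting size_t down to int32 is only sound because these staging buffers are far below 2 GiB. A sketch with hypothetical names mirroring the real ones:

    #include <cstdint>
    #include <string>

    class OutputBufferSketch {
     public:
      // Mirrors AvailableInputSpace(): capacity bookkeeping is int32.
      int32_t AvailableInputSpace() const { return capacity_ - used_; }

      // Mirrors the buffered fast path of Write()/Append().
      bool TryBuffer(const std::string& data) {
        size_t bytes_to_write = data.size();
        // if (bytes_to_write <= AvailableInputSpace())  // -Wsign-compare
        if (static_cast<int32_t>(bytes_to_write) <= AvailableInputSpace()) {
          used_ += static_cast<int32_t>(bytes_to_write);
          return true;
        }
        return false;  // caller deflates the buffer and retries
      }

     private:
      int32_t capacity_ = 64 * 1024;  // well below INT32_MAX, so no truncation
      int32_t used_ = 0;
    };

    int main() {
      OutputBufferSketch buf;
      return buf.TryBuffer("hello") ? 0 : 1;
    }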
@@ -87,7 +87,7 @@ Status SnappyOutputBuffer::Write(StringPiece data) {
   TF_RETURN_IF_ERROR(DeflateBuffered());

   // input_buffer_ should be empty at this point.
-  if (bytes_to_write <= AvailableInputSpace()) {
+  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
@@ -144,7 +144,7 @@ void SnappyOutputBuffer::AddToInputBuffer(StringPiece data) {
   const int32 free_tail_bytes =
       input_buffer_capacity_ - (read_bytes + unread_bytes);

-  if (bytes_to_write > free_tail_bytes) {
+  if (static_cast<int32>(bytes_to_write) > free_tail_bytes) {
     memmove(input_buffer_.get(), next_in_, avail_in_);
     next_in_ = input_buffer_.get();
   }
@@ -98,7 +98,7 @@ void ZlibOutputBuffer::AddToInputBuffer(StringPiece data) {
   int32 unread_bytes = z_stream_->avail_in;
   int32 free_tail_bytes = input_buffer_capacity_ - (read_bytes + unread_bytes);

-  if (bytes_to_write > free_tail_bytes) {
+  if (static_cast<int32>(bytes_to_write) > free_tail_bytes) {
     memmove(z_stream_input_.get(), z_stream_->next_in, z_stream_->avail_in);
     z_stream_->next_in = z_stream_input_.get();
   }
@@ -154,7 +154,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) {

   size_t bytes_to_write = data.size();

-  if (bytes_to_write <= AvailableInputSpace()) {
+  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
@@ -162,7 +162,7 @@ Status ZlibOutputBuffer::Append(StringPiece data) {
   TF_RETURN_IF_ERROR(DeflateBuffered(zlib_options_.flush_mode));

   // At this point input stream should be empty.
-  if (bytes_to_write <= AvailableInputSpace()) {
+  if (static_cast<int32>(bytes_to_write) <= AvailableInputSpace()) {
     AddToInputBuffer(data);
     return Status::OK();
   }
@@ -214,7 +214,7 @@ bool Env::FilesExist(const std::vector<string>& files,
     }
     if (fs_status) {
       result &= fs_result;
-      for (int i = 0; i < itr.second.size(); ++i) {
+      for (size_t i = 0; i < itr.second.size(); ++i) {
         per_file_status[itr.second[i]] = fs_status->at(i);
       }
     } else if (!fs_result) {
@@ -308,7 +308,7 @@ StringPiece FileSystem::Basename(StringPiece path) const {
 StringPiece FileSystem::Extension(StringPiece path) const {
   StringPiece basename = this->Basename(path);

-  int pos = basename.rfind('.');
+  size_t pos = basename.rfind('.');
   if (pos == StringPiece::npos) {
     return StringPiece(path.data() + path.size(), 0);
   } else {
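This change is about more than silencing the warning: rfind returns size_t, and StringPiece::npos is size_t(-1). Storing the result in an int relies on implementation-defined narrowing plus an implicit round-trip back to size_t in the npos test; matching the return type makes the comparison exact. A standalone sketch using std::string in place of StringPiece:

    #include <cstdio>
    #include <string>

    std::string Extension(const std::string& basename) {
      size_t pos = basename.rfind('.');  // was: int pos (-Wsign-compare)
      if (pos == std::string::npos) {
        return "";                       // no '.' found: no extension
      }
      return basename.substr(pos + 1);
    }

    int main() {
      std::printf("%s\n", Extension("model.ckpt").c_str());  // prints ckpt
    }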
@@ -103,7 +103,7 @@ Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern,
           children_dir_status[i] = fs->IsDirectory(child_path);
         }
       });
-  for (int i = 0; i < children.size(); ++i) {
+  for (size_t i = 0; i < children.size(); ++i) {
     const string child_path = io::JoinPath(current_dir, children[i]);
     // If the IsDirectory call was cancelled we bail.
     if (children_dir_status[i].code() == tensorflow::error::CANCELLED) {
@@ -74,7 +74,9 @@ class StatusLogSink : public TFLogSink {

     mutex_lock lock(mu_);
     messages_.emplace_back(entry.ToString());
-    if (messages_.size() > num_messages_) messages_.pop_front();
+    if (messages_.size() > static_cast<size_t>(num_messages_)) {
+      messages_.pop_front();
+    }
   }

  private:
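Two things change here: the signed num_messages_ member is widened to size_t for the deque-size comparison, and the single-line if gains braces, which is why the new side of the hunk is two lines longer. The cast direction is the opposite of the Write()/Append() hunks: widening the signed bound cannot truncate and is safe while the bound is non-negative. A sketch:

    #include <cstdint>
    #include <deque>
    #include <string>

    class LogSinkSketch {
     public:
      void Send(const std::string& entry) {
        messages_.emplace_back(entry);
        // Widen the signed, non-negative bound instead of narrowing the
        // container size: no truncation risk, no -Wsign-compare.
        if (messages_.size() > static_cast<size_t>(num_messages_)) {
          messages_.pop_front();  // keep only the newest num_messages_ entries
        }
      }

     private:
      std::deque<std::string> messages_;
      int32_t num_messages_ = 10;
    };

    int main() {
      LogSinkSketch sink;
      for (int i = 0; i < 20; ++i) sink.Send("message");
      return 0;
    }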
@@ -50,7 +50,7 @@ std::vector<absl::string_view> SplitNameAndMetadata(
 std::vector<absl::string_view> SplitPairs(absl::string_view metadata) {
   std::vector<absl::string_view> key_value_pairs;
   std::stack<char> quotes;
-  int start = 0, end = 0;
+  size_t start = 0, end = 0;
   for (; end < metadata.size(); ++end) {
     char ch = metadata[end];
     switch (ch) {