diff --git a/tensorflow/core/common_runtime/bfc_allocator.cc b/tensorflow/core/common_runtime/bfc_allocator.cc index b18209cb605..2cf668400e6 100644 --- a/tensorflow/core/common_runtime/bfc_allocator.cc +++ b/tensorflow/core/common_runtime/bfc_allocator.cc @@ -453,8 +453,8 @@ void BFCAllocator::RemoveFreeChunkIterFromBin( void BFCAllocator::RemoveFreeChunkFromBin(BFCAllocator::ChunkHandle h) { Chunk* c = ChunkFromHandle(h); CHECK(!c->in_use() && (c->bin_num != kInvalidBinNum)); - int count = BinFromIndex(c->bin_num)->free_chunks.erase(h); - CHECK(count > 0) << "Could not find chunk in bin"; + CHECK_GT(BinFromIndex(c->bin_num)->free_chunks.erase(h), 0) + << "Could not find chunk in bin"; c->bin_num = kInvalidBinNum; } diff --git a/tensorflow/core/common_runtime/bfc_allocator.h b/tensorflow/core/common_runtime/bfc_allocator.h index 0b528cb0c27..b74c161dcec 100644 --- a/tensorflow/core/common_runtime/bfc_allocator.h +++ b/tensorflow/core/common_runtime/bfc_allocator.h @@ -78,7 +78,7 @@ class BFCAllocator : public VisitableAllocator { // A ChunkHandle is an index into the chunks_ vector in BFCAllocator // kInvalidChunkHandle means an invalid chunk - typedef int ChunkHandle; + typedef size_t ChunkHandle; static const int kInvalidChunkHandle = -1; typedef int BinNum; diff --git a/tensorflow/core/common_runtime/device_mgr.cc b/tensorflow/core/common_runtime/device_mgr.cc index 820c4370e21..7807656cb25 100644 --- a/tensorflow/core/common_runtime/device_mgr.cc +++ b/tensorflow/core/common_runtime/device_mgr.cc @@ -44,7 +44,7 @@ DeviceMgr::~DeviceMgr() { } StringPiece DeviceMgr::CopyToBackingStore(StringPiece s) { - int n = s.size(); + size_t n = s.size(); char* space = name_backing_store_.Alloc(n); memcpy(space, s.data(), n); return StringPiece(space, n); diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc index c4b2b6c12a5..eda2be3e70f 100644 --- a/tensorflow/core/common_runtime/direct_session.cc +++ 
b/tensorflow/core/common_runtime/direct_session.cc @@ -427,7 +427,7 @@ Status DirectSession::Run(const RunOptions& run_options, TF_RETURN_IF_ERROR(SendInputs(inputs, executors_and_keys, run_state.rendez)); // Start parallel Executors. - const int num_executors = executors_and_keys->items.size(); + const size_t num_executors = executors_and_keys->items.size(); ExecutorBarrier* barrier = new ExecutorBarrier( num_executors, run_state.rendez, [&run_state](const Status& ret) { { @@ -458,7 +458,7 @@ Status DirectSession::Run(const RunOptions& run_options, options_.config.graph_options().build_cost_model(); const int64 build_cost_model_after = options_.config.graph_options().build_cost_model_after(); - int measure_step_count = executor_step_count - build_cost_model_after; + int64 measure_step_count = executor_step_count - build_cost_model_after; if (measure_step_count >= 0) { update_cost_model = ((measure_step_count + 1) % build_cost_model_every == 0); @@ -611,7 +611,7 @@ Status DirectSession::PRunSetup(const std::vector<string>& input_names, } // Start parallel Executors. - const int num_executors = executors_and_keys->items.size(); + const size_t num_executors = executors_and_keys->items.size(); ExecutorBarrier* barrier = new ExecutorBarrier( num_executors, run_state->rendez, [run_state](const Status& ret) { if (!ret.ok()) { diff --git a/tensorflow/core/common_runtime/executor.cc b/tensorflow/core/common_runtime/executor.cc index eaa54c2c48a..561e185ac4e 100644 --- a/tensorflow/core/common_runtime/executor.cc +++ b/tensorflow/core/common_runtime/executor.cc @@ -232,7 +232,7 @@ struct NodeItem { int input_start = 0; // Number of output edges.
- int num_output_edges; + size_t num_output_edges; PendingCounts::Handle pending_id; @@ -307,7 +307,7 @@ class GraphView { void Initialize(const Graph* g); Status SetAllocAttrs(const Graph* g, const Device* device); - NodeItem* node(int id) const { + NodeItem* node(size_t id) const { DCHECK_GE(id, 0); DCHECK_LT(id, num_nodes_); uint32 offset = node_offsets_[id]; @@ -454,7 +454,7 @@ GraphView::~GraphView() { } size_t GraphView::NodeItemBytes(const Node* n) { - const int num_output_edges = n->out_edges().size(); + const size_t num_output_edges = n->out_edges().size(); const int num_inputs = n->num_inputs(); const int num_outputs = n->num_outputs(); @@ -500,11 +500,11 @@ char* GraphView::InitializeNode(char* ptr, const Node* n) { // pointers). Casting to int64 is needed on 32bit CPU to avoid comparing // values as "int" vs "size_t" in CHECK_LE. CHECK_LE(static_cast<int64>(ptr - space_), kuint32max); - const uint32 offset = ptr - space_; + const uint32 offset = static_cast<uint32>(ptr - space_); node_offsets_[id] = offset; ptr += bytes; - const int num_output_edges = n->out_edges().size(); + const size_t num_output_edges = n->out_edges().size(); const int num_inputs = n->num_inputs(); const int num_outputs = n->num_outputs(); @@ -580,9 +580,10 @@ void GraphView::Initialize(const Graph* g) { CHECK_EQ(ptr, space_ + total_bytes); } -void GetMaxPendingCounts(const Node* n, int* max_pending, int* max_dead_count) { - const int num_in_edges = n->in_edges().size(); - int initial_count; +void GetMaxPendingCounts(const Node* n, size_t* max_pending, + size_t* max_dead_count) { + const size_t num_in_edges = n->in_edges().size(); + size_t initial_count; if (IsMerge(n)) { // merge waits all control inputs so we initialize the pending // count to be the number of control edges. @@ -626,8 +627,7 @@ Status ExecutorImpl::Initialize() { FrameInfo* frame_info = EnsureFrameInfo(frame_name); // See if this node is a root node, and if so, add to root_nodes_.
- const int num_in_edges = n->in_edges().size(); - if (num_in_edges == 0) { + if (n->in_edges().empty()) { root_nodes_.push_back(n); } @@ -659,7 +659,7 @@ Status ExecutorImpl::Initialize() { // pending counts data structure, and allocate a handle in // that frame's pending counts data structure that has enough // space to store these maximal count values. - int max_pending, max_dead; + size_t max_pending, max_dead; GetMaxPendingCounts(n, &max_pending, &max_dead); item->pending_id = frame_info->pending_counts_layout.CreateHandle(max_pending, max_dead); @@ -896,7 +896,7 @@ class ExecutorState { Entry* input_tensors; // The number of outstanding ops for each iteration. - int outstanding_ops; + size_t outstanding_ops; // The number of outstanding frames for each iteration. int outstanding_frame_count; @@ -1037,13 +1037,13 @@ class ExecutorState { inline IterationState* GetIteration(int64 iter) EXCLUSIVE_LOCKS_REQUIRED(mu) { - int index = iter % iterations.size(); + size_t index = iter % iterations.size(); return iterations[index]; } inline void SetIteration(int64 iter, IterationState* state) EXCLUSIVE_LOCKS_REQUIRED(mu) { - int index = iter % iterations.size(); + size_t index = iter % iterations.size(); DCHECK(state == nullptr || iterations[index] == nullptr); iterations[index] = state; } @@ -1404,7 +1404,7 @@ void ExecutorImpl::InitializePending(const Graph* graph, for (const Node* n : graph->nodes()) { const int id = n->id(); const string& name = cf_info.frame_names[id]; - int max_pending, max_dead; + size_t max_pending, max_dead; GetMaxPendingCounts(n, &max_pending, &max_dead); const NodeItem* item = gview_.node(id); PendingCounts* counts = EnsureFrameInfo(name)->pending_counts; @@ -2027,7 +2027,7 @@ bool ExecutorState::NodeDone(const Status& s, const Node* node, } bool completed = false; - int ready_size = ready.size(); + size_t ready_size = ready.size(); if (ready_size == 0 || !s.ok()) { completed = (num_outstanding_ops_.fetch_sub(1) == 1); } else if (ready_size > 
1) { @@ -2375,10 +2375,10 @@ void ExecutorState::FrameState::ActivateNodes(const NodeItem* item, TaggedNodeSeq* ready) { const GraphView& gview = executor->gview_; IterationState* iter_state = GetIteration(iter); - const int num_output_edges = item->num_output_edges; + const size_t num_output_edges = item->num_output_edges; const EdgeInfo* edges = item->output_edge_list(); Entry* input_tensors = iter_state->input_tensors; - for (int out_index = 0; out_index < num_output_edges; out_index++) { + for (size_t out_index = 0; out_index < num_output_edges; out_index++) { const EdgeInfo& e = edges[out_index]; const int dst_id = e.dst_id; const NodeItem* dst_item = gview.node(dst_id); diff --git a/tensorflow/core/common_runtime/executor.h b/tensorflow/core/common_runtime/executor.h index 239c9666e33..93b58906dda 100644 --- a/tensorflow/core/common_runtime/executor.h +++ b/tensorflow/core/common_runtime/executor.h @@ -162,7 +162,7 @@ class ExecutorBarrier { // // 'done' is called after the last executor completes, and // ExecutorBarrier is deleted. 
- ExecutorBarrier(int num, Rendezvous* r, StatusCallback done) + ExecutorBarrier(size_t num, Rendezvous* r, StatusCallback done) : rendez_(r), done_cb_(done), pending_(num) {} ~ExecutorBarrier() {} diff --git a/tensorflow/core/common_runtime/function.cc b/tensorflow/core/common_runtime/function.cc index cb7e1a40ceb..5f011c2ce94 100644 --- a/tensorflow/core/common_runtime/function.cc +++ b/tensorflow/core/common_runtime/function.cc @@ -274,8 +274,9 @@ class CallOp : public AsyncOpKernel { if (!status.ok()) { ctx->SetStatus(status); } else { - CHECK_EQ(rets->size(), ctx->num_outputs()); - for (size_t i = 0; i < rets->size(); ++i) { + const int ret_size = static_cast<int>(rets->size()); + CHECK_EQ(ret_size, ctx->num_outputs()); + for (int i = 0; i < ret_size; ++i) { ctx->set_output(i, (*rets)[i]); } } @@ -1000,7 +1001,7 @@ string NewName(const Node* n, bool pretty) { void ToGraphDef(const Graph* g, GraphDef* gdef, bool pretty) { // We visit nodes in forward topological sort order, which is a // possible execution order of the graph. - std::vector<int> pending(g->num_node_ids()); + std::vector<size_t> pending(g->num_node_ids()); std::deque<const Node*> ready; for (const Node* n : g->nodes()) { pending[n->id()] = n->in_edges().size(); @@ -1154,7 +1155,7 @@ FunctionBody* SymbolicGradientHelper::Compute() { Graph* g = gbody_->graph; - const int num_y = gbody_->ret_nodes.size(); + const int num_y = static_cast<int>(gbody_->ret_nodes.size()); // Populate 'y_node_outputs_' with node function body outputs.
// Populate 'y_grad_nodes' with initial gradient nodes for each return node of @@ -1169,7 +1170,7 @@ FunctionBody* SymbolicGradientHelper::Compute() { y_node_outputs.push_back({y, 0}); DCHECK_EQ(y->type_string(), kRetOp); const DataType dtype = y->input_type(0); - const int index = gbody_->arg_nodes.size(); + const int index = static_cast<int>(gbody_->arg_nodes.size()); Node* dy = AddArg(g, dtype, index); gbody_->arg_types.push_back(dtype); gbody_->arg_nodes.push_back(dy); @@ -1177,7 +1178,7 @@ FunctionBody* SymbolicGradientHelper::Compute() { } // Populate 'x_nodes' with function args (excluding 'y_grad_node_outputs'). - const int num_x = fbody_->arg_nodes.size(); + const size_t num_x = fbody_->arg_nodes.size(); std::vector<NodeOut> x_node_outputs; x_node_outputs.reserve(num_x); for (size_t i = 0; i < fbody_->arg_nodes.size(); ++i) { @@ -1200,7 +1201,8 @@ FunctionBody* SymbolicGradientHelper::Compute() { gbody_->ret_nodes.clear(); // Add new return nodes to the function gradient body for each node // in 'x_grad_nodes'. - for (size_t i = 0; i < fbody_->arg_types.size(); ++i) { + const int arg_types_size = static_cast<int>(fbody_->arg_types.size()); + for (int i = 0; i < arg_types_size; ++i) { Endpoint grad = {x_grad_node_outputs[i].node, x_grad_node_outputs[i].index}; Node* ret = AddRet(g, grad, i); gbody_->ret_nodes.push_back(ret); diff --git a/tensorflow/core/common_runtime/gpu/gpu_stream_util.cc b/tensorflow/core/common_runtime/gpu/gpu_stream_util.cc index eae917a4395..de715d140a1 100644 --- a/tensorflow/core/common_runtime/gpu/gpu_stream_util.cc +++ b/tensorflow/core/common_runtime/gpu/gpu_stream_util.cc @@ -82,7 +82,7 @@ Status AssignStreams(const Graph* graph, const AssignStreamsOpts& opts, // Determine a suitable stream to use.
int stream_id = highest_stream_id + 1; for (const Edge* e : n->in_edges()) { - const int fanout = e->src()->out_edges().size(); + const size_t fanout = e->src()->out_edges().size(); if (fanout == 1) { stream_id = (*node_to_stream_id)[e->src()->id()]; break; diff --git a/tensorflow/core/common_runtime/gpu/process_state.cc b/tensorflow/core/common_runtime/gpu/process_state.cc index f9975ef0a08..cee7b6d78ad 100644 --- a/tensorflow/core/common_runtime/gpu/process_state.cc +++ b/tensorflow/core/common_runtime/gpu/process_state.cc @@ -191,7 +191,7 @@ Allocator* ProcessState::GetCUDAHostAllocator(int numa_node) { // example, process_state could maybe save the first stream executor // it knows is valid. gpu::StreamExecutor* se = nullptr; - for (size_t i = 0; i < gpu_allocators_.size(); ++i) { + for (int i = 0; i < static_cast<int>(gpu_allocators_.size()); ++i) { if (gpu_allocators_[i] != nullptr) { se = GPUMachineManager()->ExecutorForDevice(i).ValueOrDie(); break; diff --git a/tensorflow/core/common_runtime/pending_counts.h b/tensorflow/core/common_runtime/pending_counts.h index f0c79ad601c..198eb896afc 100644 --- a/tensorflow/core/common_runtime/pending_counts.h +++ b/tensorflow/core/common_runtime/pending_counts.h @@ -69,7 +69,7 @@ class PendingCounts { // to retrieve the count data for this node.
class Layout { public: - Handle CreateHandle(int max_pending_count, int max_dead_count); + Handle CreateHandle(size_t max_pending_count, size_t max_dead_count); private: friend class PendingCounts; @@ -91,7 +91,7 @@ class PendingCounts { ~PendingCounts() { delete[] bytes_; } - void set_initial_count(Handle h, int pending_count) { + void set_initial_count(Handle h, size_t pending_count) { if (h.is_large_) { LargeCounts* c = Large(h); c->pending = pending_count; @@ -306,7 +306,7 @@ class PendingCounts { }; inline PendingCounts::Handle PendingCounts::Layout::CreateHandle( - int max_pending_count, int max_dead_count) { + size_t max_pending_count, size_t max_dead_count) { Handle result; if ((max_pending_count > kMaxCountForPackedCounts) || (max_dead_count > kMaxCountForPackedCounts)) { diff --git a/tensorflow/core/util/util.cc b/tensorflow/core/util/util.cc index 3481a6aaa4d..1e5a9c57126 100644 --- a/tensorflow/core/util/util.cc +++ b/tensorflow/core/util/util.cc @@ -85,7 +85,7 @@ void MovingAverage::AddValue(double v) { static char hex_char[] = "0123456789abcdef"; -string PrintMemory(const char* ptr, int n) { +string PrintMemory(const char* ptr, size_t n) { string ret; ret.resize(n * 3); for (int i = 0; i < n; ++i) { diff --git a/tensorflow/core/util/util.h b/tensorflow/core/util/util.h index c142f4d0d26..4adf2f14dcc 100644 --- a/tensorflow/core/util/util.h +++ b/tensorflow/core/util/util.h @@ -49,7 +49,7 @@ class MovingAverage { // Returns a string printing bytes in ptr[0..n). The output looks // like "00 01 ef cd cd ef". -string PrintMemory(const char* ptr, int n); +string PrintMemory(const char* ptr, size_t n); // Given a flattened index into a tensor, computes a string s so that // StrAppend("tensor", s) is a Python indexing expression. E.g.,