Remove all 64/32 bit warnings in tensorflow/cc

Change: 153637886
Authored by Suharsh Sivakumar on 2017-04-19 13:52:40 -08:00; committed by TensorFlower Gardener
parent 1fc916d0c1
commit c6ab1fb225
10 changed files with 46 additions and 41 deletions

View File

@@ -730,7 +730,7 @@ void OpInfo::GetOutput(string* out) const {
     // One output, no need for NameRangeMap
     if (is_list_output[0]) {
       strings::StrAppend(out,
-                         "  for (int64 i = 0; i < ret->num_outputs(); ++i)\n");
+                         "  for (int32 i = 0; i < ret->num_outputs(); ++i)\n");
       strings::StrAppend(out, "    this->", output_names[0],
                          ".push_back(Output(ret, i));\n");
     } else {
@@ -753,7 +753,7 @@ void OpInfo::GetOutput(string* out) const {
     const string arg_range = strings::StrCat(
         "_outputs_range[\"", graph_op_def.output_arg(i).name(), "\"]");
     if (is_list_output[i]) {
-      strings::StrAppend(out, "  for (int64 i = ", arg_range, ".first; i < ",
+      strings::StrAppend(out, "  for (int32 i = ", arg_range, ".first; i < ",
                          arg_range, ".second; ++i)\n");
       strings::StrAppend(out, "    this->", output_names[i],
                          ".push_back(Output(ret, i));\n");
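Since the generated loop index is passed to the Output(ret, i) constructor, whose index parameter is int32 after this commit (see ops.h below), an int64 counter would narrow at every generated call site. A minimal self-contained sketch of that warning and the fix, with hypothetical names rather than code from this commit:

    #include <cstdint>

    void Consume(int32_t /*index*/) {}   // stands in for Output(ret, i)

    void Sketch(int num_outputs) {       // stands in for ret->num_outputs()
      for (int64_t i = 0; i < num_outputs; ++i) {
        Consume(i);  // warns: int64 -> int32 narrowing (e.g. MSVC C4244)
      }
      for (int32_t i = 0; i < num_outputs; ++i) {
        Consume(i);  // index type matches the parameter; no warning
      }
    }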

View File

@@ -40,8 +40,8 @@ Status ComputeTheoreticalJacobianTranspose(
     const std::vector<Tensor>& x_datas, const OutputList& ys,
     const std::vector<TensorShape>& y_shapes,
     std::vector<Tensor>& jacobian_ts) {
-  int y_num = y_shapes.size();
-  int x_num = x_shapes.size();
+  size_t y_num = y_shapes.size();
+  size_t x_num = x_shapes.size();
   // Call AddSymbolicGradients to get 'dxs' (we will feed 'dys').
   OutputList dys;
   for (const auto& y_shape : y_shapes) {
@@ -130,8 +130,8 @@ Status ComputeNumericJacobianTranspose(const Scope& scope, const OutputList& xs,
                                        const T delta,
                                        std::vector<Tensor>& x_datas,
                                        std::vector<Tensor>& jacobian_ts) {
-  int y_num = y_shapes.size();
-  int x_num = x_shapes.size();
+  size_t y_num = y_shapes.size();
+  size_t x_num = x_shapes.size();
   ClientSession session(scope);
   for (int x_idx = 0; x_idx < x_num; x_idx++) {
@@ -176,8 +176,8 @@ void InitJacobians(const OutputList& xs,
                    const std::vector<TensorShape>& x_shapes,
                    const std::vector<TensorShape>& y_shapes,
                    std::vector<Tensor>& jacobians) {
-  int y_num = y_shapes.size();
-  int x_num = x_shapes.size();
+  size_t y_num = y_shapes.size();
+  size_t x_num = x_shapes.size();
   jacobians.resize(y_num * x_num);
   for (int x_idx = 0; x_idx < x_num; x_idx++) {
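All three changes store the result of std::vector::size(), which returns size_t, in a size_t instead of truncating it to int. A self-contained sketch of the diagnostic being silenced (the warning numbers are representative examples):

    #include <cstddef>
    #include <vector>

    void Sketch(const std::vector<double>& shapes) {
      int n_bad = shapes.size();          // warns: size_t -> int may lose data (e.g. MSVC C4267)
      std::size_t n_ok = shapes.size();   // matches size()'s return type; no warning
      (void)n_bad;
      (void)n_ok;
    }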

View File

@@ -210,8 +210,8 @@ Status SymbolicGradientBuilder::Initialize() {
   {
     // Initialize backprop with `grad_inputs_`.
-    const int num_dy = grad_inputs_.size();
-    for (int i = 0; i < num_dy; ++i) {
+    const size_t num_dy = grad_inputs_.size();
+    for (size_t i = 0; i < num_dy; ++i) {
       TF_RETURN_IF_ERROR(BackpropAlongEdge(grad_inputs_[i], outputs_[i]));
     }
   }
@@ -308,7 +308,7 @@ Status SymbolicGradientBuilder::AddGradients() {
       continue;
     }
-    const int num_no_grad = no_grad_dy_indices.size();
+    const size_t num_no_grad = no_grad_dy_indices.size();
     if (IsPrimitiveOpWithNoGrad(n->type_string()) || num_no_grad == num_y) {
       // No grad defined for this op, or all outputs returned 'NoGradient':
       // Backprop 'NoGradient' along the in edges.
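Here the loop counter changes along with the count, so the i < num_dy comparison and the grad_inputs_[i] subscript stay within a single type. The same pattern as a self-contained sketch (hypothetical element type):

    #include <cstddef>
    #include <vector>

    void BackpropAll(const std::vector<double>& grad_inputs) {
      const std::size_t num_dy = grad_inputs.size();
      for (std::size_t i = 0; i < num_dy; ++i) {
        double dy = grad_inputs[i];  // no signed/unsigned or 64/32 conversion anywhere
        (void)dy;
      }
    }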

View File

@@ -20,7 +20,7 @@ namespace tensorflow {
 Operation::Operation(Node* n) : inputs_(GetInputs(n)), node_(n) {}
-Output Operation::input(int i) const {
+Output Operation::input(int32 i) const {
   CHECK_NOTNULL(node_);
   CHECK_GE(i, 0);
   CHECK_LT(i, node_->num_inputs());
@@ -37,14 +37,14 @@ Output Operation::input(int i) const {
   return Output(inputs_[i].first, inputs_[i].second);
 }
-Output Operation::output(int i) const {
+Output Operation::output(int32 i) const {
   CHECK_NOTNULL(node_);
   CHECK_GE(i, 0);
   CHECK_LT(i, node_->num_outputs());
   return Output(node_, i);
 }
-uint64 Operation::hash(int64 index) const {
+uint64 Operation::hash(int32 index) const {
   return ::tensorflow::Hash64(reinterpret_cast<const char*>(&node_),
                               sizeof(Node*), index);
 }

View File

@@ -39,22 +39,22 @@ class Operation {
   Operation() : node_(nullptr) {}
   explicit Operation(Node* n);
-  int num_inputs() const { return node_->num_inputs(); }
-  DataType input_type(int o) const { return node_->input_type(o); }
-  Output input(int i) const;
+  int32 num_inputs() const { return node_->num_inputs(); }
+  DataType input_type(int32 o) const { return node_->input_type(o); }
+  Output input(int32 i) const;
-  int num_outputs() const { return node_->num_outputs(); }
-  DataType output_type(int o) const { return node_->output_type(o); }
-  Output output(int i) const;
+  int32 num_outputs() const { return node_->num_outputs(); }
+  DataType output_type(int32 o) const { return node_->output_type(o); }
+  Output output(int32 i) const;
   Node* node() const { return node_; }
-  uint64 hash(int64 index) const;
+  uint64 hash(int32 index) const;
   bool operator==(const Operation& other) const { return node_ == other.node_; }
  private:
-  typedef std::vector<std::pair<Node*, int64>> Inputs;
+  typedef std::vector<std::pair<Node*, int32>> Inputs;
   static Inputs GetInputs(Node* node);
   Inputs inputs_;
@@ -66,12 +66,12 @@ class Output {
  public:
   Output() = default;
   explicit Output(Node* n) : op_(n) {}
-  Output(Node* n, int64 index) : op_(n), index_(index) {}
-  Output(const Operation& op, int64 index) : op_(op), index_(index) {}
+  Output(Node* n, int32 index) : op_(n), index_(index) {}
+  Output(const Operation& op, int32 index) : op_(op), index_(index) {}
   Operation op() const { return op_; }
   Node* node() const { return op().node(); }
-  int64 index() const { return index_; }
+  int32 index() const { return index_; }
   DataType type() const { return op_.output_type(index_); }
   string name() const { return strings::StrCat(node()->name(), ":", index()); }
   bool operator==(const Output& other) const {
@@ -82,14 +82,14 @@ class Output {
  private:
   Operation op_ = Operation(nullptr);
-  int64 index_ = 0;
+  int32 index_ = 0;
 };
 /// Hash class that can be used for e.g. storing Outputs in an unordered_map
 struct OutputHash {
   std::size_t operator()(const Output& output) const {
     return Hash64Combine(std::hash<Node*>()(output.node()),
-                         std::hash<int64>()(output.index()));
+                         std::hash<int32>()(output.index()));
   }
 };
@@ -230,12 +230,12 @@ class Input {
   /// Constructor specifying a node name, index and datatype. This should only
   /// be used for specifying a backward edge, needed by control flow.
-  Input(const string& name, int i, DataType dt)
+  Input(const string& name, int32 i, DataType dt)
       : node_name_(name), index_(i), data_type_(dt) {}
   Node* node() const { return output_.node(); }
   string node_name() const { return node_name_; }
-  int index() const { return node_name_.empty() ? output_.index() : index_; }
+  int32 index() const { return node_name_.empty() ? output_.index() : index_; }
   DataType data_type() const { return data_type_; }
   Status status() const { return status_; }
   const Tensor& tensor() const { return tensor_; }
@@ -245,7 +245,7 @@ class Input {
   Output output_ = Output(Operation(nullptr), 0);
   Tensor tensor_;
   const string node_name_ = "";
-  int index_ = 0;
+  int32 index_ = 0;
   DataType data_type_ = DT_INVALID;
 };
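With Output::index() now returning int32, the hash functor hashes the index through std::hash<int32>, so no widening is needed before the combine. A standard-C++ stand-in for the same pattern (hypothetical Key type; a boost-style combine in place of TensorFlow's Hash64Combine):

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    struct Key {
      void* node;
      int32_t index;  // mirrors Output's int32 index
    };

    struct KeyHash {
      std::size_t operator()(const Key& k) const {
        std::size_t h1 = std::hash<void*>()(k.node);
        std::size_t h2 = std::hash<int32_t>()(k.index);  // hash with the field's own type
        return h1 ^ (h2 + 0x9e3779b9 + (h1 << 6) + (h1 >> 2));
      }
    };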

View File

@@ -49,7 +49,12 @@ Status QueueRunner::Init(const QueueRunnerDef& queue_runner_def) {
   enqueue_op_names_.insert(enqueue_op_names_.end(),
                            queue_runner_def.enqueue_op_name().begin(),
                            queue_runner_def.enqueue_op_name().end());
-  runs_ = enqueue_op_names_.size();
+  size_t op_names_size = enqueue_op_names_.size();
+  if (op_names_size > kint32max) {
+    return Status(error::INVALID_ARGUMENT,
+                  "Enqueue ops to run cannot exceed kint32max");
+  }
+  runs_ = static_cast<int>(op_names_size);
   if (runs_ == 0) {
     return Status(error::INVALID_ARGUMENT, "Empty enqueue ops to run.");
   }
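Rather than assigning the size_t count straight into the int member runs_, the new code rejects counts above kint32max and only then casts, so an oversized count fails loudly instead of wrapping. The same checked-narrowing idiom as a minimal standard-C++ sketch (hypothetical helper name; std::numeric_limits standing in for TensorFlow's kint32max):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Returns false instead of silently truncating when the count does not
    // fit in an int32; the caller can then report an invalid-argument error.
    bool NarrowToInt32(std::size_t count, int32_t* out) {
      if (count > static_cast<std::size_t>(std::numeric_limits<int32_t>::max())) {
        return false;
      }
      *out = static_cast<int32_t>(count);
      return true;
    }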

View File

@@ -227,7 +227,7 @@ int main(int argc, char* argv[]) {
     argv[dst++] = f;
   }
   argv[dst++] = nullptr;
-  argc = unknown_flags.size() + 1;
+  argc = static_cast<int>(unknown_flags.size() + 1);
   tensorflow::port::InitMain(argv[0], &argc, &argv);
   tensorflow::example::ConcurrentSessions(opts);
 }
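argc is an int while unknown_flags.size() + 1 is a size_t, so the plain assignment narrows on 64-bit builds; the explicit cast acknowledges that the flag count is known to fit. The same fix in a runnable sketch (assumed flag container):

    #include <vector>

    int main(int argc, char** argv) {
      std::vector<char*> unknown_flags;  // stands in for the leftover command-line flags
      // An implicit size_t -> int assignment would warn (e.g. MSVC C4267);
      // static_cast makes the intentional narrowing visible.
      argc = static_cast<int>(unknown_flags.size() + 1);
      (void)argv;
      return 0;
    }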

View File

@@ -84,12 +84,12 @@ class Node {
   const OpDef& op_def() const { return *props_->op_def_; }
   // input and output types
-  int num_inputs() const { return props_->input_types_.size(); }
-  DataType input_type(int i) const { return props_->input_types_[i]; }
+  int32 num_inputs() const { return props_->input_types_.size(); }
+  DataType input_type(int32 i) const { return props_->input_types_[i]; }
   const DataTypeVector& input_types() const { return props_->input_types_; }
-  int num_outputs() const { return props_->output_types_.size(); }
-  DataType output_type(int o) const { return props_->output_types_[o]; }
+  int32 num_outputs() const { return props_->output_types_.size(); }
+  DataType output_type(int32 o) const { return props_->output_types_[o]; }
   const DataTypeVector& output_types() const { return props_->output_types_; }
   // This gives the device the runtime has assigned this node to. If

View File

@@ -21,14 +21,14 @@ limitations under the License.
 namespace tensorflow {
-NodeBuilder::NodeOut::NodeOut(Node* n, int i)  // NOLINT(runtime/explicit)
+NodeBuilder::NodeOut::NodeOut(Node* n, int32 i)  // NOLINT(runtime/explicit)
     : node(n),
       error(false),
       name(node != nullptr ? node->name() : (error = true, "")),
       index(i),
       dt(SafeGetOutput(node, i, &error)) {}
-NodeBuilder::NodeOut::NodeOut(StringPiece n, int i, DataType t)
+NodeBuilder::NodeOut::NodeOut(StringPiece n, int32 i, DataType t)
     : node(nullptr), error(false), name(n.ToString()), index(i), dt(t) {}
 NodeBuilder::NodeOut::NodeOut()

View File

@@ -49,13 +49,13 @@ class NodeBuilder {
   // ArraySlice.
   struct NodeOut {
     // For referencing an existing Node.
-    NodeOut(Node* n, int i = 0);
+    NodeOut(Node* n, int32 i = 0);
     // For referencing Nodes not in the graph being built. It is
     // useful when preparing a graph for ExtendSession or creating a
     // back edge to a node that hasn't been added to the graph yet,
     // but will be.
-    NodeOut(StringPiece name, int i, DataType t);
+    NodeOut(StringPiece name, int32 i, DataType t);
     // Default constructor for std::vector<NodeOut>.
     NodeOut();
@@ -67,7 +67,7 @@ class NodeBuilder {
     // * an out-of-range index was passed to the NodeOut constructor.
     bool error;
     string name;
-    int index;
+    int32 index;
     DataType dt;
   };