c, cc folder resolutions
commit 5d61fb278e
parent 3bb28df8d4
@@ -573,7 +573,7 @@ Status InitialGradients(
     gtl::ArraySlice<Gradient*> output_gradients, const TensorTape& tensor_tape,
     const OpTape<BackwardFunction, TapeTensor>& op_tape,
     std::unordered_map<int64, std::vector<Gradient*>>* result) {
-  for (int i = 0; i < target_tensor_ids.size(); ++i) {
+  for (int i = 0, end = target_tensor_ids.size(); i < end; ++i) {
     const int64 id = target_tensor_ids[i];
     if (output_gradients.empty() || output_gradients[i] == nullptr) {
       auto tensor_it = tensor_tape.find(id);
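Every loop change in this commit follows the same pattern: the unsigned value returned by size() is hoisted into a signed local (end) so the loop condition compares int with int. A minimal standalone sketch of the before and after (the vector and its contents are hypothetical stand-ins, not code from this commit):

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> target_tensor_ids = {7, 42, 3};

  // Before: `i` is int and size() returns size_t, so `i < size()` is a
  // signed/unsigned comparison and trips -Wsign-compare under -Wall.
  // for (int i = 0; i < target_tensor_ids.size(); ++i) { ... }

  // After: cache the size in a signed `end` once. The comparison is now
  // int vs. int; the size_t -> int narrowing happens a single time, at
  // initialization, where it is easy to audit.
  for (int i = 0, end = target_tensor_ids.size(); i < end; ++i) {
    std::printf("id[%d] = %d\n", i, target_tensor_ids[i]);
  }
  return 0;
}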
@@ -699,7 +699,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
     std::vector<Gradient*> out_gradients;
     out_gradients.reserve(trace.output_tensor_info.size());
     std::vector<int64> unneeded_gradients;
-    for (int i = 0; i < trace.input_tensor_id.size(); i++) {
+    for (int i = 0, end = trace.input_tensor_id.size(); i < end; i++) {
       const auto& in_tensor_id = trace.input_tensor_id[i];
       if (tensor_tape_.find(in_tensor_id) == tensor_tape_.end() &&
           sources_set.find(in_tensor_id) == sources_set.end()) {
@@ -709,7 +709,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
 
     bool any_gradient_nonzero = false;
     std::vector<int> zero_indices;
-    for (int i = 0; i < trace.output_tensor_info.size(); ++i) {
+    for (int i = 0, end = trace.output_tensor_info.size(); i < end; ++i) {
       const int64 id = trace.output_tensor_info[i].GetID();
       auto grad_it = gradients.find(id);
       if (grad_it == gradients.end()) {
@@ -775,7 +775,7 @@ Status GradientTape<Gradient, BackwardFunction, TapeTensor>::ComputeGradient(
     }
     VLOG(1) << "Got " << in_gradients.size() << " in_gradients for "
             << trace.input_tensor_id.size() << " sources";
-    for (int i = 0; i < in_gradients.size(); ++i) {
+    for (int i = 0, end = in_gradients.size(); i < end; ++i) {
       const int64 id = trace.input_tensor_id[i];
       if (in_gradients[i] != nullptr) {
         auto& unaggregated_grads = gradients[id];
@@ -968,7 +968,7 @@ ForwardAccumulator<Gradient, BackwardFunction, TapeTensor>::ForwardpropFromTape(
   targets.reserve(grad.size());
   used_in_grads.reserve(grad.size());
   std::unordered_map<int64, TapeTensor> sources_that_are_targets;
-  for (int grad_index = 0; grad_index < grad.size(); ++grad_index) {
+  for (int grad_index = 0, end = grad.size(); grad_index < end; ++grad_index) {
     Gradient* grad_tensor = grad[grad_index];
     if (grad_tensor != nullptr) {
       int64 tensor_id = vspace_.TensorId(grad_tensor);
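One behavioral nuance of this rewrite is worth noting: caching end before the loop means elements appended during iteration are never visited, whereas the old form re-evaluated size() on every pass. That is safe in all of the hunks above because none of these loops grow the container they index. A hypothetical contrast (not code from this commit):

#include <vector>

void visits_appended(std::vector<int>& v) {
  // Re-evaluates v.size() each iteration, so it also walks elements
  // pushed during the loop (and, with an int index, would draw
  // -Wsign-compare).
  for (size_t i = 0; i < v.size(); ++i) {
    if (v[i] == 0) v.push_back(1);  // the new element will be visited
  }
}

void visits_original_only(std::vector<int>& v) {
  // The commit's idiom: `end` is fixed up front, so appended elements
  // are skipped. Equivalent only when the body never grows `v`.
  for (int i = 0, end = v.size(); i < end; ++i) {
    if (v[i] == 0) v.push_back(1);  // not visited by this loop
  }
}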
@@ -425,7 +425,7 @@ Status SymbolicGradientBuilder::ProcessWhileLoop(Node* exit_node,
   // Backprop along the in edges to the while loop (i.e. the inputs to the enter
   // nodes)
   DCHECK_EQ(dx.size(), while_ctx->enter_nodes().size());
-  for (int i = 0; i < dx.size(); ++i) {
+  for (int i = 0, end = dx.size(); i < end; ++i) {
     Node* enter_node = while_ctx->enter_nodes()[i];
     for (const Edge* e : enter_node->in_edges()) {
       if (e->IsControlEdge()) continue;
@@ -489,7 +489,7 @@ Status SymbolicGradientBuilder::AddGradients() {
     // All loop-specific control flow ops should have been handled above
     DCHECK(!n->IsEnter() && !n->IsNextIteration()) << n->DebugString();
 
-    const size_t num_no_grad = no_grad_dy_indices.size();
+    const int num_no_grad = no_grad_dy_indices.size();
     if (IsPrimitiveOpWithNoGrad(n->type_string()) || num_no_grad == num_y) {
       // No grad defined for this op, or all outputs returned 'NoGradient':
      // Backprop 'NoGradient' along the in edges.
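The two declaration changes in this file go in opposite directions, each matching the type of the value it is compared against. Here num_no_grad is compared for equality with num_y, which is signed, so the count becomes int. A sketch under that assumption (num_y and the vector are stand-ins for the surrounding code):

#include <vector>

bool all_outputs_have_no_grad(const std::vector<int>& no_grad_dy_indices,
                              int num_y) {
  // Before: size_t == int is a signed/unsigned comparison and warns.
  // const size_t num_no_grad = no_grad_dy_indices.size();

  // After: declare the count with the same signedness as num_y.
  const int num_no_grad = no_grad_dy_indices.size();
  return num_no_grad == num_y;
}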
@@ -524,7 +524,7 @@ Status SymbolicGradientBuilder::AddGradients() {
       // make this association explicit.
       for (const Edge* e : n->in_edges()) {
         if (e->IsControlEdge()) continue;
-        int dx_index = e->dst_input();
+        size_t dx_index = e->dst_input();
         if (dx_index >= dx.size()) {
           return errors::Internal(
               "Invalid gradient output index: ", dx_index, " size: ", dx.size());
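Conversely, dx_index is range-checked against dx.size(), so here the fix widens the index to size_t instead. Control edges are skipped just above, so dst_input() is a non-negative slot and the int -> size_t conversion is lossless. Roughly (types simplified, names hypothetical):

#include <cstddef>
#include <vector>

// Simplified stand-in: dst_input models Edge::dst_input(), a
// non-negative input slot for non-control edges.
bool index_in_range(int dst_input, const std::vector<float>& dx) {
  // Before: `int dx_index >= dx.size()` warned under -Wsign-compare.
  // After: widen the index so the comparison is unsigned vs. unsigned.
  size_t dx_index = dst_input;
  return dx_index < dx.size();
}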
@@ -34,7 +34,7 @@ Output ToOutput(OutputTensor output_tensor) {
 
 std::vector<Output> ToOutputVector(
     const std::vector<OutputTensor>& output_tensors) {
-  size_t n = output_tensors.size();
+  const int n = output_tensors.size();
   std::vector<Output> result;
   result.reserve(n);
   for (int i = 0; i < n; ++i) result.push_back(ToOutput(output_tensors[i]));
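In ToOutputVector the loop index i was already int, so the simplest fix is to make the bound n an int as well, and const since it never changes. The same shape in isolation (OutputTensor, Output, and the conversion are hypothetical stand-ins):

#include <vector>

struct OutputTensor { int index; };
struct Output { int index; };

std::vector<Output> ToOutputVectorSketch(
    const std::vector<OutputTensor>& output_tensors) {
  // With `n` declared int, `i < n` below compares int vs. int; the old
  // size_t declaration made that comparison signed/unsigned.
  const int n = output_tensors.size();
  std::vector<Output> result;
  result.reserve(n);
  for (int i = 0; i < n; ++i)
    result.push_back(Output{output_tensors[i].index});
  return result;
}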