Rename all BackProp to Backprop for consistency. (#12623)

Authored by Aseem Raj Baranwal on 2017-09-18 02:08:59 +05:30; committed by drpngx
parent 17baa33a76
commit 7aa94908e0
4 changed files with 39 additions and 39 deletions

tensorflow/core/grappler/costs/op_level_cost_estimator.cc

@@ -26,8 +26,8 @@ namespace grappler {
constexpr int kOpsPerMac = 2;
constexpr char kConv2d[] = "Conv2D";
constexpr char kConv2dBackPropFilter[] = "Conv2DBackpropFilter";
constexpr char kConv2dBackPropInput[] = "Conv2DBackpropInput";
constexpr char kConv2dBackpropFilter[] = "Conv2DBackpropFilter";
constexpr char kConv2dBackpropInput[] = "Conv2DBackpropInput";
constexpr char kMatMul[] = "MatMul";
constexpr char kSparseMatMul[] = "SparseMatMul";
constexpr char kIdentity[] = "Identity";
@@ -150,10 +150,10 @@ OpLevelCostEstimator::OpLevelCostEstimator() {
device_cost_impl_ = {
{kConv2d, wrap(&OpLevelCostEstimator::PredictConv2D)},
{kConv2dBackPropFilter,
wrap(&OpLevelCostEstimator::PredictConv2DBackPropFilter)},
{kConv2dBackPropInput,
wrap(&OpLevelCostEstimator::PredictConv2DBackPropInput)},
{kConv2dBackpropFilter,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropFilter)},
{kConv2dBackpropInput,
wrap(&OpLevelCostEstimator::PredictConv2DBackpropInput)},
{kMatMul, wrap(&OpLevelCostEstimator::PredictMatMul)},
{kSparseMatMul, wrap(&OpLevelCostEstimator::PredictMatMul)},
{kIdentity, wrap(&OpLevelCostEstimator::PredictNoOp)},
@@ -668,20 +668,20 @@ int64 OpLevelCostEstimator::CountBatchMatMulOperations(
return ops;
}
// TODO(cliffy): Dedup this method and CountConv2DBackPropFilterOperations.
int64 OpLevelCostEstimator::CountConv2DBackPropInputOperations(
// TODO(cliffy): Dedup this method and CountConv2DBackpropFilterOperations.
int64 OpLevelCostEstimator::CountConv2DBackpropInputOperations(
const OpInfo& op_features, ConvolutionDimensions* returned_conv_dims,
bool* found_unknown_shapes) const {
int64 ops = 0;
if (op_features.op() != kConv2dBackPropInput) {
if (op_features.op() != kConv2dBackpropInput) {
LOG(ERROR) << "Invalid Operation";
return ops;
}
if (op_features.outputs_size() != 1) {
// Need _output_shapes for input shape.
LOG(ERROR) << "No output shape in Conv2DBackPropInput op.";
LOG(ERROR) << "No output shape in Conv2DBackpropInput op.";
return ops;
}
@@ -696,7 +696,7 @@ int64 OpLevelCostEstimator::CountConv2DBackPropInputOperations(
ops *= conv_dims.iz * conv_dims.oz;
ops *= kOpsPerMac;
VLOG(1) << "Operations for Conv2DBackPropInput " << ops;
VLOG(1) << "Operations for Conv2DBackpropInput " << ops;
if (returned_conv_dims != nullptr) {
*returned_conv_dims = conv_dims;
@@ -704,18 +704,18 @@ int64 OpLevelCostEstimator::CountConv2DBackPropInputOperations(
return ops;
}
int64 OpLevelCostEstimator::CountConv2DBackPropFilterOperations(
int64 OpLevelCostEstimator::CountConv2DBackpropFilterOperations(
const OpInfo& op_features, ConvolutionDimensions* returned_conv_dims,
bool* found_unknown_shapes) const {
int64 ops = 0;
if (op_features.op() != kConv2dBackPropFilter) {
if (op_features.op() != kConv2dBackpropFilter) {
LOG(ERROR) << "Invalid Operation";
return ops;
}
if (op_features.outputs_size() != 1) {
// Need _output_shapes for input shape.
LOG(ERROR) << "No output shape in Conv2DBackPropFilter op.";
LOG(ERROR) << "No output shape in Conv2DBackpropFilter op.";
return ops;
}
@@ -730,7 +730,7 @@ int64 OpLevelCostEstimator::CountConv2DBackPropFilterOperations(
ops *= conv_dims.iz * conv_dims.oz;
ops *= kOpsPerMac;
VLOG(1) << "Operations for Conv2DBackPropFilter" << ops;
VLOG(1) << "Operations for Conv2DBackpropFilter" << ops;
if (returned_conv_dims != nullptr) {
*returned_conv_dims = conv_dims;
@@ -814,22 +814,22 @@ Costs OpLevelCostEstimator::PredictConv2D(const OpInfo& op_features) const {
return costs;
}
Costs OpLevelCostEstimator::PredictConv2DBackPropInput(
Costs OpLevelCostEstimator::PredictConv2DBackpropInput(
const OpInfo& op_features) const {
bool found_unknown_shapes = false;
auto costs =
PredictOpCountBasedCost(CountConv2DBackPropInputOperations(
PredictOpCountBasedCost(CountConv2DBackpropInputOperations(
op_features, nullptr, &found_unknown_shapes),
op_features);
costs.inaccurate = found_unknown_shapes;
return costs;
}
Costs OpLevelCostEstimator::PredictConv2DBackPropFilter(
Costs OpLevelCostEstimator::PredictConv2DBackpropFilter(
const OpInfo& op_features) const {
bool found_unknown_shapes = false;
auto costs =
PredictOpCountBasedCost(CountConv2DBackPropFilterOperations(
PredictOpCountBasedCost(CountConv2DBackpropFilterOperations(
op_features, nullptr, &found_unknown_shapes),
op_features);
costs.inaccurate = found_unknown_shapes;
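
Note on the hunk around line 150 above: the device_cost_impl_ table maps each op-name constant to a wrapped cost-prediction member function, which is why the kConv2dBackprop* constants and the PredictConv2DBackprop*/CountConv2DBackprop* methods are renamed together in this commit. The following is a minimal, self-contained C++ sketch of that name-keyed dispatch pattern; MiniEstimator, the trimmed-down OpInfo and Costs structs, and the 1 TFLOP/s throughput figure are illustrative assumptions, not TensorFlow's actual types or numbers.

// Sketch of a string-keyed cost-predictor dispatch table, in the spirit of
// device_cost_impl_ above. All types here are simplified stand-ins.
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct OpInfo {
  std::string op;     // e.g. "Conv2DBackpropInput"
  int64_t flops = 0;  // assume the operation count was computed elsewhere
};

struct Costs {
  double seconds = 0.0;
  bool inaccurate = false;
};

class MiniEstimator {
 public:
  MiniEstimator() {
    // Keys mirror the kConv2dBackprop* constants in the diff above.
    dispatch_["Conv2DBackpropInput"] = [this](const OpInfo& op) {
      return PredictFromFlops(op);
    };
    dispatch_["Conv2DBackpropFilter"] = [this](const OpInfo& op) {
      return PredictFromFlops(op);
    };
  }

  Costs Predict(const OpInfo& op) const {
    auto it = dispatch_.find(op.op);
    if (it == dispatch_.end()) {
      Costs unknown;
      unknown.inaccurate = true;  // unregistered op: flag the estimate as rough
      return unknown;
    }
    return it->second(op);
  }

 private:
  Costs PredictFromFlops(const OpInfo& op) const {
    Costs c;
    // Hypothetical device throughput of 1 TFLOP/s, for illustration only.
    c.seconds = static_cast<double>(op.flops) / 1e12;
    return c;
  }

  std::unordered_map<std::string, std::function<Costs(const OpInfo&)>> dispatch_;
};

int main() {
  MiniEstimator est;
  OpInfo op{"Conv2DBackpropInput", 2000000000};
  std::cout << est.Predict(op).seconds << " s\n";
}

As in the diff, an op that is missing from the table falls back to a result flagged as inaccurate, mirroring the found_unknown_shapes / costs.inaccurate convention rather than failing outright.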

tensorflow/core/grappler/costs/op_level_cost_estimator.h

@@ -82,10 +82,10 @@ class OpLevelCostEstimator {
bool* found_unknown_shapes) const;
int64 CountBatchMatMulOperations(const OpInfo& op_features,
bool* found_unknown_shapes) const;
int64 CountConv2DBackPropInputOperations(const OpInfo& op_features,
int64 CountConv2DBackpropInputOperations(const OpInfo& op_features,
ConvolutionDimensions* conv_info,
bool* found_unknown_shapes) const;
int64 CountConv2DBackPropFilterOperations(const OpInfo& op_features,
int64 CountConv2DBackpropFilterOperations(const OpInfo& op_features,
ConvolutionDimensions* conv_info,
bool* found_unknown_shapes) const;
@@ -124,8 +124,8 @@ class OpLevelCostEstimator {
// device.
Costs PredictConv2D(const OpInfo& op_features) const;
Costs PredictCwiseOp(const OpInfo& op_features) const;
Costs PredictConv2DBackPropInput(const OpInfo& op_features) const;
Costs PredictConv2DBackPropFilter(const OpInfo& op_features) const;
Costs PredictConv2DBackpropInput(const OpInfo& op_features) const;
Costs PredictConv2DBackpropFilter(const OpInfo& op_features) const;
Costs PredictMatMul(const OpInfo& op_features) const;
Costs PredictNoOp(const OpInfo& op_features) const;
Costs PredictBatchMatMul(const OpInfo& op_features) const;

tensorflow/python/ops/control_flow_grad.py

@@ -117,7 +117,7 @@ def _MergeGrad(op, grad, _):
# Add the stack pop op. If pred.op is in a (outer) CondContext,
# the stack pop will be guarded with a switch.
real_pred = grad_state.AddBackPropAccumulatedValue(history_pred, pred)
real_pred = grad_state.AddBackpropAccumulatedValue(history_pred, pred)
grad_state.history_map[pred.name] = real_pred
pred = real_pred
# pylint: disable=protected-access
@@ -214,9 +214,9 @@ def _EnterGrad(op, grad):
if op.get_attr("is_constant"):
# Add a gradient accumulator for each loop invariant.
if isinstance(grad, ops.Tensor):
result = grad_ctxt.AddBackPropAccumulator(op, grad)
result = grad_ctxt.AddBackpropAccumulator(op, grad)
elif isinstance(grad, ops.IndexedSlices):
result = grad_ctxt.AddBackPropIndexedSlicesAccumulator(op, grad)
result = grad_ctxt.AddBackpropIndexedSlicesAccumulator(op, grad)
else:
# TODO(yuanbyu, lukasr): Add support for SparseTensor.
raise TypeError("Type %s not supported" % type(grad))

tensorflow/python/ops/control_flow_ops.py

@@ -683,7 +683,7 @@ class GradLoopState(object):
# The while loop context for backprop.
self._grad_context = None
# The loop counter added by AddBackPropLoopCounter. It is the value
# The loop counter added by AddBackpropLoopCounter. It is the value
# of the loop counter for the current iteration.
self._grad_index = None
@@ -725,8 +725,8 @@ class GradLoopState(object):
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
real_cnt = outer_grad_state.AddBackPropAccumulatedValue(history_cnt, cnt)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
real_cnt = outer_grad_state.AddBackpropAccumulatedValue(history_cnt, cnt)
self._grad_index = self._grad_context.AddBackpropLoopCounter(
real_cnt, outer_grad_state)
outer_grad_ctxt.Exit()
else:
@@ -736,7 +736,7 @@ class GradLoopState(object):
forward_ctxt.swap_memory,
forward_ctxt.name,
self)
self._grad_index = self._grad_context.AddBackPropLoopCounter(
self._grad_index = self._grad_context.AddBackpropLoopCounter(
cnt, outer_grad_state)
if outer_forward_ctxt: outer_forward_ctxt.Exit()
@@ -914,7 +914,7 @@ class GradLoopState(object):
push.op._add_control_input(add_op)
return acc
def AddBackPropAccumulatedValue(self, history_value, value,
def AddBackpropAccumulatedValue(self, history_value, value,
dead_branch=False):
"""Add the getter for an accumulated value in the grad context.
@@ -1013,7 +1013,7 @@ class GradLoopState(object):
if real_value is None:
# Add the stack pop op in the grad context.
real_value = cur_grad_state.AddBackPropAccumulatedValue(history_value,
real_value = cur_grad_state.AddBackpropAccumulatedValue(history_value,
cur_value)
if cur_grad_state != self:
real_value = self._grad_context.AddValue(real_value)
@@ -1170,7 +1170,7 @@ class ControlFlowState(object):
# Get the shape back from the stack.
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_shape, shape)
result = array_ops.zeros(real_shape, val.dtype)
outer_grad_ctxt.Exit()
@@ -1240,7 +1240,7 @@ class ControlFlowState(object):
grad_state.grad_context.Enter()
# Create a zero tensor with the right shape.
shape = grad_state.AddBackPropAccumulatedValue(
shape = grad_state.AddBackpropAccumulatedValue(
history_zeros_shape, zeros_shape, dead_branch)
result = array_ops.zeros(shape, val.dtype)
return result
@@ -2282,7 +2282,7 @@ class WhileContext(ControlFlowContext):
self.Exit()
return total_iterations, next_n
def AddBackPropLoopCounter(self, count, outer_grad_state):
def AddBackpropLoopCounter(self, count, outer_grad_state):
"""Add the backprop loop that controls the iterations.
This is added to the backprop loop. It is used to control the loop
@@ -2336,7 +2336,7 @@ class WhileContext(ControlFlowContext):
self.Exit()
return next_count
def AddBackPropAccumulator(self, op, grad):
def AddBackpropAccumulator(self, op, grad):
"""Add an accumulation loop for every loop invariant.
This is added to the backprop loop. It is used to accumulate partial
@@ -2382,7 +2382,7 @@ class WhileContext(ControlFlowContext):
history_zeros_shape = outer_grad_state.AddForwardAccumulator(
zeros_shape)
self.outer_context.Enter()
real_shape = outer_grad_state.AddBackPropAccumulatedValue(
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_zeros_shape, zeros_shape)
acc = array_ops.zeros(real_shape, grad.dtype)
self.outer_context.Exit()
@@ -2412,10 +2412,10 @@
self.ExitResult([result_acc])
return result_acc
def AddBackPropIndexedSlicesAccumulator(self, op, grad):
def AddBackpropIndexedSlicesAccumulator(self, op, grad):
"""This is used for accumulating gradients that are IndexedSlices.
This is essentially the equavalent of AddBackPropAccumulator but optimized
This is essentially the equivalent of AddBackpropAccumulator but optimized
for things like updating embeddings from within a while loop.
Args: