Rename usages of tf.mul, tf.neg, tf.sub that are used internally
Change: 142595367
parent d2b92f24e0
commit cb4acf5e47
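The diff below mechanically renames the deprecated elementwise-op aliases to their canonical names; semantics are unchanged. As a minimal sketch (not part of this commit, and assuming a TF-1.x-era build where `tf.Session` and these symbols exist), the mapping it applies is:

    # Old (deprecated)        New (drop-in replacement)
    # tf.mul(a, b)        ->  tf.multiply(a, b)
    # tf.sub(a, b)        ->  tf.subtract(a, b)
    # tf.neg(a)           ->  tf.negative(a)
    import tensorflow as tf

    a = tf.constant(6.0)
    b = tf.constant(2.0)
    prod = tf.multiply(a, b)  # 12.0
    diff = tf.subtract(a, b)  # 4.0
    neg = tf.negative(a)      # -6.0

    with tf.Session() as sess:
      print(sess.run([prod, diff, neg]))  # [12.0, 4.0, -6.0]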
@@ -304,7 +304,7 @@ def _sampled_scattered_embedding_lookup(
           math_ops.range(0, dimension), 0), array_ops.shape(values))
   else:
     dimension = array_ops.shape(sampled_candidates)[
-        math_ops.sub(array_ops.rank(sampled_candidates), 1)]
+        math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
     sampled_candidates_shape = array_ops.shape(sampled_candidates)
     dimension_tensor = array_ops.reshape(dimension, shape=[1,])
     expected_shape = array_ops.concat_v2([values_shape, dimension_tensor], 0)
@@ -539,7 +539,7 @@ def _sampled_scattered_embedding_lookup_sparse(params,
         array_ops.constant([-1., 1.]), sp_values.values, dimension=dimension,
         sampled_candidates=sampled_candidates, hash_key=hash_key,
         name="signs_lookup")
-    embeddings = math_ops.mul(signs, embeddings, name="signs_hash")
+    embeddings = math_ops.multiply(signs, embeddings, name="signs_hash")

     if segment_ids.dtype != dtypes.int32:
       segment_ids = math_ops.cast(segment_ids, dtypes.int32)
@@ -194,9 +194,9 @@ class _TargetColumn(object):
   def _weighted_loss(self, loss, weight_tensor):
     """Returns cumulative weighted loss."""
     unweighted_loss = array_ops.reshape(loss, shape=(-1,))
-    weighted_loss = math_ops.mul(unweighted_loss,
-                                 array_ops.reshape(
-                                     weight_tensor, shape=(-1,)))
+    weighted_loss = math_ops.multiply(unweighted_loss,
+                                      array_ops.reshape(
+                                          weight_tensor, shape=(-1,)))
     return weighted_loss

   def training_loss(self, logits, target, features, name="training_loss"):
@@ -76,7 +76,8 @@ def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
     shape_multipliers = array_ops.stack(
         _multiplier_helper(array_ops.unstack(dense_shape)[1:]))
     offsets = math_ops.reduce_sum(
-        math_ops.mul(higher_dims, shape_multipliers), reduction_indices=[1])
+        math_ops.multiply(higher_dims, shape_multipliers),
+        reduction_indices=[1])
     flat_indices = math_ops.add(flat_indices, offsets)
   values = array_ops.gather(flat_tensor, flat_indices)
   return sparse_tensor.SparseTensor(indices, values, dense_shape)
@@ -1171,9 +1171,9 @@ class _MultiHead(_Head):
 def _weighted_loss(loss, weight):
   """Returns cumulative weighted loss as 1d `Tensor`."""
   with ops.name_scope(None, "weighted_loss", (loss, weight)) as name:
-    return math_ops.mul(array_ops.reshape(loss, shape=(-1,)),
-                        array_ops.reshape(weight, shape=(-1,)),
-                        name=name)
+    return math_ops.multiply(array_ops.reshape(loss, shape=(-1,)),
+                             array_ops.reshape(weight, shape=(-1,)),
+                             name=name)


 def _weight_tensor(features, weight_column_name):
@@ -71,5 +71,5 @@ def softmax_classifier(tensor_in,
   with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
     logits = nn.xw_plus_b(tensor_in, weights, biases)
     if class_weight is not None:
-      logits = math_ops.mul(logits, class_weight)
+      logits = math_ops.multiply(logits, class_weight)
     return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
@@ -241,7 +241,7 @@ class SdcaModel(object):
       for sfc, sv in zip(examples['sparse_features'], sparse_variables):
         # TODO(sibyl-Aix6ihai): following does not take care of missing features.
         result += math_ops.segment_sum(
-            math_ops.mul(
+            math_ops.multiply(
                 array_ops.gather(sv, sfc.feature_indices), sfc.feature_values),
             sfc.example_indices)
       dense_features = self._convert_n_to_tensor(examples['dense_features'])
@@ -454,7 +454,7 @@ class SdcaModel(object):
           examples['example_weights']), dtypes.float64)

       if self._options['loss_type'] == 'logistic_loss':
-        return math_ops.reduce_sum(math_ops.mul(
+        return math_ops.reduce_sum(math_ops.multiply(
             sigmoid_cross_entropy_with_logits(predictions, labels),
             weights)) / math_ops.reduce_sum(weights)

@@ -462,19 +462,19 @@ class SdcaModel(object):
        # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
        # first convert 0/1 labels into -1/1 labels.
        all_ones = array_ops.ones_like(predictions)
-        adjusted_labels = math_ops.sub(2 * labels, all_ones)
+        adjusted_labels = math_ops.subtract(2 * labels, all_ones)
        # Tensor that contains (unweighted) error (hinge loss) per
        # example.
-        error = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(adjusted_labels,
-                                                                predictions)))
-        weighted_error = math_ops.mul(error, weights)
+        error = nn_ops.relu(math_ops.subtract(
+            all_ones, math_ops.multiply(adjusted_labels, predictions)))
+        weighted_error = math_ops.multiply(error, weights)
        return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
            weights)

      # squared loss
-      err = math_ops.sub(labels, predictions)
+      err = math_ops.subtract(labels, predictions)

-      weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
+      weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
      # SDCA squared loss function is sum(err^2) / (2*sum(weights))
      return (math_ops.reduce_sum(weighted_squared_err) /
              (2.0 * math_ops.reduce_sum(weights)))
@@ -67,7 +67,7 @@ def _scale_losses(losses, weights):
   reduction_indices = list(range(start_index, losses.get_shape().ndims))
   reduced_losses = math_ops.reduce_sum(losses,
                                        reduction_indices=reduction_indices)
-  reduced_losses = math_ops.mul(reduced_losses, weights)
+  reduced_losses = math_ops.multiply(reduced_losses, weights)
   return math_ops.reduce_sum(reduced_losses)


@@ -181,7 +181,7 @@ def _num_present(losses, weights, per_batch=False):
                                 math_ops.to_float(batch_size))
     num_per_batch = array_ops.where(math_ops.equal(weights, 0),
                                     0.0, num_per_batch)
-    num_per_batch = math_ops.mul(array_ops.ones(
+    num_per_batch = math_ops.multiply(array_ops.ones(
         array_ops.reshape(batch_size, [1])), num_per_batch)
     return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)

@@ -197,7 +197,7 @@ def _num_present(losses, weights, per_batch=False):
                                        [weights.get_shape().ndims], [-1])
     num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))

-    num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
+    num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
     return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)


@@ -295,7 +295,7 @@ def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
     predictions.get_shape().assert_is_compatible_with(labels.get_shape())
     predictions = math_ops.to_float(predictions)
     labels = math_ops.to_float(labels)
-    losses = math_ops.abs(math_ops.sub(predictions, labels))
+    losses = math_ops.abs(math_ops.subtract(predictions, labels))
     return compute_weighted_loss(losses, weights, scope=scope)


@@ -458,9 +458,9 @@ def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
     predictions.get_shape().assert_is_compatible_with(labels.get_shape())
     predictions = math_ops.to_float(predictions)
     labels = math_ops.to_float(labels)
-    losses = -math_ops.mul(
+    losses = -math_ops.multiply(
         labels,
-        math_ops.log(predictions + epsilon)) - math_ops.mul(
+        math_ops.log(predictions + epsilon)) - math_ops.multiply(
             (1 - labels), math_ops.log(1 - predictions + epsilon))
     return compute_weighted_loss(losses, weights, scope=scope)

@@ -487,8 +487,9 @@ def hinge_loss(logits, labels=None, scope=None):
     # We first need to convert binary labels to -1/1 labels (as floats).
     labels = math_ops.to_float(labels)
     all_ones = array_ops.ones_like(labels)
-    labels = math_ops.sub(2 * labels, all_ones)
-    return nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
+    labels = math_ops.subtract(2 * labels, all_ones)
+    return nn_ops.relu(
+        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))


 @deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
@@ -522,7 +523,7 @@ def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
     predictions.get_shape().assert_is_compatible_with(labels.get_shape())
     predictions = math_ops.to_float(predictions)
     labels = math_ops.to_float(labels)
-    losses = math_ops.square(math_ops.sub(predictions, labels))
+    losses = math_ops.square(math_ops.subtract(predictions, labels))
     return compute_weighted_loss(losses, weights, scope=scope)


@@ -574,7 +575,7 @@ def mean_pairwise_squared_error(
     labels = math_ops.to_float(labels)
     weights = math_ops.to_float(ops.convert_to_tensor(weights))

-    diffs = math_ops.sub(predictions, labels)
+    diffs = math_ops.subtract(predictions, labels)

     # Need to verify here since the function doesn't use compute_weighted_loss
     if diffs.get_shape().ndims is None:
@@ -638,6 +639,6 @@ def cosine_distance(
     predictions = math_ops.to_float(predictions)
     labels = math_ops.to_float(labels)

-    radial_diffs = math_ops.mul(predictions, labels)
+    radial_diffs = math_ops.multiply(predictions, labels)
     losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
     return compute_weighted_loss(losses, weights, scope=scope)
@@ -56,8 +56,8 @@ def accuracy(predictions, labels, weights=None):
     is_correct = math_ops.cast(
         math_ops.equal(predictions, labels), dtypes.float32)
     if weights is not None:
-      is_correct = math_ops.mul(is_correct, weights)
-      num_values = math_ops.mul(weights, array_ops.ones_like(is_correct))
+      is_correct = math_ops.multiply(is_correct, weights)
+      num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
       return math_ops.div(math_ops.reduce_sum(is_correct),
                           math_ops.reduce_sum(num_values))
     return math_ops.reduce_mean(is_correct)
@@ -134,7 +134,7 @@ def _count_condition(values, weights=None, metrics_collections=None,
   values = math_ops.to_float(values)
   if weights is not None:
     weights = math_ops.to_float(weights)
-    values = math_ops.mul(values, weights)
+    values = math_ops.multiply(values, weights)

   value_tensor = array_ops.identity(count)
   update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
@@ -318,7 +318,7 @@ def _broadcast_weights(weights, values):
       values_shape.is_fully_defined() and
       weights_shape.is_compatible_with(values_shape)):
     return weights
-  return math_ops.mul(
+  return math_ops.multiply(
       weights, array_ops.ones_like(values), name='broadcast_weights')


@@ -1542,7 +1542,7 @@ def sparse_average_precision_at_k(predictions, labels, k):
     precision_per_k = math_ops.div(
         math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
         name='precision_per_k')
-    relevant_precision_per_k = math_ops.mul(
+    relevant_precision_per_k = math_ops.multiply(
         precision_per_k, math_ops.to_double(relevant_per_k),
         name='relevant_precision_per_k')

@@ -1710,7 +1710,7 @@ def _sparse_true_positive_at_k(predictions_idx,
     tp = math_ops.to_double(tp)
     if weights is not None:
       weights = math_ops.to_double(weights)
-      tp = math_ops.mul(tp, weights)
+      tp = math_ops.multiply(tp, weights)
     return tp


@@ -1798,7 +1798,7 @@ def _sparse_false_positive_at_k(predictions_idx,
     fp = math_ops.to_double(fp)
     if weights is not None:
       weights = math_ops.to_double(weights)
-      fp = math_ops.mul(fp, weights)
+      fp = math_ops.multiply(fp, weights)
     return fp


@@ -1887,7 +1887,7 @@ def _sparse_false_negative_at_k(predictions_idx,
     fn = math_ops.to_double(fn)
     if weights is not None:
       weights = math_ops.to_double(weights)
-      fn = math_ops.mul(fn, weights)
+      fn = math_ops.multiply(fn, weights)
     return fn


@@ -2329,11 +2329,12 @@ def streaming_pearson_correlation(predictions,

     pearson_r = _safe_div(
         cov,
-        math_ops.mul(math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
+        math_ops.multiply(math_ops.sqrt(var_predictions),
+                          math_ops.sqrt(var_labels)),
         'pearson_r')
     with ops.control_dependencies(
         [update_cov, update_var_predictions, update_var_labels]):
-      update_op = _safe_div(update_cov, math_ops.mul(
+      update_op = _safe_div(update_cov, math_ops.multiply(
           math_ops.sqrt(update_var_predictions),
           math_ops.sqrt(update_var_labels)), 'update_op')

@@ -2393,7 +2394,7 @@ def streaming_mean_cosine_distance(predictions, labels, dim, weights=None,
   predictions, labels, weights = _remove_squeezable_dimensions(
       predictions, labels, weights)
   predictions.get_shape().assert_is_compatible_with(labels.get_shape())
-  radial_diffs = math_ops.mul(predictions, labels)
+  radial_diffs = math_ops.multiply(predictions, labels)
   radial_diffs = math_ops.reduce_sum(radial_diffs,
                                      reduction_indices=[dim,],
                                      keep_dims=True)
@@ -2401,8 +2402,8 @@ def streaming_mean_cosine_distance(predictions, labels, dim, weights=None,
                                          None,
                                          None,
                                          name or 'mean_cosine_distance')
-  mean_distance = math_ops.sub(1.0, mean_distance)
-  update_op = math_ops.sub(1.0, update_op)
+  mean_distance = math_ops.subtract(1.0, mean_distance)
+  update_op = math_ops.subtract(1.0, update_op)

   if metrics_collections:
     ops.add_to_collections(metrics_collections, mean_distance)
@@ -436,11 +436,11 @@ class RandomForestGraphs(object):
   # pylint: disable=unused-argument
   def training_loss(self, features, labels, data_spec=None,
                     name='training_loss'):
-    return math_ops.neg(self.average_size(), name=name)
+    return math_ops.negative(self.average_size(), name=name)

   # pylint: disable=unused-argument
   def validation_loss(self, features, labels):
-    return math_ops.neg(self.average_size())
+    return math_ops.negative(self.average_size())

   def average_impurity(self):
     """Constructs a TF graph for evaluating the leaf impurity of a forest.
@@ -832,8 +832,8 @@ class RandomTreeGraphs(object):
     # Calculate values to put into scatter update for total counts.
     total_cleared = array_ops.tile(
         array_ops.expand_dims(
-            math_ops.neg(array_ops.ones_like(accumulators_cleared,
-                                             dtype=dtypes.float32)), 1),
+            math_ops.negative(array_ops.ones_like(accumulators_cleared,
+                                                  dtype=dtypes.float32)), 1),
         [1, self.params.num_output_columns])
     total_reset = array_ops.tile(
         array_ops.expand_dims(
@@ -852,7 +852,7 @@ class RandomTreeGraphs(object):
     # Calculate values to put into scatter update for candidate splits.
     split_features_updates = array_ops.tile(
         array_ops.expand_dims(
-            math_ops.neg(array_ops.ones_like(
+            math_ops.negative(array_ops.ones_like(
                 cleared_and_allocated_accumulators)), 1),
         [1, self.params.num_splits_to_consider])
     updates.append(state_ops.scatter_update(
@@ -794,7 +794,7 @@ class BaseSession(SessionInterface):
           b = array_ops.placeholder(dtypes.float32, shape=[])
           c = array_ops.placeholder(dtypes.float32, shape=[])
           r1 = math_ops.add(a, b)
-          r2 = math_ops.mul(r1, c)
+          r2 = math_ops.multiply(r1, c)

           h = sess.partial_run_setup([r1, r2], [a, b, c])
           res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
@@ -1328,7 +1328,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       b = array_ops.placeholder(dtypes.float32, shape=[])
       c = array_ops.placeholder(dtypes.float32, shape=[])
       r1 = math_ops.add(a, b)
-      r2 = math_ops.mul(r1, c)
+      r2 = math_ops.multiply(r1, c)

       h = sess.partial_run_setup([r1, r2], [a, b, c])
       res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
@@ -1350,7 +1350,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       b = array_ops.placeholder(dtypes.float32, shape=[])
       c = array_ops.placeholder(dtypes.float32, shape=[])
       r1 = math_ops.add(a, b)
-      r2 = math_ops.mul(r1, c)
+      r2 = math_ops.multiply(r1, c)

       h = sess.partial_run_setup([r1, r2], [a, b, c])
       res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
@@ -1361,7 +1361,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       b = array_ops.placeholder(dtypes.float32, shape=[])
       c = array_ops.placeholder(dtypes.float32, shape=[])
       r1 = math_ops.add(a, b)
-      r2 = math_ops.mul(r1, c)
+      r2 = math_ops.multiply(r1, c)

       h1 = sess.partial_run_setup([r1], [a, b, c])
       h2 = sess.partial_run_setup([r1, r2], [a, b, c])
@@ -1380,7 +1380,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       a = constant_op.constant(2.0, dtypes.float32)
       for i in xrange(steps):
         inputs.append(array_ops.placeholder(dtypes.float32, shape=[]))
-        a = math_ops.mul(a, inputs[i])
+        a = math_ops.multiply(a, inputs[i])
         outputs.append(a)

       h = sess.partial_run_setup(outputs, inputs)
@@ -1528,7 +1528,7 @@ class SessionTest(test_util.TensorFlowTestCase):
       a = array_ops.placeholder(dtypes.float32, shape=[])
       b = math_ops.add(a, a)
       c = array_ops.identity(b)
-      d = math_ops.mul(c, c)
+      d = math_ops.multiply(c, c)
       for step in xrange(120):
         run_metadata = config_pb2.RunMetadata()
         sess.run(d, feed_dict={a: 1.0},
@@ -754,7 +754,7 @@ class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
     y = control_flow_ops.with_dependencies(
         [x], y, name="control_deps/ctrl_dep_y")

-    z = math_ops.mul(x, y, name="control_deps/z")
+    z = math_ops.multiply(x, y, name="control_deps/z")

     z = control_flow_ops.with_dependencies(
         [x, y], z, name="control_deps/ctrl_dep_z")
@@ -102,11 +102,11 @@ class NodeStepperSimpleGraphTest(test_util.TensorFlowTestCase):
     self.b = variables.Variable(20.0, name="b")

     self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
-    self.d = math_ops.sub(self.a, self.c, name="d")  # Should be -20.0.
-    self.e = math_ops.mul(self.c, self.d, name="e")  # Should be -600.0.
+    self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
+    self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.

     self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
-    self.f = math_ops.mul(self.e, self.ph, name="f")
+    self.f = math_ops.multiply(self.e, self.ph, name="f")

     self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
         self.e, name="opt")
@@ -360,15 +360,15 @@ class SessionDebugTestBase(test_util.TensorFlowTestCase):
       v = variables.Variable(v_init, name=v_name)

       # Expected output: [0.0, 3.0]
-      w = math_ops.sub(u, v, name=w_name)
+      w = math_ops.subtract(u, v, name=w_name)

       # Expected output: [inf, 1.3333]
       x = math_ops.div(u, w, name=x_name)

       # Expected output: [nan, 4.0]
-      y = math_ops.mul(w, x, name=y_name)
+      y = math_ops.multiply(w, x, name=y_name)

-      z = math_ops.mul(y, y, name=z_name)
+      z = math_ops.multiply(y, y, name=z_name)

       u.initializer.run()
       v.initializer.run()
@@ -720,7 +720,7 @@ class SessionDebugTestBase(test_util.TensorFlowTestCase):
       v_init = constant_op.constant(20.0)
       v = variables.Variable(v_init, name="gdo/v")

-      w = math_ops.mul(u, v, name="gdo/w")
+      w = math_ops.multiply(u, v, name="gdo/w")
       # gdo stands for GradientDescentOptimizer.

       train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(
@@ -36,10 +36,10 @@ class StepperTest(test_util.TensorFlowTestCase):
     self.a = variables.Variable(2.0, name="a")
     self.b = variables.Variable(3.0, name="b")

-    self.c = math_ops.mul(self.a, self.b, name="c")  # Should be 6.0.
-    self.d = math_ops.mul(self.a, self.a, name="d")  # Should be 4.0.
+    self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
+    self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.

-    self.e = math_ops.mul(self.d, self.c, name="e")  # Should be 24.0.
+    self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.

     self.f_y = constant_op.constant(0.30, name="f_y")
     self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.
@@ -47,9 +47,9 @@ class StepperTest(test_util.TensorFlowTestCase):
     # The there nodes x, y and z form a graph with "cross-links" in. I.e., x
     # and y are both direct inputs to z, but x is also a direct input to y.
     self.x = variables.Variable(2.0, name="x")  # Should be 2.0
-    self.y = math_ops.neg(self.x, name="y")  # Should be -2.0.
+    self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.

-    self.z = math_ops.mul(self.x, self.y, name="z")  # Should be -4.0.
+    self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

     self.sess = session.Session()
     self.sess.run(variables.global_variables_initializer())
@@ -565,9 +565,9 @@ class StepperBackwardRunTest(test_util.TensorFlowTestCase):
     self.a = variables.Variable(1.0, name="a")
     self.b = variables.Variable(2.0, name="b")
     self.c = variables.Variable(4.0, name="c")
-    self.d = math_ops.mul(self.a, self.b, name="d")
-    self.e = math_ops.mul(self.b, self.c, name="e")
-    self.f = math_ops.mul(self.d, self.e, name="f")
+    self.d = math_ops.multiply(self.a, self.b, name="d")
+    self.e = math_ops.multiply(self.b, self.c, name="e")
+    self.f = math_ops.multiply(self.d, self.e, name="f")

     # Gradient descent optimizer that minimizes g.
     gradient_descent.GradientDescentOptimizer(0.01).minimize(
@@ -146,7 +146,7 @@ class CholeskyGradTest(test.TestCase):
       x = constant_op.constant(np.random.randn(), dtype)
       R = constant_op.constant(
           np.random.randn(shape[0], shape[1]), dtype)
-      e = math_ops.mul(R, x)
+      e = math_ops.multiply(R, x)
       tensor = math_ops.matmul(e, array_ops.transpose(e)) / shape[0]

       # Inner-most matrices in tensor are positive definite.
@@ -617,7 +617,7 @@ class PlaceholderTest(test.TestCase):
       p = array_ops.placeholder(dtypes_lib.int32, shape=[], name="p")
       with ops.control_dependencies([p]):
         c = constant_op.constant(5, dtypes_lib.int32)
-      d = math_ops.mul(p, c)
+      d = math_ops.multiply(p, c)
       self.assertEqual(10, d.eval(feed_dict={p: 2}))

   def testBadShape(self):
@@ -702,7 +702,7 @@ class PlaceholderV2Test(test.TestCase):
       p = array_ops.placeholder_v2(dtypes_lib.int32, shape=[], name="p")
       with ops.control_dependencies([p]):
         c = constant_op.constant(5, dtypes_lib.int32)
-      d = math_ops.mul(p, c)
+      d = math_ops.multiply(p, c)
       val = np.array(2).astype(np.int)
       self.assertEqual(10, d.eval(feed_dict={p: val}))

@@ -52,7 +52,9 @@ from tensorflow.python.ops import script_ops
 from tensorflow.python.ops import state_ops
 from tensorflow.python.ops import variable_scope
 from tensorflow.python.ops import variables
-import tensorflow.python.ops.tensor_array_grad  # pylint: disable=unused-import
+# pylint: disable=unused-import
+import tensorflow.python.ops.tensor_array_grad
+# pylint: enable=unused-import
 from tensorflow.python.platform import test
 from tensorflow.python.training import adam
 from tensorflow.python.training import gradient_descent
@@ -155,7 +157,7 @@ class ControlFlowTest(test.TestCase):
       enter_data = control_flow_ops.enter(data, "foo_1", False)
       five = constant_op.constant(5)
       enter_five = control_flow_ops.enter(five, "foo_1", False)
-      mul_op = math_ops.mul(enter_data, enter_five)
+      mul_op = math_ops.multiply(enter_data, enter_five)
       exit_op = control_flow_ops.exit(mul_op)

       result = exit_op.eval()
@@ -220,7 +222,7 @@ class ControlFlowTest(test.TestCase):
       one = constant_op.constant(1)
       add_op = math_ops.add(switch_op[0], one)
       five = constant_op.constant(5)
-      mul_op = math_ops.mul(switch_op[1], five)
+      mul_op = math_ops.multiply(switch_op[1], five)
       merge_op = control_flow_ops.merge([add_op, mul_op])[0]

       result = merge_op.eval()
@@ -307,7 +309,7 @@ class ControlFlowTest(test.TestCase):
   def testCondBool(self):
     values = constant_op.constant(10)
     fn1 = lambda: math_ops.add(values, 1)
-    fn2 = lambda: math_ops.sub(values, 1)
+    fn2 = lambda: math_ops.subtract(values, 1)
     with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
       _ = control_flow_ops.cond(False, fn1, fn2)

@@ -328,7 +330,7 @@ class ControlFlowTest(test.TestCase):
       x = ops.IndexedSlices(values, indices)
       pred = math_ops.less(1, 2)
       fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
-      fn2 = lambda: ops.IndexedSlices(math_ops.sub(x.values, 1), indices)
+      fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
       r = control_flow_ops.cond(pred, fn1, fn2)

       val = r.values.eval()
@@ -374,7 +376,7 @@ class ControlFlowTest(test.TestCase):
       x = ops.IndexedSlices(values, i_32)
       pred = math_ops.less(1, 2)
       fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
-      fn2 = lambda: ops.IndexedSlices(math_ops.sub(x.values, 1), i_64)
+      fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
       r = control_flow_ops.cond(pred, fn1, fn2)

       val = r.values.eval()
@@ -392,7 +394,7 @@ class ControlFlowTest(test.TestCase):
       x = constant_op.constant(10.0)
       pred = math_ops.less(1.0, 2.0)
       fn1 = lambda: math_ops.add(v, 1.0)
-      fn2 = lambda: math_ops.sub(x, 1.0)
+      fn2 = lambda: math_ops.subtract(x, 1.0)
       r = control_flow_ops.cond(pred, fn1, fn2)

       for op in x.graph.get_operations():
@@ -404,7 +406,7 @@ class ControlFlowTest(test.TestCase):
       x = constant_op.constant(10)
       pred = math_ops.less(1, 2)
       fn1 = lambda: math_ops.add(x, 1)
-      fn2 = lambda: math_ops.sub(x, 1)
+      fn2 = lambda: math_ops.subtract(x, 1)
       r = control_flow_ops.cond(pred, fn1, fn2)

       result = r.eval()
@@ -420,7 +422,7 @@ class ControlFlowTest(test.TestCase):
       x = constant_op.constant(10)
       r = control_flow_ops.cond(
           math_ops.less(1, 0), lambda: math_ops.add(x, 1),
-          lambda: math_ops.sub(x, 1))
+          lambda: math_ops.subtract(x, 1))
       result = r.eval()
     self.assertTrue(check_op_order(x.graph))
     self.assertAllEqual(9, result)
@@ -430,7 +432,7 @@ class ControlFlowTest(test.TestCase):
       x = constant_op.constant(10)
       pred = math_ops.less(1, 2)
       fn1 = lambda: math_ops.add(x, 1)
-      fn2 = lambda: math_ops.sub(x, 1)
+      fn2 = lambda: math_ops.subtract(x, 1)
       fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
       r = control_flow_ops.cond(pred, fn3, fn2)

@@ -595,8 +597,8 @@ class ControlFlowTest(test.TestCase):
       c = array_ops.placeholder(dtypes.int32, shape=[])
       x = constant_op.constant(10.0)
       pred = math_ops.less(c, 2)
-      fn1 = lambda: math_ops.mul(x, 42.0)
-      fn2 = lambda: math_ops.mul(x, 3.0)
+      fn1 = lambda: math_ops.multiply(x, 42.0)
+      fn2 = lambda: math_ops.multiply(x, 3.0)
       r = control_flow_ops.cond(pred, fn1, fn2)

       grad = gradients_impl.gradients(r, [x])[0]
@@ -1080,7 +1082,7 @@ class ControlFlowTest(test.TestCase):

       r = control_flow_ops.cond(p,
                                 lambda: control_flow_ops.while_loop(c, b, [n]),
-                                lambda: math_ops.mul(n, 2.0))
+                                lambda: math_ops.multiply(n, 2.0))
       r1 = gradients_impl.gradients(r, [n])
       self.assertEqual(10, sess.run(r, {p: True}))
       self.assertEqual([1.0], sess.run(r1, {p: True}))
@@ -1100,7 +1102,8 @@ class ControlFlowTest(test.TestCase):
       # pylint: disable=undefined-variable
       # for OSS build
       b = lambda x: control_flow_ops.cond(
-          constant_op.constant(True), lambda: math_ops.add(x, one), lambda: math_ops.sub(x, one))
+          constant_op.constant(True),
+          lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
       # pylint: enable=undefined-variable
       r = control_flow_ops.while_loop(c, b, [i])
       self.assertAllEqual(10, r.eval())
@@ -1119,8 +1122,9 @@ class ControlFlowTest(test.TestCase):
       c = lambda x: math_ops.less(x, 10)
       # pylint: disable=undefined-variable
       # for OSS build
-      b = lambda x: control_flow_ops.cond(math_ops.less(0, 1), lambda: math_ops.add(x, 1),
-                                          lambda: math_ops.sub(x, 1))
+      b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
+                                          lambda: math_ops.add(x, 1),
+                                          lambda: math_ops.subtract(x, 1))
       # pylint: enable=undefined-variable
       r = control_flow_ops.while_loop(c, b, [n])
       self.assertAllEqual(10, r.eval())
@@ -1326,7 +1330,7 @@ class ControlFlowTest(test.TestCase):
         return math_ops.greater(i, 0)

       def b1(i, x):
-        ni = math_ops.sub(i, 1)
+        ni = math_ops.subtract(i, 1)
         nx = x + gen_data_flow_ops._stack_pop(s, dtypes.int32)
         return [ni, nx]

@@ -1385,7 +1389,7 @@ class ControlFlowTest(test.TestCase):
       v = constant_op.constant([2.0], name="v")
       n = constant_op.constant(0, name="n")
       c = lambda i, v: math_ops.less(i, 5)
-      b = lambda i, v: [i + 1, math_ops.mul(x, v)]
+      b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
       r = control_flow_ops.while_loop(
           c,
           b, [n, v], [n.get_shape(), tensor_shape.unknown_shape()],
|
||||
x = array_ops.placeholder(dtypes.float32, [None])
|
||||
v0 = constant_op.constant([2.0, 2.0], name="v")
|
||||
c = lambda v: constant_op.constant(False)
|
||||
b = lambda v: math_ops.mul(v, x)
|
||||
b = lambda v: math_ops.multiply(v, x)
|
||||
r = control_flow_ops.while_loop(c, b, [v0])
|
||||
y = math_ops.square(x)
|
||||
|
||||
@ -1413,7 +1417,7 @@ class ControlFlowTest(test.TestCase):
|
||||
c = lambda v: math_ops.less(v, 100.0)
|
||||
b = math_ops.square
|
||||
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
|
||||
r = math_ops.mul(r, r)
|
||||
r = math_ops.multiply(r, r)
|
||||
|
||||
r = gradients_impl.gradients(r, v)[0]
|
||||
self.assertEqual(524288.0, r.eval())
|
||||
@ -1434,7 +1438,7 @@ class ControlFlowTest(test.TestCase):
|
||||
a = constant_op.constant(3.0, name="a")
|
||||
v = constant_op.constant(2.0, name="v")
|
||||
c = lambda v: math_ops.less(v, 100.0)
|
||||
b = lambda v: math_ops.mul(v, a)
|
||||
b = lambda v: math_ops.multiply(v, a)
|
||||
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
|
||||
|
||||
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
|
||||
@ -1454,13 +1458,13 @@ class ControlFlowTest(test.TestCase):
|
||||
def inner_loop(s):
|
||||
z = constant_op.constant(0)
|
||||
c = lambda i, x: math_ops.less(i, 4)
|
||||
b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
|
||||
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
|
||||
return control_flow_ops.while_loop(c, b, [z, s])
|
||||
c = lambda x: math_ops.less(x, 128.0)
|
||||
def b(x):
|
||||
return control_flow_ops.cond(constant_op.constant(True),
|
||||
lambda: math_ops.square(inner_loop(x)[1]),
|
||||
lambda: math_ops.mul(x, 2.0))
|
||||
lambda: math_ops.multiply(x, 2.0))
|
||||
r = control_flow_ops.while_loop(c, b, [v])
|
||||
r = gradients_impl.gradients(r, v)[0]
|
||||
self.assertAllClose(512.0, r.eval())
|
||||
@@ -1474,7 +1478,7 @@ class ControlFlowTest(test.TestCase):
       a = variables.Variable(3.0)
       v = constant_op.constant(2.0, name="v")
       c = lambda v: math_ops.less(v, 100.0)
-      b = lambda v: math_ops.mul(v, a)
+      b = lambda v: math_ops.multiply(v, a)
       r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)

       r = gradients_impl.gradients(r, a)
@@ -1566,7 +1570,7 @@ class ControlFlowTest(test.TestCase):

       def b(x, y):
         y1 = math_ops.add(x, y)
-        x1 = math_ops.mul(x, y1)
+        x1 = math_ops.multiply(x, y1)
         return x1, y1

       rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
@@ -1588,7 +1592,7 @@ class ControlFlowTest(test.TestCase):
       c = lambda i, x: math_ops.less(i, 10)

       def b(i, x):
-        x = math_ops.mul(x, 2.0)
+        x = math_ops.multiply(x, 2.0)
         i = math_ops.add(i, 1)
         return i, x

@@ -1655,7 +1659,7 @@ class ControlFlowTest(test.TestCase):
       c = lambda i, x: math_ops.less(i, 5)

       def b(i, x):
-        x = math_ops.mul(x, 2.0)
+        x = math_ops.multiply(x, 2.0)
         i = math_ops.add(i, 1)
         return i, x

@@ -1673,7 +1677,7 @@ class ControlFlowTest(test.TestCase):
       c = lambda i, x: math_ops.less(i, 5)

       def b(i, x):
-        x = math_ops.mul(x, 2.0)
+        x = math_ops.multiply(x, 2.0)
         i = math_ops.add(i, 1)
         return i, x

@@ -1711,11 +1715,11 @@ class ControlFlowTest(test.TestCase):

       def inner_loop(s):
         c = lambda x: math_ops.less(x, 4.0)
-        b = lambda x: math_ops.mul(x, 2.0)
+        b = lambda x: math_ops.multiply(x, 2.0)
         return control_flow_ops.while_loop(c, b, [s])

       c = lambda x: math_ops.less(x, 2.0)
-      b = lambda x: math_ops.mul(inner_loop(x), 2.0)
+      b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
       r = control_flow_ops.while_loop(c, b, [v])

       r = gradients_impl.gradients(r, v)[0]
@@ -1732,13 +1736,13 @@ class ControlFlowTest(test.TestCase):
       def inner_loop1(s):
         z = constant_op.constant(0)
         c = lambda i, x: math_ops.less(i, 4)
-        b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
         return control_flow_ops.while_loop(c, b, [z, s])

       def inner_loop2(s):
         z = constant_op.constant(0)
         c = lambda i, x: math_ops.less(i, 4)
-        b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
         return control_flow_ops.while_loop(c, b, [z, s])

       c = lambda x: math_ops.less(x, 128.0)
@@ -1755,17 +1759,17 @@ class ControlFlowTest(test.TestCase):
       def inner_loop1(s):
         z = constant_op.constant(0)
         c = lambda i, x: math_ops.less(i, 4)
-        b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
         return control_flow_ops.while_loop(c, b, [z, s])

       def inner_loop2(s):
         z = constant_op.constant(0)
         c = lambda i, x: math_ops.less(i, 4)
-        b = lambda i, x: [math_ops.add(i, 1), math_ops.mul(x, 2.0)]
+        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
         return control_flow_ops.while_loop(c, b, [z, s])

       c = lambda x: math_ops.less(x, 128.0)
-      b = lambda x: math_ops.mul(inner_loop1(x)[1], inner_loop2(x)[1])
+      b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
       r = control_flow_ops.while_loop(c, b, [v])

       r = gradients_impl.gradients(r, v)[0]
@@ -1802,8 +1806,8 @@ class ControlFlowTest(test.TestCase):
       # pylint: disable=undefined-variable
       # for OSS build
       b = lambda x: control_flow_ops.cond(constant_op.constant(True),
-                                          lambda: math_ops.square(x),
-                                          lambda: math_ops.sub(x, one))
+                                          lambda: math_ops.square(x),
+                                          lambda: math_ops.subtract(x, one))
       # pylint: enable=undefined-variable
       r = control_flow_ops.while_loop(c, b, [v])
       r = gradients_impl.gradients(r, v)[0]
@@ -1822,8 +1826,8 @@ class ControlFlowTest(test.TestCase):
       # pylint: disable=undefined-variable
       # for OSS build
       b = lambda x: control_flow_ops.cond(constant_op.constant(True),
-                                          lambda: math_ops.square(x),
-                                          lambda: math_ops.sub(x, one))
+                                          lambda: math_ops.square(x),
+                                          lambda: math_ops.subtract(x, one))
       # pylint: enable=undefined-variable
       r = control_flow_ops.while_loop(c, b, [v])
       r = gradients_impl.gradients(r, v)[0]
@@ -1937,7 +1941,7 @@ class ControlFlowTest(test.TestCase):

       def b(i, x):
         data = constant_op.constant([1.0, 2.0, 3.0])
-        data = math_ops.mul(data, params_1)
+        data = math_ops.multiply(data, params_1)
         x1 = x + gradients_impl.gradients(data, params)[0]
         return i + 1, x1

@@ -1956,7 +1960,8 @@ class ControlFlowTest(test.TestCase):

       def b(i, y):
         return [
-            i + 1, functional_ops.map_fn(lambda x: math_ops.mul(x, param), y)
+            i + 1,
+            functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
         ]

       r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
@@ -64,7 +64,7 @@ class AssignOpTest(test.TestCase):
       ones_t = array_ops.fill([1024, 1024], float(1))
       p = variables.Variable(array_ops.zeros([1024, 1024]))
       assigns = [
-          state_ops.assign(p, math_ops.mul(ones_t, float(i)), False)
+          state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
           for i in range(1, 21)
       ]
       variables.global_variables_initializer().run()
@@ -153,7 +153,7 @@ class AssignOpTest(test.TestCase):
       p = variables.Variable(zeros_t)
       assigns = [
           state_ops.assign(
-              p, math_ops.mul(ones_t, float(i)), use_locking=True)
+              p, math_ops.multiply(ones_t, float(i)), use_locking=True)
           for i in range(1, 21)
       ]
       p.initializer.run()
@@ -43,7 +43,7 @@ def simple_scoped_fn(a, x):
         "two", [],
         dtype=dtypes.int32,
         initializer=init_ops.constant_initializer(2))
-    return math_ops.mul(math_ops.add(a, x), two)
+    return math_ops.multiply(math_ops.add(a, x), two)


 class FunctionalOpsTest(test.TestCase):
@@ -52,12 +52,13 @@ class FunctionalOpsTest(test.TestCase):
     with self.test_session():
       elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")

-      r = functional_ops.foldl(lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
-                               elems)
+      r = functional_ops.foldl(
+          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
+          elems)
       self.assertAllEqual(208, r.eval())

       r = functional_ops.foldl(
-          lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
+          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
           elems,
           initializer=10)
       self.assertAllEqual(880, r.eval())
@@ -85,12 +86,13 @@ class FunctionalOpsTest(test.TestCase):
     with self.test_session():
       elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")

-      r = functional_ops.foldr(lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
-                               elems)
+      r = functional_ops.foldr(
+          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
+          elems)
       self.assertAllEqual(450, r.eval())

       r = functional_ops.foldr(
-          lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
+          lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
           elems,
           initializer=10)
       self.assertAllEqual(1282, r.eval())
@@ -114,27 +116,28 @@ class FunctionalOpsTest(test.TestCase):
       self.assertEqual(len(variables.trainable_variables()), 1)
       self.assertAllEqual(1282, r.eval())

+  # pylint: disable=unnecessary-lambda
   def testFold_Grad(self):
     with self.test_session():
       elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
       v = constant_op.constant(2.0, name="v")

       r = functional_ops.foldl(
-          lambda a, x: math_ops.mul(a, x), elems, initializer=v)
+          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
       r = gradients_impl.gradients(r, v)[0]
       self.assertAllEqual(720.0, r.eval())

       r = functional_ops.foldr(
-          lambda a, x: math_ops.mul(a, x), elems, initializer=v)
+          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
       r = gradients_impl.gradients(r, v)[0]
       self.assertAllEqual(720.0, r.eval())
+  # pylint: enable=unnecessary-lambda

   def testMap_Simple(self):
     with self.test_session():
       nums = [1, 2, 3, 4, 5, 6]
       elems = constant_op.constant(nums, name="data")
-      r = functional_ops.map_fn(lambda x: math_ops.mul(math_ops.add(x, 3), 2),
-                                elems)
+      r = functional_ops.map_fn(
+          lambda x: math_ops.multiply(math_ops.add(x, 3), 2), elems)
       self.assertAllEqual(np.array([(x + 3) * 2 for x in nums]), r.eval())

   def testMapSparseTensor(self):
@@ -158,7 +161,7 @@ class FunctionalOpsTest(test.TestCase):
           "two", [],
           dtype=dtypes.int32,
           initializer=init_ops.constant_initializer(2))
-      return math_ops.mul(x, two)
+      return math_ops.multiply(x, two)

     with variable_scope.variable_scope("root") as varscope:
       elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
@@ -183,7 +186,7 @@ class FunctionalOpsTest(test.TestCase):
       param = constant_op.constant(2.0)
       elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
       y = functional_ops.map_fn(
-          lambda x: math_ops.mul(math_ops.square(x), param), elems)
+          lambda x: math_ops.multiply(math_ops.square(x), param), elems)
       r = gradients_impl.gradients(y, param)[0]
       self.assertAllEqual(91.0, r.eval())
       r = gradients_impl.gradients(y, elems)[0]
@@ -192,8 +195,8 @@ class FunctionalOpsTest(test.TestCase):
   def testMap_SimpleNotTensor(self):
     with self.test_session():
       nums = np.array([1, 2, 3, 4, 5, 6])
-      r = functional_ops.map_fn(lambda x: math_ops.mul(math_ops.add(x, 3), 2),
-                                nums)
+      r = functional_ops.map_fn(
+          lambda x: math_ops.multiply(math_ops.add(x, 3), 2), nums)
       self.assertAllEqual(np.array([(x + 3) * 2 for x in nums]), r.eval())

   def testMap_SingleInputMultiOutput(self):
@@ -250,12 +253,14 @@ class FunctionalOpsTest(test.TestCase):
       elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
       v = constant_op.constant(2.0, name="v")

-      r = functional_ops.scan(lambda a, x: math_ops.mul(a, x), elems)
+      # pylint: disable=unnecessary-lambda
+      r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems)
       self.assertAllEqual([1., 2., 6., 24., 120., 720.], r.eval())

       r = functional_ops.scan(
-          lambda a, x: math_ops.mul(a, x), elems, initializer=v)
+          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
       self.assertAllEqual([2., 4., 12., 48., 240., 1440.], r.eval())
+      # pylint: enable=unnecessary-lambda

   def testScan_SingleInputMultiOutput(self):
     with self.test_session() as sess:
@@ -355,8 +360,10 @@ class FunctionalOpsTest(test.TestCase):
       elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
       v = constant_op.constant(2.0, name="v")

+      # pylint: disable=unnecessary-lambda
       r = functional_ops.scan(
-          lambda a, x: math_ops.mul(a, x), elems, initializer=v)
+          lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
+      # pylint: enable=unnecessary-lambda
       r = gradients_impl.gradients(r, v)[0]
       self.assertAllEqual(873.0, r.eval())

@@ -32,13 +32,13 @@ class SessionOpsTest(test.TestCase):
       # Return a handle.
       a = constant_op.constant(10)
       b = constant_op.constant(5)
-      c = math_ops.mul(a, b)
+      c = math_ops.multiply(a, b)
       h = session_ops.get_session_handle(c)
       h = sess.run(h)

       # Feed a tensor handle.
       f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
-      y = math_ops.mul(x, 10)
+      y = math_ops.multiply(x, 10)
       self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))

   def testHandleEval(self):
@@ -46,7 +46,7 @@ class SessionOpsTest(test.TestCase):
       # Return a handle.
       a = constant_op.constant(10)
       b = constant_op.constant(5)
-      c = math_ops.mul(a, b)
+      c = math_ops.multiply(a, b)
       h = session_ops.get_session_handle(c)
       h = sess.run(h)

@@ -58,9 +58,9 @@ class SessionOpsTest(test.TestCase):
       # Return a handle and a value.
       a = constant_op.constant(10)
       b = constant_op.constant(5)
-      c = math_ops.mul(a, b)
+      c = math_ops.multiply(a, b)
       h = session_ops.get_session_handle(c)
-      v = math_ops.mul(a, c)
+      v = math_ops.multiply(a, c)
       h, v = sess.run([h, v])

       self.assertEqual(50, h.eval())
@@ -72,16 +72,16 @@ class SessionOpsTest(test.TestCase):
       a = constant_op.constant(10)
       b = constant_op.constant(5)
       p = math_ops.less(a, b)
-      c = math_ops.mul(a, b)
+      c = math_ops.multiply(a, b)
       h = session_ops.get_session_handle(c)
       p, h = sess.run([p, h])

       # Run by feeding a tensor handle.
       f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
       if p:
-        y = math_ops.mul(x, 10)
+        y = math_ops.multiply(x, 10)
       else:
-        y = math_ops.mul(x, 100)
+        y = math_ops.multiply(x, 100)
       result = sess.run(y, feed_dict={f: h.handle})

       self.assertEqual(5000, result)
@@ -128,13 +128,13 @@ class SessionOpsTest(test.TestCase):
       # Return a handle.
       a = constant_op.constant(10)
       b = constant_op.constant(5)
-      c = math_ops.mul(a, b)
+      c = math_ops.multiply(a, b)
       h = session_ops.get_session_handle(c)
       h = sess.run(h)

       # Feed a tensor handle.
       f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
-      y = math_ops.mul(x, 10)
+      y = math_ops.multiply(x, 10)
       self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))

       # Feed another tensor handle.
@@ -149,7 +149,7 @@ class SessionOpsTest(test.TestCase):
       # Return a handle.
       a = constant_op.constant(10)
       b = constant_op.constant(5)
-      c = math_ops.mul(a, b)
+      c = math_ops.multiply(a, b)
       h = session_ops.get_session_handle(c)
       sess.run(h).delete()

@@ -158,7 +158,7 @@ class SessionOpsTest(test.TestCase):
       # Return a handle.
       a = constant_op.constant(10)
       b = constant_op.constant(5)
-      c = math_ops.mul(a, b)
+      c = math_ops.multiply(a, b)
       h = session_ops.get_session_handle(c)
       h = sess.run(h)

@@ -82,7 +82,7 @@ class StackOpTest(test.TestCase):
         return math_ops.greater(x, 0)

       def b1(x, y):
-        nx = math_ops.sub(x, 1)
+        nx = math_ops.subtract(x, 1)
         ny = y + gen_data_flow_ops._stack_pop(h, dtypes.float32)
         return [nx, ny]

@@ -863,7 +863,7 @@ class TensorArrayTest(test.TestCase):
       def b(i, acc):
         x1 = control_flow_ops.cond(
             math_ops.equal(i, 0), lambda: x,
-            lambda: math_ops.mul(acc.read(i - 1), 2.0))
+            lambda: math_ops.multiply(acc.read(i - 1), 2.0))
         return i + 1, acc.write(i, x1)

       i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
@@ -228,7 +228,7 @@ class VariableOpTest(test.TestCase):
       # GPU. The test ensures that the dependency on 'increment' is still
       # honored, i.e., the Send and Recv from GPU to CPU should take place
       # only after the increment.
-      result = math_ops.mul(var, var)
+      result = math_ops.multiply(var, var)
       self.assertAllClose([4.0], result.eval())

   def testIsVariableInitialized(self):
@@ -349,7 +349,7 @@ class VariablesTestCase(test.TestCase):
       v1.eval()

       v2 = variables.Variable(
-          math_ops.neg(v1.initialized_value()), dtype=dtypes.float32)
+          math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
       self.assertEqual(v1.get_shape(), v2.get_shape())
       self.assertAllClose(np.negative(value), v2.initial_value.eval())

@@ -1065,7 +1065,7 @@ class Conv2DTranspose(Conv2D):

     def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
       if isinstance(dim_size, ops.Tensor):
-        dim_size = math_ops.mul(dim_size, stride_size)
+        dim_size = math_ops.multiply(dim_size, stride_size)
       elif dim_size is not None:
         dim_size *= stride_size

@@ -2157,7 +2157,7 @@ class WhileContext(ControlFlowContext):
     self._pivot = loop_cond(pred, name="b_count")
     switch_count = switch(merge_count, self._pivot)

-    index = math_ops.sub(switch_count[1], one)
+    index = math_ops.subtract(switch_count[1], one)
     self._pivot_for_body = index
     next_count = _NextIteration(index)
     merge_count.op._update_input(1, next_count)
@@ -823,7 +823,7 @@ def _hessian_vector_product(ys, xs, v):

   assert len(grads) == length
   elemwise_products = [
-      math_ops.mul(grad_elem, array_ops.stop_gradient(v_elem))
+      math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
       for grad_elem, v_elem in zip(grads, v) if grad_elem is not None
   ]

@@ -474,7 +474,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
       c = constant_op.constant(np_val)
       c_sparse = math_ops._as_indexed_slices(c)
       self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
-      c_dense = math_ops.mul(c_sparse, 1.0)
+      c_dense = math_ops.multiply(c_sparse, 1.0)
       self.assertAllClose(np_val, c_dense.eval())

   def testIndexedSlicesToTensorList(self):
@@ -502,7 +502,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
           c_sparse.values,
           math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape)
       self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
-      c_dense = math_ops.mul(c_sparse, 1.0)
+      c_dense = math_ops.multiply(c_sparse, 1.0)
       self.assertAllClose(np_val, c_dense.eval())

   def testWarnings(self):
@@ -511,7 +511,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
         array_ops.placeholder(dtypes.float32),
         array_ops.placeholder(dtypes.int32), constant([4, 4, 4, 4]))
     with warnings.catch_warnings(record=True) as w:
-      math_ops.mul(c_sparse, 1.0)
+      math_ops.multiply(c_sparse, 1.0)
     self.assertEqual(0, len(w))

     # Greater than or equal to the threshold: warning.
@@ -519,7 +519,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
         array_ops.placeholder(dtypes.float32),
         array_ops.placeholder(dtypes.int32), constant([100, 100, 100, 100]))
     with warnings.catch_warnings(record=True) as w:
-      math_ops.mul(c_sparse, 1.0)
+      math_ops.multiply(c_sparse, 1.0)
     self.assertEqual(1, len(w))
     self.assertTrue(
         "with 100000000 elements. This may consume a large amount of memory." in
@@ -531,7 +531,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
         array_ops.placeholder(dtypes.int32),
         array_ops.placeholder(dtypes.int32))
     with warnings.catch_warnings(record=True) as w:
-      math_ops.mul(c_sparse, 1.0)
+      math_ops.multiply(c_sparse, 1.0)
     self.assertEqual(1, len(w))
     self.assertTrue(
         "of unknown shape. This may consume a large amount of memory." in
@@ -735,7 +735,7 @@ def per_image_standardization(image):
   pixel_value_scale = math_ops.maximum(stddev, min_stddev)
   pixel_value_offset = image_mean

-  image = math_ops.sub(image, pixel_value_offset)
+  image = math_ops.subtract(image, pixel_value_offset)
   image = math_ops.div(image, pixel_value_scale)
   return image

@@ -968,7 +968,7 @@ def convert_image_dtype(image, dtype, saturate=False, name=None):
       else:
         cast = math_ops.cast(image, dtype)
         scale = (scale_out + 1) // (scale_in + 1)
-        return math_ops.mul(cast, scale, name=name)
+        return math_ops.multiply(cast, scale, name=name)
     elif image.dtype.is_floating and dtype.is_floating:
       # Both float: Just cast, no possible overflows in the allowed ranges.
       # Note: We're ignoreing float overflows. If your image dynamic range
@@ -979,11 +979,11 @@ def convert_image_dtype(image, dtype, saturate=False, name=None):
       # Converting to float: first cast, then scale. No saturation possible.
       cast = math_ops.cast(image, dtype)
       scale = 1. / image.dtype.max
-      return math_ops.mul(cast, scale, name=name)
+      return math_ops.multiply(cast, scale, name=name)
     else:
       # Converting from float: first scale, then cast
       scale = dtype.max + 0.5  # avoid rounding problems in the cast
-      scaled = math_ops.mul(image, scale)
+      scaled = math_ops.multiply(image, scale)
       if saturate:
         return math_ops.saturate_cast(scaled, dtype, name=name)
       else:
@@ -66,7 +66,7 @@ def _scale_losses(losses, weights):
   reduction_indices = list(range(start_index, losses.get_shape().ndims))
   reduced_losses = math_ops.reduce_sum(losses,
                                        reduction_indices=reduction_indices)
-  reduced_losses = math_ops.mul(reduced_losses, weights)
+  reduced_losses = math_ops.multiply(reduced_losses, weights)
   return math_ops.reduce_sum(reduced_losses)


@@ -140,7 +140,7 @@ def _num_present(losses, weights, per_batch=False):
                                 math_ops.to_float(batch_size))
     num_per_batch = array_ops.where(math_ops.equal(weights, 0),
                                     0.0, num_per_batch)
-    num_per_batch = math_ops.mul(array_ops.ones(
+    num_per_batch = math_ops.multiply(array_ops.ones(
         array_ops.reshape(batch_size, [1])), num_per_batch)
     return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)

@@ -156,7 +156,7 @@ def _num_present(losses, weights, per_batch=False):
                                        [weights.get_shape().ndims], [-1])
     num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))

-    num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
+    num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
     return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)


@@ -266,7 +266,7 @@ def absolute_difference(
     predictions.get_shape().assert_is_compatible_with(labels.get_shape())
     predictions = math_ops.to_float(predictions)
     labels = math_ops.to_float(labels)
-    losses = math_ops.abs(math_ops.sub(predictions, labels))
+    losses = math_ops.abs(math_ops.subtract(predictions, labels))
     return compute_weighted_loss(losses, weights, scope, loss_collection)


@@ -309,7 +309,7 @@ def cosine_distance(
     predictions = math_ops.to_float(predictions)
     labels = math_ops.to_float(labels)

-    radial_diffs = math_ops.mul(predictions, labels)
+    radial_diffs = math_ops.multiply(predictions, labels)
     losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
     return compute_weighted_loss(losses, weights, scope, loss_collection)

@ -344,8 +344,9 @@ def hinge_loss(labels, logits, weights=1.0, scope=None,
# We first need to convert binary labels to -1/1 labels (as floats).
labels = math_ops.to_float(labels)
all_ones = array_ops.ones_like(labels)
labels = math_ops.sub(2 * labels, all_ones)
losses = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
labels = math_ops.subtract(2 * labels, all_ones)
losses = nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
return compute_weighted_loss(losses, weights, scope, loss_collection)
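A NumPy stand-in for the hinge-loss hunk above, showing the {0, 1} to {-1, +1} label mapping that the subtract performs; the labels and logits are invented for illustration:

import numpy as np

labels = np.array([0.0, 1.0, 1.0])                 # binary {0, 1} labels
logits = np.array([-0.5, 2.0, 0.3])

signed = 2.0 * labels - 1.0                        # subtract: {0,1} -> {-1,+1}
losses = np.maximum(0.0, 1.0 - signed * logits)    # relu(1 - y * logit)
# signed = [-1, 1, 1]; losses = [0.5, 0.0, 0.7]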
@ -388,9 +389,9 @@ def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = -math_ops.mul(
losses = -math_ops.multiply(
labels,
math_ops.log(predictions + epsilon)) - math_ops.mul(
math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights, scope, loss_collection)
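The same log-loss formula in plain NumPy; epsilon matches the function's default, and the inputs are illustrative:

import numpy as np

labels = np.array([1.0, 0.0])
predictions = np.array([0.9, 0.2])
epsilon = 1e-7

losses = (-labels * np.log(predictions + epsilon)
          - (1.0 - labels) * np.log(1.0 - predictions + epsilon))
# approximately [0.105, 0.223]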
@ -444,7 +445,7 @@ def mean_pairwise_squared_error(labels, predictions, weights=1.0, scope=None,
labels = math_ops.to_float(labels)
weights = math_ops.to_float(ops.convert_to_tensor(weights))

diffs = math_ops.sub(predictions, labels)
diffs = math_ops.subtract(predictions, labels)

# Need to verify here since the function doesn't use compute_weighted_loss
if diffs.get_shape().ndims is None:

@ -514,7 +515,7 @@ def mean_squared_error(labels, predictions, weights=1.0, scope=None,
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.square(math_ops.sub(predictions, labels))
losses = math_ops.square(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope, loss_collection)
@ -540,7 +540,7 @@ def _AsinGrad(op, grad):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.sub(one, x2))
den = math_ops.sqrt(math_ops.subtract(one, x2))
inv = math_ops.reciprocal(den)
return grad * inv

@ -553,7 +553,7 @@ def _AcosGrad(op, grad):
x = math_ops.conj(x)
x2 = math_ops.square(x)
one = constant_op.constant(1, dtype=grad.dtype)
den = math_ops.sqrt(math_ops.sub(one, x2))
den = math_ops.sqrt(math_ops.subtract(one, x2))
inv = math_ops.reciprocal(den)
return -grad * inv
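A quick finite-difference check of the two gradients above: d/dx asin(x) is 1/sqrt(1 - x^2) and d/dx acos(x) is its negation. Pure NumPy sketch with an arbitrary test point:

import numpy as np

x, h = 0.3, 1e-6
analytic = 1.0 / np.sqrt(1.0 - x * x)                       # ~1.0483
numeric_asin = (np.arcsin(x + h) - np.arcsin(x - h)) / (2 * h)
numeric_acos = (np.arccos(x + h) - np.arccos(x - h)) / (2 * h)
assert abs(numeric_asin - analytic) < 1e-5
assert abs(numeric_acos + analytic) < 1e-5                  # note the sign flip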
@ -215,7 +215,7 @@ def _broadcast_weights(weights, values):
values_shape.is_fully_defined() and
weights_shape.is_compatible_with(values_shape)):
return weights
return math_ops.mul(
return math_ops.multiply(
weights, array_ops.ones_like(values), name='broadcast_weights')
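Sketch of the multiply-by-ones broadcasting trick in _broadcast_weights above; the shapes are illustrative. Multiplying by ones_like(values) expands a per-row weight vector to the full values shape:

import numpy as np

weights = np.array([[1.0], [0.5]])           # shape [2, 1]
values = np.zeros((2, 3))                    # shape [2, 3]

broadcast = weights * np.ones_like(values)   # shape [2, 3]
# [[1. , 1. , 1. ],
#  [0.5, 0.5, 0.5]]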
@ -302,7 +302,7 @@ def mean(values, weights=None, metrics_collections=None,

if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
values = math_ops.multiply(values, weights)
num_values = math_ops.reduce_sum(_broadcast_weights(weights, values))
else:
num_values = math_ops.to_float(array_ops.size(values))

@ -595,7 +595,7 @@ def auc(labels, predictions, weights=None, num_thresholds=200,
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
x = rec
y = prec
return math_ops.reduce_sum(math_ops.mul(
return math_ops.reduce_sum(math_ops.multiply(
x[:num_thresholds - 1] - x[1:],
(y[:num_thresholds - 1] + y[1:]) / 2.), name=name)
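Sketch of the trapezoidal integration used by auc() above: the sum of width times average height over adjacent threshold points. The (x, y) points below are invented; x is recall and y precision in the PR variant:

import numpy as np

x = np.array([1.0, 0.6, 0.2, 0.0])     # monotonically decreasing recall
y = np.array([0.5, 0.7, 0.9, 1.0])     # precision at the same thresholds

widths = x[:-1] - x[1:]                # [0.4, 0.4, 0.2]
heights = (y[:-1] + y[1:]) / 2.0       # [0.6, 0.8, 0.95]
auc = np.sum(widths * heights)         # 0.75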
@ -712,7 +712,7 @@ def mean_cosine_distance(labels, predictions, dim, weights=None,
labels, predictions, weights = _remove_squeezable_dimensions(
labels, predictions, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.mul(predictions, labels)
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(radial_diffs,
reduction_indices=[dim,],
keep_dims=True)

@ -720,8 +720,8 @@ def mean_cosine_distance(labels, predictions, dim, weights=None,
None,
None,
name or 'mean_cosine_distance')
mean_distance = math_ops.sub(1.0, mean_distance)
update_op = math_ops.sub(1.0, update_op)
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)

if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
@ -1000,8 +1000,8 @@ def mean_tensor(values, weights=None, metrics_collections=None,
num_values = array_ops.ones_like(values)
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
num_values = math_ops.mul(num_values, weights)
values = math_ops.multiply(values, weights)
num_values = math_ops.multiply(num_values, weights)

total_compute_op = state_ops.assign_add(total, values)
count_compute_op = state_ops.assign_add(count, num_values)

@ -1101,7 +1101,7 @@ def _count_condition(values, weights=None, metrics_collections=None,
values = math_ops.to_float(values)
if weights is not None:
weights = math_ops.to_float(weights)
values = math_ops.mul(values, weights)
values = math_ops.multiply(values, weights)

value_tensor = array_ops.identity(count)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
@ -1562,7 +1562,7 @@ def _sparse_true_positive_at_k(labels,
tp = math_ops.to_double(tp)
if weights is not None:
weights = math_ops.to_double(weights)
tp = math_ops.mul(tp, weights)
tp = math_ops.multiply(tp, weights)
return tp

@ -1650,7 +1650,7 @@ def _sparse_false_negative_at_k(labels,
fn = math_ops.to_double(fn)
if weights is not None:
weights = math_ops.to_double(weights)
fn = math_ops.mul(fn, weights)
fn = math_ops.multiply(fn, weights)
return fn
@ -2185,7 +2185,7 @@ def _sparse_average_precision_at_k(labels, predictions, k):
precision_per_k = math_ops.div(
math_ops.to_double(tp_per_k), math_ops.to_double(retrieved_per_k),
name='precision_per_k')
relevant_precision_per_k = math_ops.mul(
relevant_precision_per_k = math_ops.multiply(
precision_per_k, math_ops.to_double(relevant_per_k),
name='relevant_precision_per_k')

@ -2259,7 +2259,7 @@ def sparse_average_precision_at_k(labels,
predictions=predictions, labels=labels, k=k)
if weights is not None:
weights = math_ops.to_double(weights)
average_precision = math_ops.mul(average_precision, weights)
average_precision = math_ops.multiply(average_precision, weights)

# Create accumulation variables and update ops for max average precision and
# total average precision.

@ -2275,7 +2275,7 @@ def sparse_average_precision_at_k(labels,
array_ops.size(average_precision, name='batch_max'))
else:
# TODO(ptucker): More efficient way to broadcast?
broadcast_weights = math_ops.mul(
broadcast_weights = math_ops.multiply(
weights, array_ops.ones_like(average_precision),
name='broadcast_weights')
batch_max = math_ops.reduce_sum(broadcast_weights, name='batch_max')

@ -2334,7 +2334,7 @@ def _sparse_false_positive_at_k(labels,
fp = math_ops.to_double(fp)
if weights is not None:
weights = math_ops.to_double(weights)
fp = math_ops.mul(fp, weights)
fp = math_ops.multiply(fp, weights)
return fp
@ -280,7 +280,7 @@ def l2_normalize(x, dim, epsilon=1e-12, name=None):
x = ops.convert_to_tensor(x, name="x")
square_sum = math_ops.reduce_sum(math_ops.square(x), dim, keep_dims=True)
x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
return math_ops.mul(x, x_inv_norm, name=name)
return math_ops.multiply(x, x_inv_norm, name=name)


def zero_fraction(value, name=None):
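A minimal sketch of the l2_normalize math above: scale x by the reciprocal square root of its summed squares, clamped by epsilon. The values are illustrative:

import numpy as np

x = np.array([3.0, 4.0])
epsilon = 1e-12

square_sum = np.sum(np.square(x), keepdims=True)            # [25.]
x_inv_norm = 1.0 / np.sqrt(np.maximum(square_sum, epsilon))
normalized = x * x_inv_norm                                 # [0.6, 0.8]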
@ -525,7 +525,7 @@ def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
counts = math_ops.reduce_prod(x_dims, name="count")
if shift is not None:
shift = ops.convert_to_tensor(shift, name="shift")
m_ss = math_ops.sub(x, shift)
m_ss = math_ops.subtract(x, shift)
v_ss = math_ops.squared_difference(x, shift)
else:  # no shift.
m_ss = x
@ -554,14 +554,14 @@ def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
divisor = math_ops.reciprocal(counts, name="divisor")
if shift is not None:
shifted_mean = math_ops.mul(mean_ss, divisor, name="shifted_mean")
shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
mean = math_ops.add(shifted_mean, shift, name="mean")
else:  # no shift.
shifted_mean = math_ops.mul(mean_ss, divisor, name="mean")
shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
mean = shifted_mean
variance = math_ops.sub(math_ops.mul(variance_ss, divisor),
math_ops.square(shifted_mean),
name="variance")
variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
math_ops.square(shifted_mean),
name="variance")
return (mean, variance)
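Sketch of normalize_moments' shift trick: with sufficient statistics computed about a shift s, mean = sum(x - s)/n + s and var = sum((x - s)^2)/n - (sum(x - s)/n)^2. The data below is illustrative:

import numpy as np

x = np.array([2.0, 4.0, 9.0])
shift = 4.0                                   # any value near the mean works
counts = float(x.size)

mean_ss = np.sum(x - shift)                   # 3.0
variance_ss = np.sum((x - shift) ** 2)        # 29.0

divisor = 1.0 / counts
shifted_mean = mean_ss * divisor              # 1.0
mean = shifted_mean + shift                   # 5.0
variance = variance_ss * divisor - shifted_mean ** 2   # 29/3 - 1 ~= 8.667
assert np.isclose(variance, x.var())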
@ -658,7 +658,7 @@ def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=False):

divisor = math_ops.reciprocal(sum_of_weights, name="inv_weight_sum")

weighted_mean = math_ops.mul(weighted_input_sum, divisor)
weighted_mean = math_ops.multiply(weighted_input_sum, divisor)

# Have the weighted mean; now on to variance:
weighted_distsq = math_ops.reduce_sum(

@ -667,7 +667,7 @@ def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=False):
name="weighted_distsq",
keep_dims=True)

weighted_variance = math_ops.mul(weighted_distsq, divisor)
weighted_variance = math_ops.multiply(weighted_distsq, divisor)

if not keep_dims:
weighted_mean = array_ops.squeeze(weighted_mean, squeeze_dims=axes)
@ -950,7 +950,7 @@ def _compute_sampled_logits(weights,
# row_wise_dots is [batch_size, num_true, dim]
dim = array_ops.shape(true_w)[1:2]
new_true_w_shape = array_ops.concat_v2([[-1, num_true], dim], 0)
row_wise_dots = math_ops.mul(
row_wise_dots = math_ops.multiply(
array_ops.expand_dims(inputs, 1),
array_ops.reshape(true_w, new_true_w_shape))
# We want the row-wise dot plus biases which yields a
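Sketch of the row-wise dot pattern in _compute_sampled_logits above: expand inputs to [batch, 1, dim] so the elementwise multiply broadcasts against [batch, num_true, dim], then sum over dim. The shapes are illustrative:

import numpy as np

batch, num_true, dim = 2, 3, 4
inputs = np.random.rand(batch, dim)
true_w = np.random.rand(batch, num_true, dim)

row_wise_dots = inputs[:, None, :] * true_w          # [2, 3, 4]
logits = row_wise_dots.sum(axis=-1)                  # [2, 3]
assert np.allclose(logits, np.einsum('bd,btd->bt', inputs, true_w))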
@ -1387,7 +1387,7 @@ def _flatten_outer_dims(logits):
"""Flattens logits' outer dimensions and keeps its last dimension."""
rank = array_ops.rank(logits)
last_dim_size = array_ops.slice(
array_ops.shape(logits), [math_ops.sub(rank, 1)], [1])
array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
output = array_ops.reshape(logits,
array_ops.concat_v2([[-1], last_dim_size], 0))
@ -1461,7 +1461,7 @@ def _softmax(logits, compute_op, dim=-1, name=None):

# Swap logits' dimension of dim and its last dimension.
input_rank = array_ops.rank(logits)
logits = _swap_axis(logits, dim, math_ops.sub(input_rank, 1))
logits = _swap_axis(logits, dim, math_ops.subtract(input_rank, 1))
shape_after_swap = array_ops.shape(logits)

# Reshape logits into a matrix.

@ -1472,7 +1472,7 @@ def _softmax(logits, compute_op, dim=-1, name=None):

# Transform back the output tensor.
output = array_ops.reshape(output, shape_after_swap)
output = _swap_axis(output, dim, math_ops.sub(input_rank, 1))
output = _swap_axis(output, dim, math_ops.subtract(input_rank, 1))

# Make shape inference work since reshape and transpose may erase its static
# shape.
@ -1602,7 +1602,7 @@ def softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):

# The output cost shape should be the input minus dim.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.sub(input_rank, 1)])
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)

# Make shape inference work since reshape and transpose may erase its static
@ -2075,12 +2075,12 @@ def erosion2d(value, kernel, strides, rates, padding, name=None):
"""
with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
# Reduce erosion to dilation by duality.
return math_ops.neg(gen_nn_ops.dilation2d(input=math_ops.neg(value),
filter=array_ops.reverse_v2(
kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))
return math_ops.negative(
gen_nn_ops.dilation2d(input=math_ops.negative(value),
filter=array_ops.reverse_v2(kernel, [0, 1]),
strides=strides,
rates=rates,
padding=padding,
name=name))

# pylint: enable=invalid-name
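A 1-D NumPy sketch of the duality the hunk above relies on: grayscale erosion equals the negated dilation of the negated signal. The signal is illustrative, and a flat (symmetric) structuring element keeps the kernel-reflection detail out of the picture:

import numpy as np

def dilate(signal, kernel):
    # Grayscale dilation: max over the window of signal + kernel.
    k, pad = len(kernel), len(kernel) // 2
    padded = np.pad(signal, pad, constant_values=-np.inf)
    return np.array([np.max(padded[i:i + k] + kernel)
                     for i in range(len(signal))])

def erode(signal, kernel):
    # Grayscale erosion: min over the window of signal - kernel.
    k, pad = len(kernel), len(kernel) // 2
    padded = np.pad(signal, pad, constant_values=np.inf)
    return np.array([np.min(padded[i:i + k] - kernel)
                     for i in range(len(signal))])

signal = np.array([0.0, 1.0, 3.0, 2.0, 0.0])
kernel = np.zeros(3)                       # flat structuring element
assert np.allclose(erode(signal, kernel),
                   -dilate(-signal, kernel[::-1]))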
@ -308,7 +308,7 @@ def _einsum_reduction(t0, t0_axis_labels, t1, t1_axis_labels, axes_to_sum):
t0 = array_ops.expand_dims(t0, -1)
for _ in broadcast_axes[0]:
t1 = array_ops.expand_dims(t1, len(preserved_axes))
product = math_ops.mul(t0, t1)
product = math_ops.multiply(t0, t1)
product_axes = sorted_axes[0] + sorted_axes[1][len(preserved_axes):]
return product, ''.join(product_axes)
else:
@ -43,9 +43,9 @@ class StripUnusedTest(test_util.TensorFlowTestCase):
# and that then multiplies it by 2.
with ops.Graph().as_default():
constant_node = constant_op.constant(1.0, name="constant_node")
wanted_input_node = math_ops.sub(constant_node,
3.0,
name="wanted_input_node")
wanted_input_node = math_ops.subtract(constant_node,
3.0,
name="wanted_input_node")
output_node = math_ops.multiply(
wanted_input_node, 2.0, name="output_node")
math_ops.add(output_node, 2.0, name="later_node")

@ -98,8 +98,8 @@ class StripUnusedTest(test_util.TensorFlowTestCase):
with ops.Graph().as_default():
constant_node1 = constant_op.constant(1.0, name="constant_node1")
constant_node2 = constant_op.constant(2.0, name="constant_node2")
input_node1 = math_ops.sub(constant_node1, 3.0, name="input_node1")
input_node2 = math_ops.sub(constant_node2, 5.0, name="input_node2")
input_node1 = math_ops.subtract(constant_node1, 3.0, name="input_node1")
input_node2 = math_ops.subtract(constant_node2, 5.0, name="input_node2")
output_node = math_ops.multiply(
input_node1, input_node2, name="output_node")
math_ops.add(output_node, 2.0, name="later_node")
@ -92,7 +92,8 @@ def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
p = global_step / decay_steps
if staircase:
p = math_ops.floor(p)
return math_ops.mul(learning_rate, math_ops.pow(decay_rate, p), name=name)
return math_ops.multiply(learning_rate, math_ops.pow(decay_rate, p),
name=name)


def piecewise_constant(x, boundaries, values, name=None):
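Sketch of the exponential-decay formula in the hunk above: decayed = learning_rate * decay_rate ** (global_step / decay_steps), with the exponent floored when staircase=True. The hyperparameters below are illustrative:

learning_rate, decay_rate, decay_steps = 0.1, 0.96, 1000

for global_step in (0, 500, 1000):
    p = global_step / decay_steps
    print(global_step, learning_rate * decay_rate ** p)
# 0 -> 0.1, 500 -> ~0.098, 1000 -> 0.096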
@ -250,15 +251,15 @@ def polynomial_decay(learning_rate, global_step, decay_steps,
power = math_ops.cast(power, dtype)
if cycle:
# Find the first multiple of decay_steps that is bigger than global_step.
decay_steps = math_ops.mul(decay_steps,
math_ops.ceil(global_step / decay_steps))
decay_steps = math_ops.multiply(decay_steps,
math_ops.ceil(global_step / decay_steps))
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step = math_ops.minimum(global_step, decay_steps)

p = math_ops.div(global_step, decay_steps)
return math_ops.add(math_ops.mul(learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
return math_ops.add(math_ops.multiply(learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate, name=name)
@ -325,8 +326,8 @@ def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
p = global_step / decay_steps
if staircase:
p = math_ops.floor(p)
exponent = math_ops.exp(math_ops.mul(math_ops.neg(decay_rate), p))
return math_ops.mul(learning_rate, exponent, name=name)
exponent = math_ops.exp(math_ops.multiply(math_ops.negative(decay_rate), p))
return math_ops.multiply(learning_rate, exponent, name=name)


def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
@ -393,5 +394,5 @@ def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
if staircase:
p = math_ops.floor(p)
const = math_ops.cast(constant_op.constant(1), learning_rate.dtype)
denom = math_ops.add(const, math_ops.mul(decay_rate, p))
denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
return math_ops.div(learning_rate, denom, name=name)
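A side-by-side sketch of the last two schedules above, in plain Python; the hyperparameters are illustrative:

import math

learning_rate, decay_rate, decay_steps = 0.1, 0.5, 1000

for global_step in (0, 1000, 2000):
    p = global_step / decay_steps
    natural_exp = learning_rate * math.exp(-decay_rate * p)
    inverse_time = learning_rate / (1.0 + decay_rate * p)
    print(global_step, round(natural_exp, 4), round(inverse_time, 4))
# step 0:    0.1     0.1
# step 1000: 0.0607  0.0667
# step 2000: 0.0368  0.05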