Clean up uses of deprecated math_ops casts

PiperOrigin-RevId: 237861278
Gaurav Jain authored on 2019-03-11 12:47:10 -07:00; committed by TensorFlower Gardener
parent ae88bfbea8
commit df3a337594
94 changed files with 601 additions and 450 deletions
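The change applied across all 94 files is mechanical: each deprecated convenience cast (math_ops.to_float, to_int32, to_int64, to_double) becomes an explicit math_ops.cast with the target dtype spelled out, which is why many hunks below also add a "from tensorflow.python.framework import dtypes" import. A minimal standalone sketch of the before/after pattern, assuming a TF 1.x environment where the deprecated helpers still exist and using a hypothetical tensor x:

    import tensorflow as tf  # TF 1.x assumed

    x = tf.constant([1, 2, 3])          # int32 tensor
    y_old = tf.to_float(x)              # deprecated helper being removed
    y_new = tf.cast(x, tf.float32)      # replacement used throughout this commit
    # Inside TensorFlow's own code base the same change reads:
    #   math_ops.cast(x, dtypes.float32) instead of math_ops.to_float(x)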


@ -193,7 +193,8 @@ class EqualitySplitHandler(base_split_handler.BaseSplitHandler):
num_minibatches = control_flow_ops.cond(
ops.convert_to_tensor(self._loss_uses_sum_reduction),
lambda: math_ops.to_int64(1), lambda: num_minibatches)
lambda: math_ops.cast(1, dtypes.int64),
lambda: num_minibatches)
partition_ids, gains, split_infos = (
split_handler_ops.build_categorical_equality_splits(
num_minibatches=num_minibatches,


@ -312,9 +312,10 @@ def _make_dense_split(quantile_accumulator_handle, stats_accumulator_handle,
gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
# For sum_reduction, we don't need to divide by number of minibatches.
num_minibatches = control_flow_ops.cond(loss_uses_sum_reduction,
lambda: math_ops.to_int64(1),
lambda: num_minibatches)
num_minibatches = control_flow_ops.cond(
loss_uses_sum_reduction,
lambda: math_ops.cast(1, dtypes.int64),
lambda: num_minibatches)
# Put quantile and stats accumulator flushing in the dependency path.
with ops.control_dependencies([flush_quantiles, partition_ids]):
are_splits_ready = array_ops.identity(are_splits_ready)
@ -488,9 +489,10 @@ def _make_sparse_split(
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
num_minibatches = control_flow_ops.cond(loss_uses_sum_reduction,
lambda: math_ops.to_int64(1),
lambda: num_minibatches)
num_minibatches = control_flow_ops.cond(
loss_uses_sum_reduction,
lambda: math_ops.cast(1, dtypes.int64),
lambda: num_minibatches)
# Put quantile and stats accumulator flushing in the dependency path.
with ops.control_dependencies([flush_quantiles, partition_ids]):
are_splits_ready = array_ops.identity(are_splits_ready)


@ -228,7 +228,7 @@ def extract_features(features, feature_columns, use_core_columns):
indices = array_ops.concat([
array_ops.slice(categorical_tensor.indices, [0, 0], [-1, 1]),
array_ops.expand_dims(
math_ops.to_int64(categorical_tensor.values), -1)
math_ops.cast(categorical_tensor.values, dtypes.int64), -1)
], 1)
tensor = sparse_tensor.SparseTensor(
indices=indices, values=weight_tensor.values, dense_shape=shape)
@ -611,8 +611,9 @@ class GradientBoostedDecisionTreeModel(object):
learner_pb2.LearnerConfig.TREE_PER_CLASS and
self._logits_dimension != 1):
# Choose the class for which the tree is built (one vs rest).
return math_ops.to_int32(
predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension)
return math_ops.cast(
predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension,
dtypes.int32)
return constant_op.constant(-1, dtype=dtypes.int32)
def update_stats(self, loss, predictions_dict, gradients=None, hessians=None):


@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
@ -43,7 +44,7 @@ def per_example_logistic_loss(labels, weights, predictions):
loss: A Rank 2 (N, 1) tensor of per-example logistic loss.
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
unweighted_loss = nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
@ -74,7 +75,7 @@ def per_example_quantile_regression_loss(labels, weights, predictions,
loss: A Rank 2 (N, 1) tensor of per-example quantile loss.
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
error = labels - predictions
square_loss_right = array_ops.where(error * quantile < 1.0,
math_ops.square(quantile * error),
@ -112,7 +113,7 @@ def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
loss: A Rank 2 (N, 1) tensor of per-example maxent loss
update_op: An update operation to update the loss's internal state.
"""
labels = math_ops.to_int64(labels)
labels = math_ops.cast(labels, dtypes.int64)
# If labels are of rank 1, make them rank 2.
labels_shape = labels.get_shape()
if len(labels_shape) != 2:
@ -120,7 +121,7 @@ def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
# Labels are indices of classes, convert them to one hot encodings.
target_one_hot = array_ops.one_hot(indices=labels, depth=num_classes)
labels = math_ops.reduce_sum(input_tensor=target_one_hot, axis=[1])
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
# Calculate softmax probabilities for each class.
unnormalized_probs = math_ops.exp(logits)
@ -253,7 +254,7 @@ def per_example_exp_loss(labels, weights, predictions, name=None, eps=0.1):
preds_converted = min_res
return math_ops.exp(-preds_converted * labels_converted)
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
unweighted_loss = exp_with_logits(
name=name, eps=eps, labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()
@ -312,7 +313,7 @@ def per_example_full_exp_loss(labels, weights, predictions, name=None):
return math_ops.exp(-1.0 * logits * labels_converted)
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
unweighted_loss = full_exp_with_logits(
name=name, labels=labels, logits=predictions)
return unweighted_loss * weights, control_flow_ops.no_op()


@ -283,7 +283,7 @@ def crf_unary_score(tag_indices, sequence_lengths, inputs):
offsets += array_ops.expand_dims(math_ops.range(max_seq_len) * num_tags, 0)
# Use int32 or int64 based on tag_indices' dtype.
if tag_indices.dtype == dtypes.int64:
offsets = math_ops.to_int64(offsets)
offsets = math_ops.cast(offsets, dtypes.int64)
flattened_tag_indices = array_ops.reshape(offsets + tag_indices, [-1])
unary_scores = array_ops.reshape(
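The offsets built in this hunk implement ordinary row-major flattening of a [batch, max_seq_len, num_tags] score tensor, so position (b, t, tag) maps to flat index (b * max_seq_len + t) * num_tags + tag before the gather. A tiny standalone NumPy check of that mapping (hypothetical shapes, not part of the diff):

    import numpy as np

    batch, max_seq_len, num_tags = 2, 3, 4
    scores = np.arange(batch * max_seq_len * num_tags).reshape(batch, max_seq_len, num_tags)
    b, t, tag = 1, 2, 3
    flat_index = (b * max_seq_len + t) * num_tags + tag
    assert scores.reshape(-1)[flat_index] == scores[b, t, tag]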


@ -232,7 +232,7 @@ class SlideDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.to_int32(i)], i),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
iterator = dataset_ops.make_initializable_iterator(


@ -20,6 +20,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
@ -158,10 +159,13 @@ def vector_size_to_square_matrix_size(d, validate_args, name=None):
return int(n)
else:
with ops.name_scope(name, "vector_size_to_square_matrix_size", [d]) as name:
n = (-1. + math_ops.sqrt(1 + 8. * math_ops.to_float(d))) / 2.
n = (-1. + math_ops.sqrt(1 + 8. * math_ops.cast(d, dtypes.float32))) / 2.
if validate_args:
with ops.control_dependencies([check_ops.assert_equal(
math_ops.to_float(math_ops.to_int32(n)), n,
message="Vector length is not a triangular number")]):
with ops.control_dependencies([
check_ops.assert_equal(
math_ops.cast(math_ops.cast(n, dtypes.int32), dtypes.float32),
n,
message="Vector length is not a triangular number")
]):
n = array_ops.identity(n)
return math_ops.cast(n, d.dtype)
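For reference, the formula in this hunk inverts the triangular-number relationship d = n(n + 1) / 2 between the length d of a packed lower-triangular vector and the matrix size n, giving n = (-1 + sqrt(1 + 8d)) / 2. A quick plain-Python sanity check (standalone, not part of the diff):

    import math

    for n in range(1, 6):
        d = n * (n + 1) // 2                                 # packed lower-triangular length
        assert (-1.0 + math.sqrt(1 + 8.0 * d)) / 2.0 == n    # recovers n exactly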


@ -304,14 +304,14 @@ def percentile(x,
x = ops.convert_to_tensor(x, name="x")
# Double is needed here and below, else we get the wrong index if the array
# is huge along axis.
q = math_ops.to_double(q, name="q")
q = math_ops.cast(q, dtypes.float64, name="q")
_get_static_ndims(q, expect_ndims=0)
if validate_args:
q = control_flow_ops.with_dependencies([
check_ops.assert_rank(q, 0),
check_ops.assert_greater_equal(q, math_ops.to_double(0.)),
check_ops.assert_less_equal(q, math_ops.to_double(100.))
check_ops.assert_greater_equal(q, math_ops.cast(0., dtypes.float64)),
check_ops.assert_less_equal(q, math_ops.cast(100., dtypes.float64))
], q)
if axis is None:
@ -336,7 +336,7 @@ def percentile(x,
y = _move_dims_to_flat_end(x, axis, x_ndims)
frac_at_q_or_above = 1. - q / 100.
d = math_ops.to_double(array_ops.shape(y)[-1])
d = math_ops.cast(array_ops.shape(y)[-1], dtypes.float64)
if interpolation == "lower":
index = math_ops.ceil((d - 1) * frac_at_q_or_above)
@ -349,7 +349,7 @@ def percentile(x,
# let's use max/min to avoid out of bounds errors.
d = array_ops.shape(y)[-1]
# d - 1 will be distinct from d in int32.
index = clip_ops.clip_by_value(math_ops.to_int32(index), 0, d - 1)
index = clip_ops.clip_by_value(math_ops.cast(index, dtypes.int32), 0, d - 1)
# Sort everything, not just the top 'k' entries, which allows multiple calls
# to sort only once (under the hood) and use CSE.
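The float64 requirement flagged in the comment above is about exact integer arithmetic: float32 has a 24-bit significand, so not every index above 2**24 is representable and a float32 index can land on a neighbouring element when the axis is huge. A standalone NumPy illustration (not part of the diff):

    import numpy as np

    i = 2**24 + 1
    print(np.float32(i) == i)   # False: 16777217 rounds to 16777216 in float32
    print(np.float64(i) == i)   # True: float64 is exact for integers up to 2**53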


@ -20,6 +20,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import histogram_ops
@ -125,7 +126,7 @@ class DiscreteScalarDistributionTestHelpers(object):
atol: Python `float`-type indicating the admissible absolute error between
analytical and sample statistics.
"""
x = math_ops.to_float(dist.sample(num_samples, seed=seed))
x = math_ops.cast(dist.sample(num_samples, seed=seed), dtypes.float32)
sample_mean = math_ops.reduce_mean(x, axis=0)
sample_variance = math_ops.reduce_mean(
math_ops.square(x - sample_mean), axis=0)
@ -180,7 +181,7 @@ class DiscreteScalarDistributionTestHelpers(object):
lo = value_range[0]
hi = value_range[1]
if nbins is None:
nbins = math_ops.to_int32(hi - lo)
nbins = math_ops.cast(hi - lo, dtypes.int32)
delta = (hi - lo) / math_ops.cast(
nbins, dtype=value_range.dtype.base_dtype)
edges = math_ops.range(


@ -613,7 +613,8 @@ class _InitializeClustersOpFactory(object):
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp,
math_ops.to_int64(self._num_remaining), self._random_seed,
math_ops.cast(self._num_remaining, dtypes.int64),
self._random_seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):


@ -53,7 +53,7 @@ def _covariance(x, diag):
A Tensor representing the covariance of x. In the case of
diagonal matrix just the diagonal is returned.
"""
num_points = math_ops.to_float(array_ops.shape(x)[0])
num_points = math_ops.cast(array_ops.shape(x)[0], dtypes.float32)
x -= math_ops.reduce_mean(x, 0, keepdims=True)
if diag:
cov = math_ops.reduce_sum(
@ -297,8 +297,9 @@ class GmmAlgorithm(object):
cholesky, array_ops.transpose(
diff, perm=[0, 2, 1]), lower=True))
diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
* math_ops.log(2 * np.pi) + log_det_covs)
self._probs[shard_id] = (
-0.5 * (diag_m + math_ops.cast(self._dimensions, dtypes.float32) *
math_ops.log(2 * np.pi) + log_det_covs))
def _define_diag_covariance_probs(self, shard_id, shard):
"""Defines the diagonal covariance probabilities per example in a class.
@ -320,7 +321,8 @@ class GmmAlgorithm(object):
x2_cov = math_ops.matmul(x2, cov_expanded)
x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
self._probs[shard_id] = -0.5 * (
math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
math_ops.cast(self._dimensions, dtypes.float32) *
math_ops.log(2.0 * np.pi) +
array_ops.transpose(det_expanded) + x2_cov)
def _define_log_prob_operation(self, shard_id, shard):
@ -400,7 +402,8 @@ class GmmAlgorithm(object):
# Update alpha.
if 'w' in self._params:
final_points_in_k = points_in_k / num_batches
num_examples = math_ops.to_float(math_ops.reduce_sum(final_points_in_k))
num_examples = math_ops.cast(math_ops.reduce_sum(final_points_in_k),
dtypes.float32)
self._alpha_op = self._alpha.assign(final_points_in_k /
(num_examples + MEPS))
else:


@ -140,7 +140,7 @@ def preprocess_image(images,
is_single = images.shape.ndims == 3
with ops.name_scope(scope, 'preprocess', [images, height, width]):
if not images.dtype.is_floating:
images = math_ops.to_float(images)
images = math_ops.cast(images, dtypes.float32)
if is_single:
images = array_ops.expand_dims(images, axis=0)
resized = image_ops.resize_bilinear(images, [height, width])
@ -1057,7 +1057,8 @@ def kernel_classifier_distance_and_std_from_activations(real_activations,
n_g = array_ops.shape(generated_activations)[0]
n_bigger = math_ops.maximum(n_r, n_g)
n_blocks = math_ops.to_int32(math_ops.ceil(n_bigger / max_block_size))
n_blocks = math_ops.cast(math_ops.ceil(n_bigger / max_block_size),
dtypes.int32)
v_r = n_r // n_blocks
v_g = n_g // n_blocks


@ -28,6 +28,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
@ -74,7 +75,7 @@ def _laplacian_pyramid(batch, num_levels):
res = spatial_conv(res, 4)
return res
pyramid = [math_ops.to_float(batch)]
pyramid = [math_ops.cast(batch, dtypes.float32)]
for _ in range(1, num_levels):
pyramid.append(pyr_down(pyramid[-1]))
pyramid[-2] -= pyr_up(pyramid[-1])


@ -20,6 +20,7 @@ from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.eval.python import eval_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
@ -171,8 +172,10 @@ def add_image_comparison_summaries(gan_model, num_comparisons=2,
gan_model.generated_data[:num_comparisons])
real_list = array_ops.unstack(gan_model.real_data[:num_comparisons])
diffs = [
math_ops.abs(math_ops.to_float(generated) - math_ops.to_float(real)) for
generated, real in zip(generated_list, real_list)]
math_ops.abs(math_ops.cast(generated, dtypes.float32) -
math_ops.cast(real, dtypes.float32))
for generated, real in zip(generated_list, real_list)
]
image_list.extend(diffs)
# Reshape image and display.


@ -224,7 +224,8 @@ class VBN(object):
# statistics and the reference batch statistics.
ref_batch_size = _static_or_dynamic_batch_size(
self._reference_batch, self._batch_axis)
self._example_weight = 1. / (math_ops.to_float(ref_batch_size) + 1.)
self._example_weight = 1. / (
math_ops.cast(ref_batch_size, dtypes.float32) + 1.)
self._ref_weight = 1. - self._example_weight
# Make the variables, if necessary.


@ -518,7 +518,7 @@ def connected_components(images):
def has_zero():
# Insert a zero in the consecutive ids where zero appears in unique_ids.
# id_is_zero has length 1.
zero_id_ind = math_ops.to_int32(id_is_zero[0])
zero_id_ind = math_ops.cast(id_is_zero[0], dtypes.int32)
ids_before = nonzero_consecutive_ids[:zero_id_ind]
ids_after = nonzero_consecutive_ids[zero_id_ind:]
return array_ops.concat([ids_before, [0], ids_after], axis=0)


@ -80,7 +80,7 @@ def sparse_multiclass_hinge_loss(
' {}'.format(logits_rank))
logits_shape = array_ops.shape(logits)
batch_size, num_classes = logits_shape[0], logits_shape[1]
logits = math_ops.to_float(logits)
logits = math_ops.cast(logits, dtypes.float32)
# Check labels have valid type.
if labels.dtype != dtypes.int32 and labels.dtype != dtypes.int64:


@ -840,7 +840,7 @@ class _WeightedSparseColumn(
# The weight tensor can be a regular Tensor. In such case, sparsify it.
weight_tensor = contrib_sparse_ops.dense_to_sparse_tensor(weight_tensor)
if not self.dtype.is_floating:
weight_tensor = math_ops.to_float(weight_tensor)
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return tuple([id_tensor, weight_tensor])
def insert_transformed_feature(self, columns_to_tensors):
@ -1731,7 +1731,7 @@ class _RealValuedVarLenColumn(_FeatureColumn, collections.namedtuple(
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
columns_to_tensors[self] = math_ops.cast(input_tensor, dtypes.float32)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
@ -1871,7 +1871,7 @@ class _RealValuedColumn(
"""
# Transform the input tensor according to the normalizer function.
input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
columns_to_tensors[self] = math_ops.to_float(input_tensor)
columns_to_tensors[self] = math_ops.cast(input_tensor, dtypes.float32)
# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
@ -1881,7 +1881,7 @@ class _RealValuedColumn(
output_rank=2):
input_tensor = self._to_dense_tensor(input_tensor)
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
input_tensor = math_ops.cast(input_tensor, dtypes.float32)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):
@ -1897,8 +1897,8 @@ class _RealValuedColumn(
return inputs.get(self)
def _transform_feature(self, inputs):
return math_ops.to_float(
self._normalized_input_tensor(inputs.get(self.name)))
return math_ops.cast(
self._normalized_input_tensor(inputs.get(self.name)), dtypes.float32)
@property
def _parse_example_spec(self):
@ -2104,7 +2104,7 @@ class _BucketizedColumn(
raise ValueError("BucketizedColumn currently only supports output_rank=2")
return array_ops.reshape(
array_ops.one_hot(
math_ops.to_int64(input_tensor),
math_ops.cast(input_tensor, dtypes.int64),
self.length,
1.,
0.,
@ -2136,8 +2136,10 @@ class _BucketizedColumn(
i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")
indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))
indices = math_ops.cast(array_ops.transpose(array_ops.stack((i1, i2))),
dtypes.int64)
shape = math_ops.cast(array_ops.stack([batch_size, dimension]),
dtypes.int64)
sparse_id_values = sparse_tensor_py.SparseTensor(
indices, bucket_indices, shape)
@ -2527,7 +2529,7 @@ class DataFrameColumn(_FeatureColumn,
trainable=True,
output_rank=2):
if input_tensor.dtype != dtypes.float32:
input_tensor = math_ops.to_float(input_tensor)
input_tensor = math_ops.cast(input_tensor, dtypes.float32)
return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def _to_dense_tensor(self, input_tensor):


@ -1399,9 +1399,10 @@ class DropoutTest(test.TestCase):
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0))
num_elem_initial = math_ops.reduce_mean(
math_ops.cast(images > 0, dtypes.float32))
output = _layers.dropout(images)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
self.assertGreater(num_elem, num_elem_initial / 2 - 0.1)
@ -1421,9 +1422,10 @@ class DropoutTest(test.TestCase):
with self.cached_session() as sess:
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0))
num_elem_initial = math_ops.reduce_mean(
math_ops.cast(images > 0, dtypes.float32))
output = _layers.dropout(images, is_training=False)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertEqual(num_elem, num_elem_initial)
outputs, inputs = sess.run([output, images])
@ -1435,9 +1437,10 @@ class DropoutTest(test.TestCase):
images = random_ops.random_uniform(
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(images, 50)
num_elem_initial = math_ops.reduce_mean(math_ops.to_float(output > 0))
num_elem_initial = math_ops.reduce_mean(
math_ops.cast(output > 0, dtypes.float32))
output = _layers.dropout(output)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
sess.run(variables_lib.global_variables_initializer())
num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial])
self.assertLess(num_elem, num_elem_initial / 2 + 0.1)
@ -1450,7 +1453,7 @@ class DropoutTest(test.TestCase):
(5, height, width, 3), seed=1, name='images')
output = _layers.fully_connected(
images, 50, normalizer_fn=_layers.dropout)
num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0))
num_elem = math_ops.reduce_mean(math_ops.cast(output > 0, dtypes.float32))
sess.run(variables_lib.global_variables_initializer())
num_elem = sess.run(num_elem)
self.assertLess(num_elem, 0.5)


@ -21,6 +21,7 @@ from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
@ -325,7 +326,7 @@ def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.to_float(global_step)
n = math_ops.cast(global_step, dtypes.float32)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
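The min(decay, n / (n + 1)) cap in this hunk is what gives the quicker adaptation at the beginning: early steps use a much smaller effective decay and only later steps reach the configured value. A quick standalone illustration with a hypothetical decay of 0.99:

    decay = 0.99
    for n in (1.0, 2.0, 10.0, 100.0):
        print(n, min(decay, n / (n + 1.0)))   # 0.5, 0.667..., 0.909..., then capped at 0.99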


@ -23,6 +23,7 @@ import six
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
@ -185,7 +186,8 @@ class _TargetColumn(object):
return None
else:
return array_ops.reshape(
math_ops.to_float(features[self._weight_column_name]), shape=(-1,))
math_ops.cast(features[self._weight_column_name], dtypes.float32),
shape=(-1,))
@property
def problem_type(self):
@ -252,9 +254,10 @@ class _TargetColumn(object):
if weight_tensor is None:
return math_ops.reduce_mean(loss_unweighted, name="loss")
loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
return math_ops.div(math_ops.reduce_sum(loss_weighted),
math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
name="loss")
return math_ops.div(
math_ops.reduce_sum(loss_weighted),
math_ops.cast(math_ops.reduce_sum(weight_tensor), dtypes.float32),
name="loss")
class _RegressionTargetColumn(_TargetColumn):
@ -323,7 +326,7 @@ class _MultiClassTargetColumn(_TargetColumn):
metrics = {("accuracy", "classes"): metric_ops.streaming_accuracy}
predictions = math_ops.sigmoid(logits)
labels_float = math_ops.to_float(labels)
labels_float = math_ops.cast(labels, dtypes.float32)
default_metrics = self._default_eval_metrics()
for metric_name, metric_op in default_metrics.items():
@ -399,7 +402,8 @@ def _mean_squared_loss(logits, target):
target = array_ops.expand_dims(target, axis=1)
logits.get_shape().assert_is_compatible_with(target.get_shape())
return math_ops.squared_difference(logits, math_ops.to_float(target))
return math_ops.squared_difference(logits,
math_ops.cast(target, dtypes.float32))
def _log_loss_with_two_classes(logits, target):
@ -407,7 +411,7 @@ def _log_loss_with_two_classes(logits, target):
if len(target.get_shape()) == 1:
target = array_ops.expand_dims(target, axis=1)
loss_vec = nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.to_float(target), logits=logits)
labels=math_ops.cast(target, dtypes.float32), logits=logits)
return loss_vec
@ -475,7 +479,7 @@ def get_default_binary_metrics_for_eval(thresholds):
def _float_weights_or_none(weights):
if weights is None:
return None
return math_ops.to_float(weights)
return math_ops.cast(weights, dtypes.float32)
def _labels_streaming_mean(unused_predictions, labels, weights=None):
@ -494,8 +498,8 @@ def _streaming_auc(predictions, labels, weights=None):
def _accuracy_at_threshold(threshold):
def _accuracy_metric(predictions, labels, weights=None):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
threshold_predictions = math_ops.cast(
math_ops.greater_equal(predictions, threshold), dtypes.float32)
return metric_ops.streaming_accuracy(
predictions=threshold_predictions, labels=labels, weights=weights)


@ -86,11 +86,11 @@ def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.to_int64(values[i])
values[i] = math_ops.cast(values[i], dtypes.int64)
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
dense_inputs[i] = math_ops.cast(dense_inputs[i], dtypes.int64)
internal_type = dtypes.int64
if hash_key:


@ -474,7 +474,7 @@ class DebugClassifierTest(test.TestCase):
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)


@ -807,7 +807,7 @@ class DNNLinearCombinedClassifierTest(test.TestCase):
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))


@ -815,7 +815,7 @@ class DNNClassifierTest(test.TestCase):
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)


@ -372,9 +372,10 @@ class DynamicRnnEstimatorTest(test.TestCase):
labels = array_ops.slice(random_sequence, [0, 0],
[batch_size, sequence_length])
inputs = array_ops.expand_dims(
math_ops.to_float(
math_ops.cast(
array_ops.slice(random_sequence, [0, 1],
[batch_size, sequence_length])), 2)
[batch_size, sequence_length]),
dtypes.float32), 2)
input_dict = {
dynamic_rnn_estimator._get_state_name(i): random_ops.random_uniform(
[batch_size, cell_size], seed=((i + 1) * seed))
@ -430,9 +431,10 @@ class DynamicRnnEstimatorTest(test.TestCase):
labels = array_ops.slice(sequence, [0, 0],
[batch_size, sequence_length])
inputs = array_ops.expand_dims(
math_ops.to_float(
math_ops.cast(
array_ops.slice(sequence, [0, 1], [batch_size, sequence_length
])), 2)
]),
dtypes.float32), 2)
input_dict = state_dict
input_dict['inputs'] = inputs
return input_dict, labels
@ -587,9 +589,11 @@ class DynamicRNNEstimatorLearningTest(test.TestCase):
labels = array_ops.slice(random_sequence, [0, 0],
[batch_size, sequence_length])
inputs = array_ops.expand_dims(
math_ops.to_float(
math_ops.cast(
array_ops.slice(random_sequence, [0, 1],
[batch_size, sequence_length])), 2)
[batch_size, sequence_length]),
dtypes.float32),
2)
return {'inputs': inputs}, labels
return input_fn
@ -719,11 +723,13 @@ class DynamicRNNEstimatorLearningTest(test.TestCase):
def input_fn():
random_sequence = random_ops.random_uniform(
[batch_size, sequence_length], 0, 2, dtype=dtypes.int32, seed=seed)
inputs = array_ops.expand_dims(math_ops.to_float(random_sequence), 2)
labels = math_ops.to_int32(
inputs = array_ops.expand_dims(
math_ops.cast(random_sequence, dtypes.float32), 2)
labels = math_ops.cast(
array_ops.squeeze(
math_ops.reduce_sum(inputs, axis=[1]) > (
sequence_length / 2.0)))
sequence_length / 2.0)),
dtypes.int32)
return {'inputs': inputs}, labels
return input_fn


@ -220,7 +220,7 @@ def _build_estimator_for_export_tests(tmpdir):
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
math_ops.cast(features['feature'], dtypes.int64))
return input_fn_utils.InputFnOps(features, labels, inputs)


@ -568,7 +568,7 @@ def _mean_squared_loss(labels, logits, weights=None):
logits = array_ops.expand_dims(logits, axis=1)
logits.get_shape().assert_is_compatible_with(labels.get_shape())
loss = math_ops.squared_difference(
logits, math_ops.to_float(labels), name=name)
logits, math_ops.cast(labels, dtypes.float32), name=name)
return _compute_weighted_loss(loss, weights)
@ -793,7 +793,7 @@ def _log_loss_with_two_classes(labels, logits, weights=None):
with ops.name_scope(None, "log_loss_with_two_classes",
(logits, labels)) as name:
logits = ops.convert_to_tensor(logits)
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
# TODO(ptucker): This will break for dynamic shapes.
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
if len(labels.get_shape()) == 1:
@ -1214,8 +1214,8 @@ def _sparse_labels_to_indicator(labels, num_classes):
if num_classes < 2:
raise ValueError("Must set num_classes >= 2 when passing labels as a "
"SparseTensor.")
return math_ops.to_int64(
sparse_ops.sparse_to_indicator(labels, num_classes))
return math_ops.cast(
sparse_ops.sparse_to_indicator(labels, num_classes), dtypes.int64)
return labels
@ -1400,8 +1400,9 @@ class _MultiLabelHead(_SingleHead):
math_ops.sigmoid(
logits, name=prediction_key.PredictionKey.PROBABILITIES),
prediction_key.PredictionKey.CLASSES:
math_ops.to_int64(
math_ops.cast(
math_ops.greater(logits, 0),
dtypes.int64,
name=prediction_key.PredictionKey.CLASSES)
}
@ -1783,7 +1784,7 @@ def _weight_tensor(features, weight_column_name):
raise ValueError("Weights {} missing from features.".format(
weight_column_name))
with ops.name_scope(None, "weight_tensor", tuple(six.itervalues(features))):
weight_tensor = math_ops.to_float(features[weight_column_name])
weight_tensor = math_ops.cast(features[weight_column_name], dtypes.float32)
shape = weight_tensor.get_shape()
rank = shape.ndims
# We don't bother with expanding dims of non-staticly shaped tensors or
@ -1833,7 +1834,7 @@ def _compute_weighted_loss(loss_unweighted, weight, name="loss"):
weighted_loss_mean = math_ops.reduce_mean(weighted_loss, name=name_scope)
weighted_loss_normalized = math_ops.div(
math_ops.reduce_sum(weighted_loss),
math_ops.to_float(math_ops.reduce_sum(weight)),
math_ops.cast(math_ops.reduce_sum(weight), dtypes.float32),
name="weighted_average_loss")
return weighted_loss_mean, weighted_loss_normalized
@ -1952,7 +1953,7 @@ def _sigmoid_cross_entropy_loss(labels, logits, weights=None):
(logits, labels)) as name:
# sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
loss = nn.sigmoid_cross_entropy_with_logits(
labels=math_ops.to_float(labels), logits=logits, name=name)
labels=math_ops.cast(labels, dtypes.float32), logits=logits, name=name)
return _compute_weighted_loss(loss, weights)
@ -1960,11 +1961,11 @@ def _float_weights_or_none(weights):
if weights is None:
return None
with ops.name_scope(None, "float_weights", (weights,)) as name:
return math_ops.to_float(weights, name=name)
return math_ops.cast(weights, dtypes.float32, name=name)
def _indicator_labels_streaming_mean(labels, weights=None, class_id=None):
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
@ -1978,7 +1979,7 @@ def _indicator_labels_streaming_mean(labels, weights=None, class_id=None):
def _predictions_streaming_mean(predictions,
weights=None,
class_id=None):
predictions = math_ops.to_float(predictions)
predictions = math_ops.cast(predictions, dtypes.float32)
weights = _float_weights_or_none(weights)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, predictions)
@ -2002,9 +2003,9 @@ def _class_predictions_streaming_mean(predictions, weights, class_id):
return metrics_lib.mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(predictions)),
array_ops.ones_like(predictions),
array_ops.zeros_like(predictions)),
math_ops.cast(class_id, dtypes.int32),
math_ops.cast(predictions, dtypes.int32)),
array_ops.ones_like(predictions), array_ops.zeros_like(predictions)),
weights=weights)
@ -2012,15 +2013,16 @@ def _class_labels_streaming_mean(labels, weights, class_id):
return metrics_lib.mean(
array_ops.where(
math_ops.equal(
math_ops.to_int32(class_id), math_ops.to_int32(labels)),
array_ops.ones_like(labels), array_ops.zeros_like(labels)),
math_ops.cast(class_id, dtypes.int32),
math_ops.cast(labels, dtypes.int32)), array_ops.ones_like(labels),
array_ops.zeros_like(labels)),
weights=weights)
def _streaming_auc(predictions, labels, weights=None, class_id=None,
curve="ROC"):
# pylint: disable=missing-docstring
predictions = math_ops.to_float(predictions)
predictions = math_ops.cast(predictions, dtypes.float32)
if labels.dtype.base_dtype != dtypes.bool:
logging.warning("Casting %s labels to bool.", labels.dtype)
labels = math_ops.cast(labels, dtypes.bool)
@ -2047,8 +2049,8 @@ def _assert_class_id(class_id, num_classes=None):
def _streaming_accuracy_at_threshold(predictions, labels, weights, threshold):
threshold_predictions = math_ops.to_float(
math_ops.greater_equal(predictions, threshold))
threshold_predictions = math_ops.cast(
math_ops.greater_equal(predictions, threshold), dtypes.float32)
return metrics_lib.accuracy(labels, threshold_predictions, weights)


@ -31,6 +31,7 @@ from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
@ -160,8 +161,9 @@ def _make_logistic_eval_metric_ops(labels, predictions, thresholds):
labels=labels_tensor, predictions=predictions)
for threshold in thresholds:
predictions_at_threshold = math_ops.to_float(
predictions_at_threshold = math_ops.cast(
math_ops.greater_equal(predictions, threshold),
dtypes.float32,
name='predictions_at_threshold_%f' % threshold)
metrics[metric_key.MetricKey.ACCURACY_MEAN % threshold] = (
metrics_lib.streaming_accuracy(labels=labels_tensor,


@ -396,8 +396,9 @@ class StateSavingRnnEstimatorTest(test.TestCase):
random_sequence = random_ops.random_uniform(
[sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
labels = array_ops.slice(random_sequence, [0], [sequence_length])
inputs = math_ops.to_float(
array_ops.slice(random_sequence, [1], [sequence_length]))
inputs = math_ops.cast(
array_ops.slice(random_sequence, [1], [sequence_length]),
dtypes.float32)
features = {'inputs': inputs}
if mode == model_fn_lib.ModeKeys.INFER:
@ -450,8 +451,9 @@ class LegacyConstructorTest(test.TestCase):
random_sequence = random_ops.random_uniform(
[sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
labels = array_ops.slice(random_sequence, [0], [sequence_length])
inputs = math_ops.to_float(
array_ops.slice(random_sequence, [1], [sequence_length]))
inputs = math_ops.cast(
array_ops.slice(random_sequence, [1], [sequence_length]),
dtypes.float32)
return {'inputs': inputs}, labels
return input_fn
@ -537,8 +539,9 @@ class StateSavingRNNEstimatorLearningTest(test.TestCase):
random_sequence = random_ops.random_uniform(
[sequence_length + 1], 0, 2, dtype=dtypes.int32, seed=seed)
labels = array_ops.slice(random_sequence, [0], [sequence_length])
inputs = math_ops.to_float(
array_ops.slice(random_sequence, [1], [sequence_length]))
inputs = math_ops.cast(
array_ops.slice(random_sequence, [1], [sequence_length]),
dtypes.float32)
return {'inputs': inputs}, labels
return input_fn


@ -624,7 +624,7 @@ class SdcaModel(object):
# Note that we need double precision to get accurate results.
with ops.control_dependencies(shard_sums):
shard_sums.append(
math_ops.reduce_sum(math_ops.to_double(values), 0))
math_ops.reduce_sum(math_ops.cast(values, dtypes.float64), 0))
summed_values = math_ops.add_n(shard_sums)
primal_loss = summed_values[1]
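The double-precision note above matters because float32 addition silently drops small contributions once the running sum is large. A standalone NumPy illustration of that failure mode (not part of the diff):

    import numpy as np

    print(np.float32(2**25) + np.float32(1) == np.float32(2**25))   # True: the +1 is lost
    print(np.float64(2**25) + np.float64(1) == np.float64(2**25))   # False: float64 keeps it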


@ -135,7 +135,7 @@ class SDCAOptimizer(object):
array_ops.reshape(
array_ops.split(
value=sparse_indices, num_or_size_splits=2, axis=1)[1], [-1]),
array_ops.reshape(math_ops.to_float(sparse_values), [-1]))
array_ops.reshape(math_ops.cast(sparse_values, dtypes.float32), [-1]))
def _training_examples_and_variables():
"""Returns dictionaries for training examples and variables."""
@ -254,8 +254,8 @@ class SDCAOptimizer(object):
examples = dict(
sparse_features=sparse_feature_with_values,
dense_features=dense_features,
example_labels=math_ops.to_float(
array_ops.reshape(targets, shape=[-1])),
example_labels=math_ops.cast(
array_ops.reshape(targets, shape=[-1]), dtypes.float32),
example_weights=example_weights,
example_ids=example_ids)
sdca_variables = dict(


@ -22,6 +22,7 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
@ -100,8 +101,8 @@ def compute_weighted_loss(losses, weights=1.0, scope=None):
with ops.name_scope(scope, "weighted_loss", [losses, weights]):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.to_float(losses)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
losses = math_ops.cast(losses, dtypes.float32)
weights = math_ops.cast(ops.convert_to_tensor(weights), dtypes.float32)
if losses.get_shape().ndims is None:
raise ValueError("losses.get_shape().ndims cannot be None")
@ -147,8 +148,8 @@ def _num_present(losses, weights, per_batch=False):
batch_size = array_ops.reshape(
array_ops.slice(array_ops.shape(losses), [0], [1]), [])
num_per_batch = math_ops.div(
math_ops.to_float(array_ops.size(losses)),
math_ops.to_float(batch_size))
math_ops.cast(array_ops.size(losses), dtypes.float32),
math_ops.cast(batch_size, dtypes.float32))
num_per_batch = array_ops.where(
math_ops.equal(weights, 0), 0.0, num_per_batch)
num_per_batch = math_ops.multiply(
@ -159,12 +160,14 @@ def _num_present(losses, weights, per_batch=False):
if weights.get_shape().ndims >= 1:
axis = list(range(1, weights.get_shape().ndims))
num_nonzero_per_batch = math_ops.reduce_sum(
math_ops.to_float(math_ops.not_equal(weights, 0)), axis=axis)
math_ops.cast(math_ops.not_equal(weights, 0), dtypes.float32),
axis=axis)
# Next, determine the number of elements that weights would broadcast to:
broadcast_dims = array_ops.slice(
array_ops.shape(losses), [weights.get_shape().ndims], [-1])
num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))
num_to_broadcast = math_ops.cast(math_ops.reduce_prod(broadcast_dims),
dtypes.float32)
num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
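The broadcast arithmetic in this hunk counts how many loss elements actually receive a nonzero weight: per batch entry, the nonzero-weight indicator times the number of loss elements that single weight broadcasts over. A small standalone NumPy illustration (hypothetical shapes, not part of the diff):

    import numpy as np

    losses = np.ones((2, 3))                                # [batch, k]
    weights = np.array([0.0, 2.0])                          # [batch], broadcasts over k
    nonzero_per_batch = (weights != 0).astype(np.float32)   # [0., 1.]
    num_to_broadcast = float(np.prod(losses.shape[1:]))     # 3.0
    print(nonzero_per_batch * num_to_broadcast)             # [0., 3.] present elements per batch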
@ -262,8 +265,8 @@ def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
with ops.name_scope(scope, "absolute_difference",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@ -438,8 +441,8 @@ def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
with ops.name_scope(scope, "log_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
losses = -math_ops.multiply(
labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
@ -473,7 +476,7 @@ def hinge_loss(logits, labels=None, scope=None):
with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
labels = math_ops.to_float(labels)
labels = math_ops.cast(labels, dtypes.float32)
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
return nn_ops.relu(
@ -509,8 +512,8 @@ def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
with ops.name_scope(scope, "mean_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
losses = math_ops.squared_difference(predictions, labels)
return compute_weighted_loss(losses, weights, scope=scope)
@ -563,9 +566,9 @@ def mean_pairwise_squared_error(predictions,
with ops.name_scope(scope, "mean_pairwise_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
weights = math_ops.cast(ops.convert_to_tensor(weights), dtypes.float32)
diffs = math_ops.subtract(predictions, labels)
@ -638,8 +641,8 @@ def cosine_distance(predictions,
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
predictions = math_ops.cast(predictions, dtypes.float32)
labels = math_ops.cast(labels, dtypes.float32)
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(


@ -67,11 +67,13 @@ def pairwise_distance(feature, squared=False):
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
pairwise_distances_squared +
math_ops.cast(error_mask, dtypes.float32) * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = math_ops.multiply(
pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
pairwise_distances,
math_ops.cast(math_ops.logical_not(error_mask), dtypes.float32))
num_data = array_ops.shape(feature)[0]
# Explicitly set diagonals to zero.
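The error_mask / 1e-16 handling in this hunk exists because sqrt has an infinite derivative at zero, so gradients taken through exact-zero squared distances come out inf/NaN; nudging those entries before the sqrt and multiplying by logical_not(error_mask) afterwards restores the exact zeros while keeping the gradient finite. A standalone TF 1.x graph-mode sketch of the underlying problem (hypothetical scalar x):

    import tensorflow as tf  # TF 1.x graph mode assumed

    x = tf.constant(0.0)
    grad = tf.gradients(tf.sqrt(x), x)[0]   # d/dx sqrt(x) = 1 / (2 * sqrt(x))
    with tf.Session() as sess:
        print(sess.run(grad))               # inf at x == 0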
@ -111,8 +113,8 @@ def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
# Add contrastive loss for the siamese network.
# label here is {0,1} for neg, pos.
return math_ops.reduce_mean(
math_ops.to_float(labels) * math_ops.square(distances) +
(1. - math_ops.to_float(labels)) *
math_ops.cast(labels, dtypes.float32) * math_ops.square(distances) +
(1. - math_ops.cast(labels, dtypes.float32)) *
math_ops.square(math_ops.maximum(margin - distances, 0.)),
name='contrastive_loss')
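Written out, the per-pair term averaged in this hunk is the standard contrastive loss, with y in {0, 1} marking a positive pair, d the embedding distance and m the margin:

    \ell(y, d) = y \, d^{2} + (1 - y) \, \max(m - d, 0)^{2}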
@ -284,8 +286,8 @@ def npairs_loss(labels, embeddings_anchor, embeddings_positive,
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
labels_remapped = math_ops.to_float(
math_ops.equal(labels, array_ops.transpose(labels)))
labels_remapped = math_ops.cast(
math_ops.equal(labels, array_ops.transpose(labels)), dtypes.float32)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
@ -318,9 +320,10 @@ def _build_multilabel_adjacency(sparse_labels):
adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
for i in range(num_pairs):
for j in range(num_pairs):
sparse_dot_product = math_ops.to_float(
sparse_dot_product = math_ops.cast(
sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
sparse_labels[i], sparse_labels[j])))
sparse_labels[i], sparse_labels[j])),
dtypes.float32)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
one_hot_matrix = array_ops.pad(sparse_dot_product,
@ -390,7 +393,7 @@ def npairs_loss_multilabel(sparse_labels, embeddings_anchor,
# TODO(coreylynch): are composed only of 0's and 1's.
multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
labels_remapped = math_ops.cast(multilabel_adjacency_matrix, dtypes.float32)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)
# Add the softmax loss.
@ -542,7 +545,8 @@ def get_cluster_assignment(pairwise_distances, centroid_ids):
array_ops.constant(0, dtype=dtypes.int64),
axis=0,
dtype=dtypes.int64),
math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
math_ops.cast(math_ops.range(array_ops.shape(centroid_ids)[0]),
dtypes.int64))
constraint_vect = math_ops.reduce_sum(
array_ops.transpose(constraint_one_hot), axis=0)
@ -606,46 +610,51 @@ def compute_clustering_score(labels, predictions, margin_type):
def _compute_nmi_score(labels, predictions):
return math_ops.to_float(
return math_ops.cast(
script_ops.py_func(
metrics.normalized_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='nmi'))
name='nmi'),
dtypes.float32)
def _compute_ami_score(labels, predictions):
ami_score = math_ops.to_float(
ami_score = math_ops.cast(
script_ops.py_func(
metrics.adjusted_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='ami'))
name='ami'),
dtypes.float32)
return math_ops.maximum(0.0, ami_score)
def _compute_ari_score(labels, predictions):
ari_score = math_ops.to_float(
ari_score = math_ops.cast(
script_ops.py_func(
metrics.adjusted_rand_score, [labels, predictions], [dtypes.float64],
name='ari'))
name='ari'),
dtypes.float32)
# ari score can go below 0
# http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
return math_ops.maximum(0.0, ari_score)
def _compute_vmeasure_score(labels, predictions):
vmeasure_score = math_ops.to_float(
vmeasure_score = math_ops.cast(
script_ops.py_func(
metrics.v_measure_score, [labels, predictions], [dtypes.float64],
name='vmeasure'))
name='vmeasure'),
dtypes.float32)
return math_ops.maximum(0.0, vmeasure_score)
def _compute_zeroone_score(labels, predictions):
zeroone_score = math_ops.to_float(
zeroone_score = math_ops.cast(
math_ops.equal(
math_ops.reduce_sum(
math_ops.to_int32(math_ops.equal(labels, predictions))),
array_ops.shape(labels)[0]))
math_ops.cast(math_ops.equal(labels, predictions), dtypes.int32)),
array_ops.shape(labels)[0]),
dtypes.float32)
return zeroone_score
@ -711,8 +720,8 @@ def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
candidate_scores = math_ops.add(
candidate_scores, margin_multiplier * nmi_scores)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, axis=0))
argmax_index = math_ops.cast(
math_ops.argmax(candidate_scores, axis=0), dtypes.int32)
return candidate_ids[argmax_index]
@ -787,7 +796,7 @@ def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
def func_body(iteration, scores_margin):
# swap the current medoid with the candidate cluster member
candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
candidate_medoid = math_ops.cast(cluster_member_ids[iteration], dtypes.int32)
tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
metric_score = compute_clustering_score(labels, predictions, margin_type)
@ -811,10 +820,10 @@ def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
[iteration, scores_margin])
candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, axis=0))
argmax_index = math_ops.cast(
math_ops.argmax(candidate_scores, axis=0), dtypes.int32)
best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
best_medoid = math_ops.cast(cluster_member_ids[argmax_index], dtypes.int32)
chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
return chosen_ids
@ -842,7 +851,8 @@ def update_all_medoids(pairwise_distances, predictions, labels, chosen_ids,
def func_body_augmented_pam(iteration, chosen_ids):
"""Call the update_medoid_per_cluster subroutine."""
mask = math_ops.equal(
math_ops.to_int64(predictions), math_ops.to_int64(iteration))
math_ops.cast(predictions, dtypes.int64),
math_ops.cast(iteration, dtypes.int64))
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(


@ -772,7 +772,7 @@ def _streaming_confusion_matrix_at_thresholds(predictions,
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
math_ops.cast(weights, dtypes.float32), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
@ -786,8 +786,8 @@ def _streaming_confusion_matrix_at_thresholds(predictions,
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
is_true_positive = math_ops.cast(
math_ops.logical_and(label_is_pos, pred_is_pos), dtypes.float32)
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
@ -798,8 +798,8 @@ def _streaming_confusion_matrix_at_thresholds(predictions,
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
is_false_negative = math_ops.cast(
math_ops.logical_and(label_is_pos, pred_is_neg), dtypes.float32)
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
@ -810,8 +810,8 @@ def _streaming_confusion_matrix_at_thresholds(predictions,
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
is_true_negative = math_ops.cast(
math_ops.logical_and(label_is_neg, pred_is_neg), dtypes.float32)
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
@ -822,8 +822,8 @@ def _streaming_confusion_matrix_at_thresholds(predictions,
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
is_false_positive = math_ops.cast(
math_ops.logical_and(label_is_neg, pred_is_pos), dtypes.float32)
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
@ -2164,7 +2164,7 @@ def streaming_recall_at_k(predictions,
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
in_top_k = math_ops.cast(nn.in_top_k(predictions, labels, k), dtypes.float32)
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
@ -3205,7 +3205,8 @@ def streaming_covariance(predictions,
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
batch_count = math_ops.cast(
array_ops.size(labels), dtypes.float32) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
@ -3765,15 +3766,15 @@ def count(values,
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
num_values = math_ops.cast(array_ops.size(values), dtypes.float32)
else:
values = math_ops.to_float(values)
values = math_ops.cast(values, dtypes.float32)
values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
math_ops.cast(weights, dtypes.float32), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
@ -3895,10 +3896,11 @@ def cohen_kappa(labels,
total = math_ops.reduce_sum(pe_row)
pe_sum = math_ops.reduce_sum(
math_ops.div_no_nan(
math_ops.to_double(pe_row * pe_col), math_ops.to_double(total)))
po_sum, pe_sum, total = (math_ops.to_double(po_sum),
math_ops.to_double(pe_sum),
math_ops.to_double(total))
math_ops.cast(pe_row * pe_col, dtypes.float64),
math_ops.cast(total, dtypes.float64)))
po_sum, pe_sum, total = (math_ops.cast(po_sum, dtypes.float64),
math_ops.cast(pe_sum, dtypes.float64),
math_ops.cast(total, dtypes.float64))
# kappa = (po - pe) / (N - pe)
k = metrics_impl._safe_scalar_div( # pylint: disable=protected-access
po_sum - pe_sum,
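The count form in the comment above, with PO = po_sum (observed agreement), PE = pe_sum (chance agreement) and N = total, is the textbook Cohen's kappa after multiplying numerator and denominator by N:

    \kappa = \frac{PO - PE}{N - PE} = \frac{p_o - p_e}{1 - p_e}, \qquad p_o = PO / N, \quad p_e = PE / N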


@ -5810,9 +5810,10 @@ class StreamingCovarianceTest(test.TestCase):
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
@ -5823,18 +5824,20 @@ class StreamingCovarianceTest(test.TestCase):
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) +
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@ -5857,8 +5860,8 @@ class StreamingCovarianceTest(test.TestCase):
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
@ -5982,9 +5985,10 @@ class StreamingPearsonRTest(test.TestCase):
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
@ -6003,18 +6007,20 @@ class StreamingPearsonRTest(test.TestCase):
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) +
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@ -6038,8 +6044,8 @@ class StreamingPearsonRTest(test.TestCase):
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)


@ -20,6 +20,7 @@ from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
@ -224,7 +225,7 @@ class GGTOptimizer(optimizer_v2.OptimizerV2):
window = state.get_hyper("window")
grad_buffer = self._get_grad_buffer(state)
next_grad_index = math_ops.floormod(
math_ops.to_int32(update_global_step - 1.), window)
math_ops.cast(update_global_step - 1., dtypes.int32), window)
# grad_buffer[(t-1) % window] := moment1_t
update_grad_buffer = state_ops.scatter_update(grad_buffer, next_grad_index,
update_moment1)


@ -17,6 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
@ -57,7 +58,7 @@ def matrix_square_root(mat_a, mat_a_size, iter_count=100, ridge_epsilon=1e-4):
current_err = math_ops.sqrt(math_ops.reduce_sum(residual * residual)) / norm
return i + 1, current_mat_y, mat_y, current_mat_z, mat_z, current_err, err
identity = linalg_ops.eye(math_ops.to_int32(mat_a_size))
identity = linalg_ops.eye(math_ops.cast(mat_a_size, dtypes.int32))
mat_a = mat_a + ridge_epsilon * identity
norm = math_ops.sqrt(math_ops.reduce_sum(mat_a * mat_a))
mat_init_y = mat_a / norm
@ -100,7 +101,7 @@ def matrix_inverse_pth_root(mat_g,
mat_g^alpha
"""
identity = linalg_ops.eye(math_ops.to_int32(mat_g_size))
identity = linalg_ops.eye(math_ops.cast(mat_g_size, dtypes.int32))
def mat_power(mat_m, p):
"""Computes mat_m^p, for p a positive integer.


@ -24,6 +24,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import matrix_functions
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
@ -120,7 +121,7 @@ class ShampooOptimizer(optimizer.Optimizer):
super(ShampooOptimizer, self).__init__(use_locking, name)
self._global_step = math_ops.to_float(global_step)
self._global_step = math_ops.cast(global_step, dtypes.float32)
self._max_matrix_size = max_matrix_size
self._gbar_decay = gbar_decay
self._gbar_weight = gbar_weight
@ -246,7 +247,8 @@ class ShampooOptimizer(optimizer.Optimizer):
if mat_g_size == 1:
mat_h = math_ops.pow(mat_g + self._epsilon, alpha)
else:
damping = self._epsilon * linalg_ops.eye(math_ops.to_int32(mat_g_size))
damping = self._epsilon * linalg_ops.eye(
math_ops.cast(mat_g_size, dtypes.int32))
diag_d, mat_u, mat_v = linalg_ops.svd(mat_g + damping, full_matrices=True)
mat_h = math_ops.matmul(
mat_v * math_ops.pow(math_ops.maximum(diag_d, self._epsilon), alpha),


@ -23,7 +23,9 @@ from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
@ -51,10 +53,10 @@ def get_linear_decay_fn(decay_steps):
if global_step is None:
raise ValueError("global_step is required for linear_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
remaining_steps = math_ops.to_int32(decay_steps) - math_ops.to_int32(
global_step)
decayed = math_ops.to_float(remaining_steps) / math_ops.to_float(
decay_steps)
remaining_steps = math_ops.cast(
decay_steps, dtypes.int32) - math_ops.cast(global_step, dtypes.int32)
decayed = (math_ops.cast(remaining_steps, dtypes.float32) /
math_ops.cast(decay_steps, dtypes.float32))
return math_ops.maximum(0.0, decayed)
# pylint:enable=missing-docstring
return linear_decay_fn
@ -92,8 +94,8 @@ def get_cosine_decay_fn(decay_steps, num_periods=0.5, zero_after=None):
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
completed_fraction = math_ops.to_float(global_step) / math_ops.to_float(
decay_steps)
completed_fraction = (math_ops.cast(global_step, dtypes.float32) /
math_ops.cast(decay_steps, dtypes.float32))
fraction = 2.0 * num_periods * completed_fraction
decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
@ -143,14 +145,14 @@ def get_restart_decay_fn(decay_steps, num_periods=1, zero_after=None):
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
global_step = math_ops.minimum(global_step, decay_steps)
num = math_ops.mod(num_periods * math_ops.to_float(global_step),
num = math_ops.mod(num_periods * math_ops.cast(global_step, dtypes.float32),
decay_steps)
fraction = num / math_ops.to_float(decay_steps)
fraction = num / math_ops.cast(decay_steps, dtypes.float32)
decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
if zero_after is not None:
tmp = math_ops.to_float(
num_periods * global_step) / math_ops.to_float(decay_steps)
tmp = (math_ops.cast(num_periods * global_step, dtypes.float32) /
math_ops.cast(decay_steps, dtypes.float32))
decayed = array_ops.where(
math_ops.greater_equal(tmp, zero_after), 0.0, decayed)
return decayed


@ -100,7 +100,7 @@ def _Update(struct_acc, struct_x, t):
to_skip_update = set()
acc_lst = nest.flatten(struct_acc)
x_lst = nest.flatten(struct_x)
t = math_ops.to_int32([t]) # tf.to_int32 casts on-device tensors.
t = math_ops.cast([t], dtypes.int32) # tf.to_int32 casts on-device tensors.
lst = []
for acc, x in zip(acc_lst, x_lst):
if acc in to_skip_update:
@ -429,7 +429,8 @@ class _Recurrent(object):
acc_extras = _EmptyAcc(slen_dim, extras)
t = slen_dim - max_input_length if self._aligned_end else 0
dev_t = math_ops.to_int32(t) if use_tpu else math_ops.to_int64(t)
dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
t, dtypes.int64)
run = functional_ops.For(
start=t,
limit=slen_dim if self._aligned_end else max_input_length,
@ -568,7 +569,8 @@ class _Recurrent(object):
# Loop backwards. Note the loop's limit is open-ended, so goes through
# t=0.
t = slen_dim - 1 if self._aligned_end else max_input_length - 1
dev_t = math_ops.to_int32(t) if use_tpu else math_ops.to_int64(t)
dev_t = math_ops.cast(t, dtypes.int32) if use_tpu else math_ops.cast(
t, dtypes.int64)
limit = slen_dim - max_input_length - 1 if self._aligned_end else -1
run = functional_ops.For(
start=t,


@ -691,9 +691,10 @@ class LSTMBlockFusedCell(LSTMBlockWrapper):
wci = wcf = wco = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
max_seq_len = math_ops.cast(time_len, dtypes.int64)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
max_seq_len = math_ops.cast(math_ops.reduce_max(sequence_length),
dtypes.int64)
_, cs, _, _, _, _, h = gen_lstm_ops.block_lstm(
seq_len_max=max_seq_len,


@ -149,8 +149,8 @@ def gather_tree_from_array(t, parent_ids, sequence_length):
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(sequence_length, axis=1))
max_sequence_lengths = math_ops.cast(
math_ops.reduce_max(sequence_length, axis=1), dtypes.int32)
sorted_beam_ids = beam_search_ops.gather_tree(
step_ids=beam_ids,
parent_ids=parent_ids,
@ -351,8 +351,8 @@ class BeamSearchDecoderMixin(object):
"""
del sequence_lengths
# Get max_sequence_length across all beams for each batch.
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(final_state.lengths, axis=1))
max_sequence_lengths = math_ops.cast(
math_ops.reduce_max(final_state.lengths, axis=1), dtypes.int32)
predicted_ids = beam_search_ops.gather_tree(
outputs.predicted_ids,
outputs.parent_ids,
@ -985,7 +985,7 @@ def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
on_value=np.int64(0),
off_value=np.int64(1),
dtype=dtypes.int64)
add_mask = math_ops.to_int64(not_finished)
add_mask = math_ops.cast(not_finished, dtypes.int64)
lengths_to_add *= array_ops.expand_dims(add_mask, 2)
new_prediction_lengths = (
lengths_to_add + array_ops.expand_dims(prediction_lengths, 2))
@ -996,7 +996,8 @@ def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
attention_probs = get_attention_probs(
next_cell_state, coverage_penalty_weight)
if attention_probs is not None:
attention_probs *= array_ops.expand_dims(math_ops.to_float(not_finished), 2)
attention_probs *= array_ops.expand_dims(
math_ops.cast(not_finished, dtypes.float32), 2)
accumulated_attention_probs = (
beam_state.accumulated_attention_probs + attention_probs)
@ -1030,15 +1031,17 @@ def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
gather_shape=[-1],
name="next_beam_probs")
# Note: just doing the following
# math_ops.to_int32(word_indices % vocab_size,
# math_ops.cast(
# word_indices % vocab_size,
# dtypes.int32,
# name="next_beam_word_ids")
# would be a lot cleaner but for reasons unclear, that hides the results of
# the op which prevents capturing it with tfdbg debug ops.
raw_next_word_ids = math_ops.mod(
word_indices, vocab_size, name="next_beam_word_ids")
next_word_ids = math_ops.to_int32(raw_next_word_ids)
next_beam_ids = math_ops.to_int32(
word_indices / vocab_size, name="next_beam_parent_ids")
next_word_ids = math_ops.cast(raw_next_word_ids, dtypes.int32)
next_beam_ids = math_ops.cast(
word_indices / vocab_size, dtypes.int32, name="next_beam_parent_ids")
# Append new ids to current predictions
previously_finished = _tensor_gather_helper(
@ -1057,7 +1060,8 @@ def _beam_search_step(time, logits, next_cell_state, beam_state, batch_size,
# 2. Beams that are now finished (EOS predicted) have their length
# increased by 1.
# 3. Beams that are not yet finished have their length increased by 1.
lengths_to_add = math_ops.to_int64(math_ops.logical_not(previously_finished))
lengths_to_add = math_ops.cast(
math_ops.logical_not(previously_finished), dtypes.int64)
next_prediction_len = _tensor_gather_helper(
gather_indices=next_beam_ids,
gather_from=beam_state.lengths,
@ -1204,7 +1208,7 @@ def _get_scores(log_probs, sequence_lengths, length_penalty_weight,
coverage_penalty = math_ops.reduce_sum(
math_ops.log(math_ops.minimum(accumulated_attention_probs, 1.0)), 2)
# Apply coverage penalty to finished predictions.
coverage_penalty *= math_ops.to_float(finished)
coverage_penalty *= math_ops.cast(finished, dtypes.float32)
weighted_coverage_penalty = coverage_penalty * coverage_penalty_weight
# Reshape from [batch_size, beam_width] to [batch_size, beam_width, 1]
weighted_coverage_penalty = array_ops.expand_dims(
@ -1257,8 +1261,9 @@ def _length_penalty(sequence_lengths, penalty_factor):
static_penalty = tensor_util.constant_value(penalty_factor)
if static_penalty is not None and static_penalty == 0:
return 1.0
return math_ops.div((5. + math_ops.to_float(sequence_lengths))
**penalty_factor, (5. + 1.)**penalty_factor)
return math_ops.div(
(5. + math_ops.cast(sequence_lengths, dtypes.float32))**penalty_factor,
(5. + 1.)**penalty_factor)
def _mask_probs(probs, eos_token, finished):

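A detail visible in the beam-search hunks above: math_ops.cast accepts the same name= keyword the deprecated helpers did, which is how op names such as "next_beam_parent_ids" survive the rewrite. A toy sketch of that pattern (the values and the op name here are illustrative, not from the diff):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops

word_indices = constant_op.constant([7, 12])
vocab_size = 5
# A named cast, mirroring the next_beam_parent_ids replacement above.
next_beam_ids = math_ops.cast(
    word_indices / vocab_size, dtypes.int32, name="toy_parent_ids")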

@ -252,8 +252,9 @@ def parallel_read(data_sources,
common_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=dtypes, name='common_queue')
summary.scalar('fraction_of_%d_full' % capacity,
math_ops.to_float(common_queue.size()) * (1. / capacity))
summary.scalar(
'fraction_of_%d_full' % capacity,
math_ops.cast(common_queue.size(), tf_dtypes.float32) * (1. / capacity))
return ParallelReader(
reader_class,


@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
@ -86,6 +87,7 @@ def prefetch_queue(tensors,
enqueue_op = queue.enqueue(tensors)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op] * num_threads))
summary.scalar("fraction_of_%d_full" % capacity,
math_ops.to_float(queue.size()) * (1. / capacity))
summary.scalar(
"fraction_of_%d_full" % capacity,
math_ops.cast(queue.size(), _dtypes.float32) * (1. / capacity))
return queue


@ -329,7 +329,7 @@ class SparseTensor(ItemHandler):
shape = indices.dense_shape
indices_shape = array_ops.shape(indices.indices)
rank = indices_shape[1]
ids = math_ops.to_int64(indices.values)
ids = math_ops.cast(indices.values, dtypes.int64)
indices_columns_to_preserve = array_ops.slice(
indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
new_indices = array_ops.concat(


@ -54,18 +54,19 @@ def create_test_input(batch_size, height, width, channels):
return array_ops.placeholder(dtypes.float32,
(batch_size, height, width, channels))
else:
return math_ops.to_float(
return math_ops.cast(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) + np.reshape(
np.arange(width), [1, width]), [1, height, width, 1]),
[batch_size, 1, 1, channels]))
[batch_size, 1, 1, channels]), dtypes.float32)
class ResnetUtilsTest(test.TestCase):
def testSubsampleThreeByThree(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(9)), [1, 3, 3, 1])
x = array_ops.reshape(math_ops.cast(math_ops.range(9), dtypes.float32),
[1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])
@ -73,7 +74,8 @@ class ResnetUtilsTest(test.TestCase):
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(16)), [1, 4, 4, 1])
x = array_ops.reshape(math_ops.cast(math_ops.range(16), dtypes.float32),
[1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])
@ -95,19 +97,20 @@ class ResnetUtilsTest(test.TestCase):
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
[43, 66, 84, 46], [26, 37, 46, 22]])
y1_expected = math_ops.cast([[14, 28, 43, 26], [28, 48, 66, 37],
[43, 66, 84, 46], [26, 37, 46, 22]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43], [43, 84]])
y2_expected = math_ops.cast([[14, 43], [43, 84]], dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = math_ops.to_float([[48, 37], [37, 22]])
y4_expected = math_ops.cast([[48, 37], [37, 22]], dtypes.float32)
y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])
with self.cached_session() as sess:
@ -132,14 +135,19 @@ class ResnetUtilsTest(test.TestCase):
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = math_ops.cast([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
y2_expected = math_ops.cast([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]],
dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')


@ -54,18 +54,20 @@ def create_test_input(batch_size, height, width, channels):
return array_ops.placeholder(dtypes.float32,
(batch_size, height, width, channels))
else:
return math_ops.to_float(
return math_ops.cast(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) + np.reshape(
np.arange(width), [1, width]), [1, height, width, 1]),
[batch_size, 1, 1, channels]))
[batch_size, 1, 1, channels]),
dtypes.float32)
class ResnetUtilsTest(test.TestCase):
def testSubsampleThreeByThree(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(9)), [1, 3, 3, 1])
x = array_ops.reshape(math_ops.cast(math_ops.range(9), dtypes.float32),
[1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])
@ -73,7 +75,8 @@ class ResnetUtilsTest(test.TestCase):
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(16)), [1, 4, 4, 1])
x = array_ops.reshape(math_ops.cast(math_ops.range(16), dtypes.float32),
[1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])
@ -95,19 +98,22 @@ class ResnetUtilsTest(test.TestCase):
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
[43, 66, 84, 46], [26, 37, 46, 22]])
y1_expected = math_ops.cast([[14, 28, 43, 26],
[28, 48, 66, 37],
[43, 66, 84, 46],
[26, 37, 46, 22]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43], [43, 84]])
y2_expected = math_ops.cast([[14, 43], [43, 84]], dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = math_ops.to_float([[48, 37], [37, 22]])
y4_expected = math_ops.cast([[48, 37], [37, 22]], dtypes.float32)
y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])
with self.cached_session() as sess:
@ -132,17 +138,19 @@ class ResnetUtilsTest(test.TestCase):
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = math_ops.cast([[14, 28, 43, 58, 34],
[28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]],
dtypes.float32)
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]])
y2_expected = math_ops.cast([[14, 43, 34],
[43, 84, 55],
[34, 55, 30]],
dtypes.float32)
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')


@ -22,6 +22,7 @@ import numpy as np
from tensorflow.contrib import losses
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
@ -35,7 +36,7 @@ FEATURE_IMPORTANCE_NAME = 'global_feature_importance'
def _top_k_generator(k):
def _top_k(probabilities, targets):
targets = math_ops.to_int32(targets)
targets = math_ops.cast(targets, dtypes.int32)
if targets.get_shape().ndims > 1:
targets = array_ops.squeeze(targets, axis=[1])
return metrics.mean(nn.in_top_k(probabilities, targets, k))
@ -48,7 +49,7 @@ def _accuracy(predictions, targets, weights=None):
def _r2(probabilities, targets, weights=None):
targets = math_ops.to_float(targets)
targets = math_ops.cast(targets, dtypes.float32)
y_mean = math_ops.reduce_mean(targets, 0)
squares_total = math_ops.reduce_sum(
math_ops.squared_difference(targets, y_mean), 0)
@ -60,7 +61,7 @@ def _r2(probabilities, targets, weights=None):
def _squeeze_and_onehot(targets, depth):
targets = array_ops.squeeze(targets, axis=[1])
return array_ops.one_hot(math_ops.to_int32(targets), depth)
return array_ops.one_hot(math_ops.cast(targets, dtypes.int32), depth)
def _sigmoid_entropy(probabilities, targets, weights=None):
@ -75,7 +76,7 @@ def _sigmoid_entropy(probabilities, targets, weights=None):
def _softmax_entropy(probabilities, targets, weights=None):
return metrics.mean(
losses.sparse_softmax_cross_entropy(probabilities,
math_ops.to_int32(targets)),
math_ops.cast(targets, dtypes.int32)),
weights=weights)


@ -22,6 +22,7 @@ import collections
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as framework_variables
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
@ -110,14 +111,15 @@ class HybridModel(object):
"""The loss to minimize while training."""
if self.is_regression:
diff = self.training_inference_graph(data) - math_ops.to_float(labels)
diff = self.training_inference_graph(data) - math_ops.cast(
labels, dtypes.float32)
mean_squared_error = math_ops.reduce_mean(diff * diff)
root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
loss = root_mean_squared_error
else:
loss = math_ops.reduce_mean(
nn_ops.sparse_softmax_cross_entropy_with_logits(
labels=array_ops.squeeze(math_ops.to_int32(labels)),
labels=array_ops.squeeze(math_ops.cast(labels, dtypes.int32)),
logits=self.training_inference_graph(data)),
name="loss")
if self.regularizer:


@ -44,7 +44,7 @@ def CastToFloat(tensor):
if tensor.dtype == dtypes.string:
return tensor_forest_ops.reinterpret_string_to_float(tensor)
elif tensor.dtype.is_integer:
return math_ops.to_float(tensor)
return math_ops.cast(tensor, dtypes.float32)
else:
return tensor
@ -195,7 +195,7 @@ def ParseLabelTensorOrDict(labels):
A 2-D tensor for labels/outputs.
"""
if isinstance(labels, dict):
return math_ops.to_float(
return math_ops.cast(
array_ops.concat(
[
sparse_ops.sparse_tensor_to_dense(
@ -203,10 +203,12 @@ def ParseLabelTensorOrDict(labels):
labels, sparse_tensor.SparseTensor) else labels[k]
for k in sorted(labels.keys())
],
1))
1),
dtypes.float32)
else:
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
labels, default_value=-1))
return math_ops.cast(
sparse_ops.sparse_tensor_to_dense(labels, default_value=-1),
dtypes.float32)
else:
return math_ops.to_float(labels)
return math_ops.cast(labels, dtypes.float32)


@ -30,6 +30,7 @@ from tensorflow.contrib.tensor_forest.python.ops import data_ops
from tensorflow.contrib.tensor_forest.python.ops import model_ops
from tensorflow.contrib.tensor_forest.python.ops import stats_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
@ -540,7 +541,8 @@ class RandomForestGraphs(object):
for i in range(self.params.num_trees):
with ops.device(self.variables.device_dummies[i].device):
sizes.append(self.trees[i].size())
return math_ops.reduce_mean(math_ops.to_float(array_ops.stack(sizes)))
return math_ops.reduce_mean(
math_ops.cast(array_ops.stack(sizes), dtypes.float32))
# pylint: disable=unused-argument
def training_loss(self, features, labels, name='training_loss'):
@ -603,7 +605,7 @@ class RandomTreeGraphs(object):
The last op in the random tree training graph.
"""
# TODO(gilberth): Use this.
unused_epoch = math_ops.to_int32(get_epoch_variable())
unused_epoch = math_ops.cast(get_epoch_variable(), dtypes.int32)
if input_weights is None:
input_weights = []


@ -308,10 +308,11 @@ def _cross_replica_concat(tensor, core_id, num_cores, name):
'{}.'.format(input_dtype, name))
batch_size = tensor.shape[0]
mask = math_ops.to_float(
math_ops.equal(np.arange(num_cores, dtype=np.int32), core_id))
mask = math_ops.cast(
math_ops.equal(np.arange(num_cores, dtype=np.int32), core_id),
dtypes.float32)
mask = array_ops.reshape(mask, [num_cores] + [1] * tensor.shape.ndims)
result = mask * math_ops.to_float(tensor)
result = mask * math_ops.cast(tensor, dtypes.float32)
local_tensor_with_holes = array_ops.reshape(result,
[-1] + result.shape.as_list()[2:])
concat_tensor = tpu_ops.cross_replica_sum(local_tensor_with_holes)


@ -400,7 +400,7 @@ def bucket_by_sequence_length(input_length,
math_ops.less_equal(buckets_min, input_length),
math_ops.less(input_length, buckets_max))
which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
which_bucket = math_ops.to_int32(which_bucket)
which_bucket = math_ops.cast(which_bucket, dtypes.int32)
if shapes is not None:
shapes = [tensor_shape.scalar()] + shapes


@ -1597,7 +1597,7 @@ def _padding(sequences, num_unroll):
else: # Only have SparseTensors
sparse_lengths = [value.dense_shape[0] for value in sequences_dict.values()
if isinstance(value, sparse_tensor.SparseTensor)]
length = math_ops.reduce_max(math_ops.to_int32(sparse_lengths))
length = math_ops.reduce_max(math_ops.cast(sparse_lengths, dtypes.int32))
unroll = array_ops.constant(num_unroll)
padded_length = length + ((unroll - (length % unroll)) % unroll)
@ -1620,8 +1620,9 @@ def _padding(sequences, num_unroll):
# 3. concat values with paddings
padded_sequences[key] = array_ops.concat([value, paddings], 0)
else:
padded_shape = array_ops.concat([[math_ops.to_int64(padded_length)],
value.dense_shape[1:]], 0)
padded_shape = array_ops.concat(
[[math_ops.cast(padded_length, dtypes.int64)], value.dense_shape[1:]],
0)
padded_sequences[key] = sparse_tensor.SparseTensor(
indices=value.indices,
values=value.values,
@ -1834,8 +1835,8 @@ def _reconstruct_sparse_tensor_seq(sequence,
Returns:
A SparseTensor with a +1 higher rank than the input.
"""
idx_batch = math_ops.to_int64(
math_ops.floor(sp_tensor.indices[:, 0] / num_unroll))
idx_batch = math_ops.cast(
math_ops.floor(sp_tensor.indices[:, 0] / num_unroll), dtypes.int64)
idx_time = math_ops.mod(sp_tensor.indices[:, 0], num_unroll)
indices = array_ops.concat(
[


@ -22,6 +22,7 @@ from __future__ import print_function
import tensorflow.lite.python.op_hint as op_hint
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
@ -215,7 +216,7 @@ def dynamic_rnn(cell,
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = math_ops.cast(sequence_length, dtypes.int32)
if sequence_length.get_shape().rank not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "


@ -44,7 +44,7 @@ class RejectionResampleTest(test_base.DatasetTestBase, parameterized.TestCase):
classes = np.random.randint(5, size=(20000,)) # Uniformly sampled
target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
initial_dist = [0.2] * 5 if initial_known else None
classes = math_ops.to_int64(classes) # needed for Windows build.
classes = math_ops.cast(classes, dtypes.int64) # needed for Windows build.
dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()


@ -52,7 +52,7 @@ class FlatMapDatasetSerializationTest(
def flat_map_fn(_):
def map_fn(y):
return 10 * math_ops.to_int32(y)
return 10 * math_ops.cast(y, dtypes.int32)
return dataset_ops.Dataset.range(100).map(map_fn)
@ -68,7 +68,7 @@ class FlatMapDatasetSerializationTest(
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.from_tensor_slices([defun_fn(x)])
@ -94,7 +94,7 @@ class FlatMapDatasetSerializationTest(
def map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.to_int32(x)
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(map_fn)


@ -64,7 +64,7 @@ class MapDatasetSerializationTest(
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.to_int32(x)
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(_map_fn)
@ -96,7 +96,7 @@ class MapDatasetSerializationTest(
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(num_outputs).map(defun_fn)
@ -112,9 +112,10 @@ class MapDatasetSerializationTest(
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return constant_op.constant(11000) + defun_fn_deep(math_ops.to_int32(x))
return constant_op.constant(11000) + defun_fn_deep(
math_ops.cast(x, dtypes.int32))
return dataset_ops.Dataset.range(num_outputs).map(defun_fn)


@ -74,7 +74,7 @@ class ParallelMapDatasetSerializationTest(
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.to_int32(x)
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(
_map_fn, num_parallel_calls=2).prefetch(2)
@ -108,7 +108,7 @@ class ParallelMapDatasetSerializationTest(
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(num_outputs).map(
defun_fn, num_parallel_calls=2).prefetch(2)
@ -125,9 +125,10 @@ class ParallelMapDatasetSerializationTest(
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.to_int32(x)
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return constant_op.constant(11000) + defun_fn_deep(math_ops.to_int32(x))
return constant_op.constant(11000) + defun_fn_deep(
math_ops.cast(x, dtypes.int32))
return dataset_ops.Dataset.range(num_outputs).map(
defun_fn, num_parallel_calls=2).prefetch(2)


@ -117,7 +117,7 @@ class BatchTest(test_base.DatasetTestBase, parameterized.TestCase):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.to_int32(i)], i),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5)


@ -147,7 +147,7 @@ class WindowTest(test_base.DatasetTestBase, parameterized.TestCase):
return sparse_tensor.SparseTensorValue(
indices=array_ops.expand_dims(
math_ops.range(i, dtype=dtypes.int64), 1),
values=array_ops.fill([math_ops.to_int32(i)], i),
values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i),
dense_shape=[i])
dataset = dataset_ops.Dataset.range(10).map(_sparse).window(


@ -2292,7 +2292,7 @@ class _NumericColumn(_DenseColumn,
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.to_float(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@property
def _variable_shape(self):
@ -2902,7 +2902,7 @@ class _WeightedCategoricalColumn(
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.to_float(weight_tensor)
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return (inputs.get(self.categorical_column), weight_tensor)
def _get_sparse_tensors(


@ -572,7 +572,7 @@ class HierarchicalController(Controller):
logits = array_ops.reshape(logits,
[batch_size * self.num_ops, self.num_groups])
actions = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
actions = math_ops.to_int32(actions)
actions = math_ops.cast(actions, dtypes.int32)
actions = array_ops.reshape(actions, [batch_size, self.num_ops])
action_label = array_ops.reshape(actions, [-1])
log_probs = nn_ops.sparse_softmax_cross_entropy_with_logits(
@ -924,7 +924,7 @@ class HierarchicalController(Controller):
next_y = array_ops.slice(y, [0, i], [-1, 1])
else:
raise NotImplementedError
next_y = math_ops.to_int32(next_y)
next_y = math_ops.cast(next_y, dtypes.int32)
next_y = array_ops.reshape(next_y, [self.hparams.num_children])
actions = actions.write(i, next_y)
log_probs += nn_ops.sparse_softmax_cross_entropy_with_logits(


@ -5133,7 +5133,8 @@ def ctc_label_dense_to_sparse(labels, label_lengths):
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape))
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
@ -5154,10 +5155,12 @@ def ctc_batch_cost(y_true, y_pred, input_length, label_length):
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
label_length = math_ops.to_int32(array_ops.squeeze(label_length, axis=-1))
input_length = math_ops.to_int32(array_ops.squeeze(input_length, axis=-1))
sparse_labels = math_ops.to_int32(
ctc_label_dense_to_sparse(y_true, label_length))
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
@ -5196,7 +5199,7 @@ def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.to_int32(input_length)
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(

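The int64 casts in ctc_label_dense_to_sparse above exist because SparseTensor requires its indices and dense_shape to be 64-bit tensors. A self-contained toy sketch of that constraint (the values are illustrative, not from the diff):

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops

indices = [[0, 0], [1, 2]]     # toy indices
dense_shape = [2, 3]           # toy shape
labels_sparse = sparse_tensor.SparseTensor(
    math_ops.cast(indices, dtypes.int64),       # was: math_ops.to_int64(indices)
    [1.0, 2.0],
    math_ops.cast(dense_shape, dtypes.int64))   # was: math_ops.to_int64(label_shape)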

@ -460,7 +460,7 @@ class ClipTest(test.TestCase):
clip_norm = constant_op.constant(0.8)
with_norm = clip_ops.clip_by_average_norm(x, clip_norm)
without_norm = clip_ops.clip_by_norm(
x, clip_norm * math_ops.to_float(array_ops.size(x)))
x, clip_norm * math_ops.cast(array_ops.size(x), dtypes.float32))
clip_by_average_norm_ans = self.evaluate(with_norm)
clip_by_norm_ans = self.evaluate(without_norm)
self.assertAllClose(clip_by_average_norm_ans, clip_by_norm_ans)


@ -80,7 +80,7 @@ class DrawBoundingBoxOpTest(test.TestCase):
test_drawn_image = self._fillBorder(image, color)
bboxes = np.asarray([0, 0, 1, 1])
bboxes = np.vstack([bboxes for _ in range(num_boxes)])
bboxes = math_ops.to_float(bboxes)
bboxes = math_ops.cast(bboxes, dtypes.float32)
bboxes = array_ops.expand_dims(bboxes, 0)
image = ops.convert_to_tensor(image)
image = image_ops_impl.convert_image_dtype(image, dtypes.float32)


@ -754,7 +754,7 @@ class FunctionalOpsTest(test.TestCase):
def TestCondCapture(n, *args):
del args
return math_ops.to_float(n) + v < 10
return math_ops.cast(n, dtypes.float32) + v < 10
with self.assertRaises(ValueError):
_ = functional_ops.While(
@ -770,7 +770,7 @@ class FunctionalOpsTest(test.TestCase):
@function.Defun(dtypes.int32, dtypes.float32)
def Body(n, x):
return x + math_ops.to_float(n)
return x + math_ops.cast(n, dtypes.float32)
xs = [
# 1 + 2 + ... + 20
@ -799,7 +799,7 @@ class FunctionalOpsTest(test.TestCase):
@function.Defun(dtypes.int32, dtypes.float32, func_name="TestBody")
def TestBody(n, x):
return x + math_ops.to_float(n)
return x + math_ops.cast(n, dtypes.float32)
_ = functional_ops.For(
1, 21, 1, [0.], TestBody, rewrite_with_while=True)[0]
@ -817,15 +817,15 @@ class FunctionalOpsTest(test.TestCase):
@function.Defun(dtypes.int32)
def TestNullary(n):
v + math_ops.to_float(n) # pylint: disable=expression-not-assigned
v + math_ops.cast(n, dtypes.float32) # pylint: disable=expression-not-assigned
@function.Defun(dtypes.int32, dtypes.float32)
def TestUnary(n, x):
return x + math_ops.to_float(n) + v
return x + math_ops.cast(n, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32, dtypes.float32)
def TestBinary(n, x, x2):
return x + math_ops.to_float(n) + v, x2 + v
return x + math_ops.cast(n, dtypes.float32) + v, x2 + v
for rewrite_with_while in (True, False):
use_gpu = not rewrite_with_while
@ -899,7 +899,7 @@ class FunctionalOpsTest(test.TestCase):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(i, v):
return math_ops.to_float(i) + v
return math_ops.cast(i, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32)
def ReturnsTooManyArgs(unused_i, v):


@ -59,7 +59,7 @@ class GradientCorrectnessTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradientWithIntegerPath(self):
x = constant_op.constant([3.9, 4.1])
k = math_ops.to_float(math_ops.to_int32(x))
k = math_ops.cast(math_ops.cast(x, dtypes.int32), dtypes.float32)
y = x * k
dy_dx, = gradients_impl.gradients(y, x)
with self.cached_session() as sess:
@ -68,7 +68,7 @@ class GradientCorrectnessTest(test.TestCase):
@test_util.run_deprecated_v1
def testNoIntegerGradient1(self):
x = constant_op.constant([3.9, 4.1])
k = math_ops.to_float(math_ops.to_int32(x))
k = math_ops.cast(math_ops.cast(x, dtypes.int32), dtypes.float32)
y = k * k
dy_dx, = gradients_impl.gradients(y, x)
self.assertIsNone(dy_dx)
@ -76,7 +76,7 @@ class GradientCorrectnessTest(test.TestCase):
@test_util.run_deprecated_v1
def testNoIntegerGradient2(self):
k = constant_op.constant([3, 4])
x = math_ops.to_float(k)
x = math_ops.cast(k, dtypes.float32)
y = x * x
dy_dk, = gradients_impl.gradients(y, k)
self.assertIsNone(dy_dk)
@ -106,7 +106,7 @@ class GradientCorrectnessTest(test.TestCase):
@test_util.run_deprecated_v1
def testNoIntegerGradient6(self):
k = constant_op.constant(3)
x = math_ops.to_float(k)
x = math_ops.cast(k, dtypes.float32)
grad_1, = gradients_impl.gradients(k * k, k)
grad_2, = gradients_impl.gradients(x * x, k)
grad_3, = gradients_impl.gradients(math_ops.square(k), k)

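The GradientCorrectnessTest hunks above pin down a property the new casts must preserve: a round trip through an integer dtype severs the gradient path. A hedged sketch of the behaviour being asserted (graph mode, as in the tests; mirrors testNoIntegerGradient1):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops

x = constant_op.constant([3.9, 4.1])
# float32 -> int32 -> float32, the same path the test exercises.
k = math_ops.cast(math_ops.cast(x, dtypes.int32), dtypes.float32)
dy_dx, = gradients_impl.gradients(k * k, x)
# dy_dx is None: no gradient flows through the integer cast.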

@ -232,7 +232,8 @@ class ReconstructionOpsTest(test.TestCase):
# overlap, the gradient for this batch item will be 0-99 shaped as (10,
# 10).
reconstruction *= array_ops.stack(
[array_ops.zeros((100,)), math_ops.to_float(math_ops.range(100))])
[array_ops.zeros((100,)),
math_ops.cast(math_ops.range(100), dtypes.float32)])
loss = math_ops.reduce_sum(reconstruction)
# Verify that only the second batch item receives gradient.


@ -104,11 +104,15 @@ class SpaceToBatchTest(test.TestCase, PythonOpImpl):
with self.cached_session(use_gpu=True):
# outputs = space_to_batch(inputs)
x_tf = self.space_to_batch(
math_ops.to_float(inputs), paddings, block_size=block_size)
math_ops.cast(inputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = self.batch_to_space(
math_ops.to_float(outputs), paddings, block_size=block_size)
math_ops.cast(outputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), inputs)
def _testOne(self, inputs, block_size, outputs):
@ -200,11 +204,11 @@ class SpaceToBatchNDTest(test.TestCase):
with self.cached_session(use_gpu=use_gpu):
# outputs = space_to_batch(inputs)
x_tf = array_ops.space_to_batch_nd(
math_ops.to_float(inputs), block_shape, paddings)
math_ops.cast(inputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), outputs)
# inputs = batch_to_space(outputs)
x_tf = array_ops.batch_to_space_nd(
math_ops.to_float(outputs), block_shape, paddings)
math_ops.cast(outputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), inputs)
def _testDirect(self, input_shape, block_shape, paddings):


@ -113,8 +113,8 @@ class SplitOpTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.to_int32(5)
b = math_ops.to_int32(6)
a = math_ops.cast(5, dtypes.int32)
b = math_ops.cast(6, dtypes.int32)
value = np.random.rand(11, 11)


@ -21,6 +21,7 @@ from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
@ -399,7 +400,7 @@ def _GatherGrad(op, grad):
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
params_shape = math_ops.cast(params_shape, dtypes.int32)
# Build appropriately shaped IndexedSlices
indices = op.inputs[1]
@ -422,7 +423,7 @@ def _GatherV2Grad(op, grad):
params = op.inputs[0]
with ops.colocate_with(params):
params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
params_shape = math_ops.to_int32(params_shape)
params_shape = math_ops.cast(params_shape, dtypes.int32)
indices = op.inputs[1]
indices_size = array_ops.expand_dims(array_ops.size(indices), 0)


@ -613,7 +613,7 @@ class _InitializeClustersOpFactory(object):
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
return gen_clustering_ops.kmeans_plus_plus_initialization(
inp, math_ops.to_int64(self._num_remaining), self._seed,
inp, math_ops.cast(self._num_remaining, dtypes.int64), self._seed,
self._kmeans_plus_plus_num_retries)
def _kmc2_multiple_centers(self):


@ -191,8 +191,10 @@ def confusion_matrix(labels,
values = (array_ops.ones_like(predictions, dtype)
if weights is None else weights)
cm_sparse = sparse_tensor.SparseTensor(
indices=indices, values=values, dense_shape=math_ops.to_int64(shape))
zero_matrix = array_ops.zeros(math_ops.to_int32(shape), dtype)
indices=indices,
values=values,
dense_shape=math_ops.cast(shape, dtypes.int64))
zero_matrix = array_ops.zeros(math_ops.cast(shape, dtypes.int32), dtype)
return sparse_ops.sparse_add(zero_matrix, cm_sparse)


@ -1233,7 +1233,7 @@ class ConditionalAccumulatorBase(object):
"""
return gen_data_flow_ops.accumulator_set_global_step(
self._accumulator_ref,
math_ops.to_int64(ops.convert_to_tensor(new_global_step)),
math_ops.cast(ops.convert_to_tensor(new_global_step), _dtypes.int64),
name=name)
@ -1291,7 +1291,7 @@ class ConditionalAccumulator(ConditionalAccumulatorBase):
"""
grad = ops.convert_to_tensor(grad, self._dtype)
grad.get_shape().assert_is_compatible_with(self._shape)
local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64)
return gen_data_flow_ops.accumulator_apply_gradient(
self._accumulator_ref, local_step=local_step, gradient=grad, name=name)
@ -1423,14 +1423,14 @@ class SparseConditionalAccumulator(ConditionalAccumulatorBase):
Raises:
InvalidArgumentError: If grad is of the wrong shape
"""
local_step = math_ops.to_int64(ops.convert_to_tensor(local_step))
local_step = math_ops.cast(ops.convert_to_tensor(local_step), _dtypes.int64)
return gen_data_flow_ops.sparse_accumulator_apply_gradient(
self._accumulator_ref,
local_step=local_step,
gradient_indices=math_ops.to_int64(grad_indices),
gradient_indices=math_ops.cast(grad_indices, _dtypes.int64),
gradient_values=grad_values,
gradient_shape=math_ops.to_int64([]
if grad_shape is None else grad_shape),
gradient_shape=math_ops.cast(
[] if grad_shape is None else grad_shape, _dtypes.int64),
has_known_shape=(grad_shape is not None),
name=name)


@ -485,7 +485,7 @@ def embedding_lookup_sparse(params,
embeddings = embedding_lookup(
params, ids, partition_strategy=partition_strategy, max_norm=max_norm)
if embeddings.dtype in (dtypes.float16, dtypes.bfloat16):
embeddings = math_ops.to_float(embeddings)
embeddings = math_ops.cast(embeddings, dtypes.float32)
if not ignore_weights:
weights = sp_weights.values
if weights.dtype != embeddings.dtype:


@ -615,15 +615,17 @@ def central_crop(image, central_fraction):
# bounding boxes depend on the `image` tensor's rank and whether / not the
# dimensions are statically defined.
if dynamic_h:
img_hd = math_ops.to_double(img_h)
bbox_h_start = math_ops.to_int32((img_hd - img_hd * central_fraction) / 2)
img_hd = math_ops.cast(img_h, dtypes.float64)
bbox_h_start = math_ops.cast(
(img_hd - img_hd * central_fraction) / 2, dtypes.int32)
else:
img_hd = float(img_h)
bbox_h_start = int((img_hd - img_hd * central_fraction) / 2)
if dynamic_w:
img_wd = math_ops.to_double(img_w)
bbox_w_start = math_ops.to_int32((img_wd - img_wd * central_fraction) / 2)
img_wd = math_ops.cast(img_w, dtypes.float64)
bbox_w_start = math_ops.cast(
(img_wd - img_wd * central_fraction) / 2, dtypes.int32)
else:
img_wd = float(img_w)
bbox_w_start = int((img_wd - img_wd * central_fraction) / 2)
@ -990,15 +992,21 @@ def _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name,
_, current_height, current_width, _ = _ImageDimensions(images, rank=4)
# do the computation to find the right scale and height/width.
scale_factor_height = (math_ops.to_float(new_height_const) /
math_ops.to_float(current_height))
scale_factor_width = (math_ops.to_float(new_width_const) /
math_ops.to_float(current_width))
scale_factor_height = (
math_ops.cast(new_height_const, dtypes.float32) /
math_ops.cast(current_height, dtypes.float32))
scale_factor_width = (
math_ops.cast(new_width_const, dtypes.float32) /
math_ops.cast(current_width, dtypes.float32))
scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width)
scaled_height_const = math_ops.to_int32(
math_ops.round(scale_factor * math_ops.to_float(current_height)))
scaled_width_const = math_ops.to_int32(
math_ops.round(scale_factor * math_ops.to_float(current_width)))
scaled_height_const = math_ops.cast(
math_ops.round(
scale_factor * math_ops.cast(current_height, dtypes.float32)),
dtypes.int32)
scaled_width_const = math_ops.cast(
math_ops.round(
scale_factor * math_ops.cast(current_width, dtypes.float32)),
dtypes.int32)
# NOTE: Reset the size and other constants used later.
size = ops.convert_to_tensor([scaled_height_const, scaled_width_const],


@ -19,6 +19,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
@ -55,7 +56,7 @@ def _inplace_helper(x, i, v, op):
return array_ops.reshape(
op(array_ops.reshape(x, [1, -1]), [0], array_ops.reshape(v, [1, -1])),
array_ops.shape(x))
i = math_ops.to_int32(i)
i = math_ops.cast(i, dtypes.int32)
if i.get_shape().ndims == 0:
# Single 0-dim update.
return op(x, array_ops.reshape(i, [1]), array_ops.expand_dims(v, 0))


@ -939,7 +939,7 @@ class IdTableWithHashBuckets(LookupInterface):
if isinstance(keys, sparse_tensor.SparseTensor):
values = keys.values
if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
values = math_ops.to_int64(values)
values = math_ops.cast(values, dtypes.int64)
if self._num_oov_buckets == 0:
ids = self._table.lookup(values, name=name)
@ -1118,7 +1118,7 @@ class StaticVocabularyTable(LookupInterface):
if isinstance(keys, sparse_tensor.SparseTensor):
values = keys.values
if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
values = math_ops.to_int64(values)
values = math_ops.cast(values, dtypes.int64)
# TODO(yleon): Consider moving this functionality to its own kernel.
with ops.name_scope(name, "%s_Lookup" % self.name):
@ -1344,10 +1344,11 @@ def index_table_from_tensor(vocabulary_list,
if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
num_elements = array_ops.size(keys)
values = math_ops.to_int64(math_ops.range(num_elements))
values = math_ops.cast(math_ops.range(num_elements), dtypes.int64)
with ops.name_scope(None, "hash_table"):
table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
table_keys = math_ops.cast(
keys, dtypes.int64) if keys.dtype.is_integer else keys
init = KeyValueTensorInitializer(
table_keys,
values,
@ -1507,7 +1508,7 @@ def index_to_string_table_from_tensor(vocabulary_list,
with ops.name_scope(name, "index_to_string"):
vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)
num_elements = array_ops.size(vocabulary_list)
keys = math_ops.to_int64(math_ops.range(num_elements))
keys = math_ops.cast(math_ops.range(num_elements), dtypes.int64)
init = KeyValueTensorInitializer(
keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")


@ -256,9 +256,9 @@ def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
[num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
predictions = math_ops.cast(predictions, dtypes.int64)
labels = math_ops.cast(labels, dtypes.int64)
num_classes = math_ops.cast(num_classes, dtypes.int64)
# Flatten the input if its rank > 1.
if predictions.get_shape().ndims > 1:
@ -360,18 +360,18 @@ def mean(values,
'is enabled.')
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
values = math_ops.cast(values, dtypes.float32)
total = metric_variable([], dtypes.float32, name='total')
count = metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
num_values = math_ops.cast(array_ops.size(values), dtypes.float32)
else:
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
math_ops.cast(weights, dtypes.float32), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.reduce_sum(weights)
@ -452,7 +452,8 @@ def accuracy(labels,
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
return mean(is_correct, weights, metrics_collections, updates_collections,
name or 'accuracy')
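Throughout metrics_impl.py the replacement pattern is the same: a boolean comparison is cast to float32 so it can be averaged or weighted. A minimal toy sketch of the accuracy computation touched above (the tensors are illustrative, not from the diff):

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops

labels = constant_op.constant([1, 0, 1, 1])
predictions = constant_op.constant([1, 1, 1, 0])
# Booleans cannot be averaged directly, so cast them to 0.0 / 1.0 first.
is_correct = math_ops.cast(math_ops.equal(predictions, labels), dtypes.float32)
accuracy = math_ops.reduce_mean(is_correct)  # 0.5 for these toy values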
@ -523,7 +524,7 @@ def _confusion_matrix_at_thresholds(labels,
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.to_float(predictions),
predictions=math_ops.cast(predictions, dtypes.float32),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
@ -558,7 +559,7 @@ def _confusion_matrix_at_thresholds(labels,
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
math_ops.cast(weights, dtypes.float32), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
@ -572,8 +573,8 @@ def _confusion_matrix_at_thresholds(labels,
if 'tp' in includes:
true_p = metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
is_true_positive = math_ops.cast(
math_ops.logical_and(label_is_pos, pred_is_pos), dtypes.float32)
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_p,
@ -584,8 +585,8 @@ def _confusion_matrix_at_thresholds(labels,
if 'fn' in includes:
false_n = metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
is_false_negative = math_ops.cast(
math_ops.logical_and(label_is_pos, pred_is_neg), dtypes.float32)
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_n,
@ -596,8 +597,8 @@ def _confusion_matrix_at_thresholds(labels,
if 'tn' in includes:
true_n = metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
is_true_negative = math_ops.cast(
math_ops.logical_and(label_is_neg, pred_is_neg), dtypes.float32)
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_n,
@ -608,8 +609,8 @@ def _confusion_matrix_at_thresholds(labels,
if 'fp' in includes:
false_p = metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
is_false_positive = math_ops.cast(
math_ops.logical_and(label_is_neg, pred_is_pos), dtypes.float32)
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_p,
@ -1019,7 +1020,7 @@ def mean_per_class_accuracy(labels,
with variable_scope.variable_scope(name, 'mean_accuracy',
(predictions, labels, weights)):
labels = math_ops.to_int64(labels)
labels = math_ops.cast(labels, dtypes.int64)
# Flatten the input if its rank > 1.
if labels.get_shape().ndims > 1:
@ -1038,12 +1039,13 @@ def mean_per_class_accuracy(labels,
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
if weights.get_shape().ndims > 1:
weights = array_ops.reshape(weights, [-1])
weights = math_ops.to_float(weights)
weights = math_ops.cast(weights, dtypes.float32)
is_correct *= weights
ones *= weights
@ -1135,9 +1137,11 @@ def mean_iou(labels,
def compute_mean_iou(_, total_cm):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
sum_over_row = math_ops.cast(
math_ops.reduce_sum(total_cm, 0), dtypes.float32)
sum_over_col = math_ops.cast(
math_ops.reduce_sum(total_cm, 1), dtypes.float32)
cm_diag = math_ops.cast(array_ops.diag_part(total_cm), dtypes.float32)
denominator = sum_over_row + sum_over_col - cm_diag
# The mean is only computed over classes that appear in the
@ -1352,7 +1356,7 @@ def mean_tensor(values,
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
values = math_ops.cast(values, dtypes.float32)
total = metric_variable(
values.get_shape(), dtypes.float32, name='total_tensor')
count = metric_variable(
@ -1363,7 +1367,7 @@ def mean_tensor(values,
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
math_ops.cast(weights, dtypes.float32), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.multiply(num_values, weights)
@ -1434,7 +1438,8 @@ def percentage_below(values,
raise RuntimeError('tf.metrics.percentage_below is not supported when '
'eager execution is enabled.')
is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))
is_below_threshold = math_ops.cast(
math_ops.less(values, threshold), dtypes.float32)
return mean(is_below_threshold, weights, metrics_collections,
updates_collections, name or 'percentage_below_threshold')
@ -1469,11 +1474,11 @@ def _count_condition(values,
check_ops.assert_type(values, dtypes.bool)
count = metric_variable([], dtypes.float32, name='count')
values = math_ops.to_float(values)
values = math_ops.cast(values, dtypes.float32)
if weights is not None:
with ops.control_dependencies((check_ops.assert_rank_in(
weights, (0, array_ops.rank(values))),)):
weights = math_ops.to_float(weights)
weights = math_ops.cast(weights, dtypes.float32)
values = math_ops.multiply(values, weights)
value_tensor = _aggregate_variable(count, metrics_collections)
@ -2227,7 +2232,7 @@ def _select_class_id(ids, selected_id):
# Intersect `ids` with the selected ID.
filled_selected_id = array_ops.fill(filled_selected_id_shape,
math_ops.to_int64(selected_id))
math_ops.cast(selected_id, dtypes.int64))
result = sets.set_intersection(filled_selected_id, ids)
return sparse_tensor.SparseTensor(
indices=result.indices, values=result.values, dense_shape=ids_shape)
@ -2292,11 +2297,11 @@ def _sparse_true_positive_at_k(labels,
labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
class_id)
tp = sets.set_size(sets.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
tp = math_ops.cast(tp, dtypes.float64)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, tp),)):
weights = math_ops.to_double(weights)
weights = math_ops.cast(weights, dtypes.float64)
tp = math_ops.multiply(tp, weights)
return tp
@ -2346,7 +2351,7 @@ def _streaming_sparse_true_positive_at_k(labels,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))
batch_total_tp = math_ops.cast(math_ops.reduce_sum(tp), dtypes.float64)
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_tp, name='update')
@ -2387,11 +2392,11 @@ def _sparse_false_negative_at_k(labels,
class_id)
fn = sets.set_size(
sets.set_difference(predictions_idx, labels, aminusb=False))
fn = math_ops.to_double(fn)
fn = math_ops.cast(fn, dtypes.float64)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, fn),)):
weights = math_ops.to_double(weights)
weights = math_ops.cast(weights, dtypes.float64)
fn = math_ops.multiply(fn, weights)
return fn
@ -2441,7 +2446,7 @@ def _streaming_sparse_false_negative_at_k(labels,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
batch_total_fn = math_ops.cast(math_ops.reduce_sum(fn), dtypes.float64)
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
@ -2597,7 +2602,7 @@ def recall_at_top_k(labels,
with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.to_int64(predictions_idx)
top_k_idx = math_ops.cast(predictions_idx, dtypes.int64)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
@ -2999,7 +3004,8 @@ def _sparse_average_precision_at_top_k(labels, predictions_idx):
"""
with ops.name_scope(None, 'average_precision',
(predictions_idx, labels)) as scope:
predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
predictions_idx = math_ops.cast(
predictions_idx, dtypes.int64, name='predictions_idx')
if predictions_idx.get_shape().ndims == 0:
raise ValueError('The rank of predictions_idx must be at least 1.')
k = predictions_idx.get_shape().as_list()[-1]
@ -3035,12 +3041,12 @@ def _sparse_average_precision_at_top_k(labels, predictions_idx):
retrieved_per_k = math_ops.cumsum(
array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
precision_per_k = math_ops.div(
math_ops.to_double(tp_per_k),
math_ops.to_double(retrieved_per_k),
math_ops.cast(tp_per_k, dtypes.float64),
math_ops.cast(retrieved_per_k, dtypes.float64),
name='precision_per_k')
relevant_precision_per_k = math_ops.multiply(
precision_per_k,
math_ops.to_double(relevant_per_k),
math_ops.cast(relevant_per_k, dtypes.float64),
name='relevant_precision_per_k')
# Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
@ -3049,7 +3055,7 @@ def _sparse_average_precision_at_top_k(labels, predictions_idx):
# Divide by number of relevant items to get average precision. These are
# the "num_relevant_items" and "AveP" terms from the formula above.
num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
num_relevant_items = math_ops.cast(_num_relevant(labels, k), dtypes.float64)
return math_ops.div(precision_sum, num_relevant_items, name=scope)
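
For reference, a small worked example of the average-precision arithmetic above, done in plain numpy rather than TensorFlow; the prediction and label values are made up for illustration. With top-3 predictions [3, 1, 2] and relevant labels {1, 2}:

import numpy as np

relevant_per_k = np.array([0., 1., 1.])                     # is the k-th prediction relevant?
tp_per_k = np.cumsum(relevant_per_k)                        # [0., 1., 2.]
retrieved_per_k = np.cumsum(np.ones(3))                     # [1., 2., 3.]
precision_per_k = tp_per_k / retrieved_per_k                # [0., 0.5, 0.667]
precision_sum = np.sum(precision_per_k * relevant_per_k)    # 7/6
num_relevant_items = 2.0                                    # min(number of labels, k)
print(precision_sum / num_relevant_items)                   # -> ~0.583
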
@ -3110,7 +3116,7 @@ def _streaming_sparse_average_precision_at_top_k(labels,
predictions_idx=predictions_idx, labels=labels)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_double(weights), average_precision)
math_ops.cast(weights, dtypes.float64), average_precision)
average_precision = math_ops.multiply(average_precision, weights)
# Create accumulation variables and update ops for max average precision and
@ -3122,8 +3128,8 @@ def _streaming_sparse_average_precision_at_top_k(labels,
# `average_precision` rows.
max_var = metric_variable([], dtypes.float64, name=max_scope)
if weights is None:
batch_max = math_ops.to_double(
array_ops.size(average_precision, name='batch_max'))
batch_max = math_ops.cast(
array_ops.size(average_precision, name='batch_max'), dtypes.float64)
else:
batch_max = math_ops.reduce_sum(weights, name='batch_max')
max_update = state_ops.assign_add(max_var, batch_max, name='update')
@ -3280,11 +3286,11 @@ def _sparse_false_positive_at_k(labels,
class_id)
fp = sets.set_size(
sets.set_difference(predictions_idx, labels, aminusb=True))
fp = math_ops.to_double(fp)
fp = math_ops.cast(fp, dtypes.float64)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, fp),)):
weights = math_ops.to_double(weights)
weights = math_ops.cast(weights, dtypes.float64)
fp = math_ops.multiply(fp, weights)
return fp
@ -3334,7 +3340,7 @@ def _streaming_sparse_false_positive_at_k(labels,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
batch_total_fp = math_ops.cast(math_ops.reduce_sum(fp), dtypes.float64)
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
@ -3402,7 +3408,7 @@ def precision_at_top_k(labels,
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.to_int64(predictions_idx)
top_k_idx = math_ops.cast(predictions_idx, dtypes.int64)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
@ -3642,7 +3648,7 @@ def specificity_at_sensitivity(labels,
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cast(indices_at_minval, dtypes.int64)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
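
The metric updates in this file all follow the same one-for-one substitution; a minimal sketch of the pattern, using made-up constants and the TensorFlow-internal modules already imported at the top of the file:

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops

labels = constant_op.constant([1, 0, 1])
predictions = constant_op.constant([1, 1, 1])
# Deprecated spelling: math_ops.to_float(math_ops.equal(predictions, labels))
is_correct = math_ops.cast(math_ops.equal(predictions, labels), dtypes.float32)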

View File

@ -2755,7 +2755,7 @@ def leaky_relu(features, alpha=0.2, name=None):
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.to_float(features)
features = math_ops.cast(features, dtypes.float32)
if compat.forward_compatible(2018, 11, 1):
if isinstance(alpha, np.ndarray):
alpha = alpha.item()
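
A minimal sketch of the promotion above (illustrative values, calling nn_ops.leaky_relu directly): integer features are cast to float32 before the element-wise scaling by alpha.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops

features = constant_op.constant([-2, 0, 3])            # int32 input
features = math_ops.cast(features, dtypes.float32)     # same promotion as above
y = nn_ops.leaky_relu(features, alpha=0.2)             # -> [-0.4, 0., 3.]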

View File

@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
@ -103,7 +104,8 @@ def batch_gather(params, indices, name=None):
num_indices = indices.row_lengths()
params_starts = params.row_starts()
adjustments = ragged_util.repeat(params_starts, num_indices, axis=0)
adjusted_index_values = math_ops.to_int64(indices.values) + adjustments
adjusted_index_values = math_ops.cast(
indices.values, dtypes.int64) + adjustments
return ragged_tensor.RaggedTensor.from_row_splits(
ragged_gather_ops.gather(params.values, adjusted_index_values),
indices.row_splits)
@ -114,7 +116,7 @@ def batch_gather(params, indices, name=None):
elif indices_ndims == 2:
# Adjust indices from batch-local to global (in params.values)
adjustments = array_ops.expand_dims(params.row_starts(), 1)
adjusted_indices = math_ops.to_int64(indices) + adjustments
adjusted_indices = math_ops.cast(indices, dtypes.int64) + adjustments
return ragged_gather_ops.gather(params.values, adjusted_indices)
else:
raise ValueError('batch shape from indices does not match params shape')
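
A small numpy illustration (not TensorFlow code, values made up) of the batch-local-to-global index adjustment that the cast above feeds into: each row's indices are offset by that row's start position in the flattened params.values.

import numpy as np

params_row_starts = np.array([0, 2], dtype=np.int64)   # rows of params start at offsets 0 and 2
indices = np.array([[1, 0],
                    [0, 2]], dtype=np.int32)           # batch-local indices
adjusted = indices.astype(np.int64) + params_row_starts[:, None]
# adjusted == [[1, 0], [2, 4]] -- valid indices into the flattened params.values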

View File

@ -232,7 +232,7 @@ def gather_nd(params, indices, name=None):
# index tuples point to the correct values in the flattened params; and
# then use ragged.gather on the flattened index tuples & params.
else:
indices = math_ops.to_int64(indices)
indices = math_ops.cast(indices, dtypes.int64)
# Flatten the outermost 2 dimensions of the index tuples & params.
flattened_index_tuples = array_ops.gather(params.row_splits,

View File

@ -623,7 +623,7 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = math_ops.cast(sequence_length, dtypes.int32)
if sequence_length.get_shape().rank not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
@ -1367,7 +1367,7 @@ def static_rnn(cell,
zero_output = nest.pack_sequence_as(
structure=output_size, flat_sequence=flat_zero_output)
sequence_length = math_ops.to_int32(sequence_length)
sequence_length = math_ops.cast(sequence_length, dtypes.int32)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)

View File

@ -94,7 +94,7 @@ def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disabl
axis_dim = (tensor_shape.dimension_value(input.shape[-1])
or _array_ops.shape(input)[-1])
axis_dim_float = _math_ops.to_float(axis_dim)
axis_dim_float = _math_ops.cast(axis_dim, _dtypes.float32)
if type == 1:
dct1_input = _array_ops.concat([input, input[..., -2:0:-1]], axis=-1)

View File

@ -286,7 +286,8 @@ def _rfft_grad_helper(rank, irfft_fn):
# The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
# factor, plus some additional terms to make up for the components dropped
# due to Hermitian symmetry.
input_size = _math_ops.to_float(_fft_size_for_grad(op.inputs[0], rank))
input_size = _math_ops.cast(
_fft_size_for_grad(op.inputs[0], rank), _dtypes.float32)
the_irfft = irfft_fn(grad, fft_length)
return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None
@ -311,8 +312,8 @@ def _irfft_grad_helper(rank, rfft_fn):
[[1.0], 2.0 * _array_ops.ones([input_last_dimension - 2 + is_odd]),
_array_ops.ones([1 - is_odd])], 0)
rsize = _math_ops.reciprocal(_math_ops.to_float(
_fft_size_for_grad(grad, rank)))
rsize = _math_ops.reciprocal(_math_ops.cast(
_fft_size_for_grad(grad, rank), _dtypes.float32))
# The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
# factor and a mask. The mask scales the gradient for the Hermitian

View File

@ -107,4 +107,5 @@ def mfccs_from_log_mel_spectrograms(log_mel_spectrograms, name=None):
num_mel_bins = array_ops.shape(log_mel_spectrograms)[-1]
dct2 = dct_ops.dct(log_mel_spectrograms, type=2)
return dct2 * math_ops.rsqrt(math_ops.to_float(num_mel_bins) * 2.0)
return dct2 * math_ops.rsqrt(
math_ops.cast(num_mel_bins, dtypes.float32) * 2.0)

View File

@ -21,6 +21,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
@ -282,6 +283,8 @@ def _enclosing_power_of_two(value):
return constant_op.constant(
int(2**np.ceil(np.log(value_static) / np.log(2.0))), value.dtype)
return math_ops.cast(
math_ops.pow(2.0, math_ops.ceil(
math_ops.log(math_ops.to_float(value)) / math_ops.log(2.0))),
value.dtype)
math_ops.pow(
2.0,
math_ops.ceil(
math_ops.log(math_ops.cast(value, dtypes.float32)) /
math_ops.log(2.0))), value.dtype)
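
A quick sanity check of the formula above (plain numpy, illustrative value): it yields the enclosing power of two of the input.

import numpy as np

value = 300
print(int(2 ** np.ceil(np.log(value) / np.log(2.0))))   # -> 512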

View File

@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
@ -109,7 +110,7 @@ def _SparseReduceSumGrad(op, out_grad):
sp_shape = op.inputs[2]
output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
scale = sp_shape // math_ops.cast(output_shape_kept_dims, dtypes.int64)
# (sparse_indices, sparse_values, sparse_shape, reduction_axes)
return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
None, None)
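
A numpy illustration of the scale arithmetic above (values made up): reducing a [3, 4] sparse tensor over axis 1 keeps dims as [3, 1], and dividing each sparse index by scale maps it onto the kept-dims gradient.

import numpy as np

sp_shape = np.array([3, 4], dtype=np.int64)
output_shape_kept_dims = np.array([3, 1], dtype=np.int64)
scale = sp_shape // output_shape_kept_dims          # -> [1, 4]
sp_indices = np.array([[0, 2], [2, 3]], dtype=np.int64)
print(sp_indices // scale)                          # -> [[0, 0], [2, 0]]
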
@ -212,7 +213,7 @@ def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
x_shape = op.inputs[2]
y = op.inputs[3]
y_shape = math_ops.to_int64(array_ops.shape(y))
y_shape = math_ops.cast(array_ops.shape(y), dtypes.int64)
num_added_dims = array_ops.expand_dims(
array_ops.size(x_shape) - array_ops.size(y_shape), 0)
augmented_y_shape = array_ops.concat(

View File

@ -597,11 +597,11 @@ def _sparse_cross_internal(inputs,
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.to_int64(values[i])
values[i] = math_ops.cast(values[i], dtypes.int64)
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
dense_inputs[i] = math_ops.cast(dense_inputs[i], dtypes.int64)
internal_type = dtypes.int64
indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(

View File

@ -370,7 +370,7 @@ class _GraphTensorArray(object):
[self._handle, value, lengths]):
value = ops.convert_to_tensor(value, name="value")
with self._maybe_colocate_with(value):
lengths_64 = math_ops.to_int64(lengths)
lengths_64 = math_ops.cast(lengths, dtypes.int64)
if self._infer_shape and not context.executing_eagerly():
clengths = tensor_util.constant_value(lengths_64)
if value.shape.dims is not None:
@ -646,7 +646,7 @@ class _GraphTensorArrayV2(object):
"""See TensorArray."""
with ops.name_scope(name, "TensorArraySplit", [self._flow, value, lengths]):
value = ops.convert_to_tensor(value, name="value")
lengths_64 = math_ops.to_int64(lengths)
lengths_64 = math_ops.cast(lengths, dtypes.int64)
if self._infer_shape and not context.executing_eagerly():
clengths = tensor_util.constant_value(lengths_64)
if value.shape.dims is not None:

View File

@ -199,7 +199,7 @@ def input_producer(input_tensor,
q, [enq], cancel_op=cancel_op))
if summary_name is not None:
summary.scalar(summary_name,
math_ops.to_float(q.size()) * (1. / capacity))
math_ops.cast(q.size(), dtypes.float32) * (1. / capacity))
return q
@ -712,7 +712,7 @@ def _shapes(tensor_list_list, shapes, enqueue_many):
def _select_which_to_enqueue(tensor_list, keep_input):
"""Select which examples to enqueue based on vector `keep_input`."""
select_i = math_ops.to_int32(keep_input)
select_i = math_ops.cast(keep_input, dtypes.int32)
tensor_list = [
data_flow_ops.dynamic_partition(x, select_i, num_partitions=2)[1]
for x in tensor_list]
@ -780,8 +780,9 @@ def _batch(tensors, batch_size, keep_input, num_threads=1, capacity=32,
queue = _which_queue(dynamic_pad)(
capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
summary.scalar("fraction_of_%d_full" % capacity,
math_ops.to_float(queue.size()) * (1. / capacity))
summary.scalar(
"fraction_of_%d_full" % capacity,
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
@ -819,8 +820,9 @@ def _batch_join(tensors_list, batch_size, keep_input, capacity=32,
queue = _which_queue(dynamic_pad)(
capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
summary.scalar("fraction_of_%d_full" % capacity,
math_ops.to_float(queue.size()) * (1. / capacity))
summary.scalar(
"fraction_of_%d_full" % capacity,
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
@ -857,8 +859,8 @@ def _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
full = (math_ops.to_float(
math_ops.maximum(0, queue.size() - min_after_dequeue)) *
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue), dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
@ -899,8 +901,8 @@ def _shuffle_batch_join(tensors_list, batch_size, capacity,
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
full = (math_ops.to_float(
math_ops.maximum(0, queue.size() - min_after_dequeue)) *
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue), dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
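
A worked example of the fullness fraction reported above (illustrative numbers): with capacity 1000, min_after_dequeue 400 and 700 elements queued, the summary reports (700 - 400) / (1000 - 400) = 0.5, i.e. the queue sits halfway between its minimum and its capacity.

capacity, min_after_dequeue, size = 1000, 400, 700
full = max(0, size - min_after_dequeue) * (1. / (capacity - min_after_dequeue))
print(full)   # -> 0.5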

View File

@ -554,7 +554,8 @@ class BatchTest(test_lib.TestCase):
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
string = array_ops.tile(["string"],
math_ops.to_int32(array_ops.stack([counter])))
math_ops.cast(array_ops.stack([counter]),
dtypes.int32))
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
batched = inp.batch(
@ -1143,10 +1144,12 @@ class BatchJoinTest(test_lib.TestCase):
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(["a"],
math_ops.to_int32(array_ops.stack([counter + 1])))
b = array_ops.tile(["b"],
math_ops.to_int32(array_ops.stack([ninety_nine])))
a = array_ops.tile(
["a"],
math_ops.cast(array_ops.stack([counter + 1]), dtypes.int32))
b = array_ops.tile(
["b"],
math_ops.cast(array_ops.stack([ninety_nine]), dtypes.int32))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,
@ -1324,10 +1327,12 @@ class BatchJoinTest(test_lib.TestCase):
# These get joined together and grouped into batches of 5.
batch_size = 5
a = array_ops.tile(["a"],
math_ops.to_int32(array_ops.stack([counter + 1])))
b = array_ops.tile(["b"],
math_ops.to_int32(array_ops.stack([ninety_nine])))
a = array_ops.tile(
["a"],
math_ops.cast(array_ops.stack([counter + 1]), dtypes.int32))
b = array_ops.tile(
["b"],
math_ops.cast(array_ops.stack([ninety_nine]), dtypes.int32))
batched = inp.batch_join(
[[counter, a], [ninety_nine, b]],
batch_size=batch_size,