Switch all tf.concat(concat_dim, value, name) calls in third_party/tensorflow to tf.concat_v2(value, axis, name).
Change: 141255675
parent 7b306e8fcf
commit d4eb834824
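For reference, the rewrite applied throughout this diff is purely mechanical: the axis argument moves from the first position of tf.concat to the second position of tf.concat_v2, and the list of values moves to the front. A minimal sketch of the before/after call pattern, assuming a TensorFlow build from this era that still exposes both endpoints (t1, t2, and the op names are hypothetical examples, not taken from the diff):

    import tensorflow as tf

    t1 = tf.constant([[1, 2], [3, 4]])
    t2 = tf.constant([[5, 6], [7, 8]])

    # Old call order: axis (concat_dim) first, then the list of tensors.
    old_style = tf.concat(0, [t1, t2], name="concat_old")

    # New call order: list of tensors first, then the axis.
    new_style = tf.concat_v2([t1, t2], 0, name="concat_new")

Both calls concatenate along axis 0 and produce a 4x2 tensor; only the argument order (and the op name) changes.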
@@ -506,7 +506,7 @@ class InlineBijectorTest(tf.test.TestCase):
 def testShapeGetters(self):
 with self.test_session():
 bijector = bijectors.Inline(
-forward_event_shape_fn=lambda x: tf.concat(0, (x, [1])),
+forward_event_shape_fn=lambda x: tf.concat_v2((x, [1]), 0),
 get_forward_event_shape_fn=lambda x: x.as_list() + [1],
 inverse_event_shape_fn=lambda x: x[:-1],
 get_inverse_event_shape_fn=lambda x: x[:-1],
@@ -118,7 +118,7 @@ class Bernoulli(distribution.Distribution):
 return tensor_shape.scalar()

 def _sample_n(self, n, seed=None):
-new_shape = array_ops.concat(0, ([n], self.batch_shape()))
+new_shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
 uniform = random_ops.random_uniform(
 new_shape, seed=seed, dtype=self.p.dtype)
 sample = math_ops.less(uniform, self.p)
@@ -1483,10 +1483,11 @@ class ScaleAndShift(Bijector):
 math_ops.equal(event_ndims, 1)
 ])]), 1, 0)
 right = array_ops.where(math_ops.equal(event_ndims, 0), 2, 0)
-pad = array_ops.concat(0, (
-array_ops.ones([left], dtype=dtypes.int32),
-array_ops.shape(scale),
-array_ops.ones([right], dtype=dtypes.int32)))
+pad = array_ops.concat_v2(
+(array_ops.ones(
+[left], dtype=dtypes.int32), array_ops.shape(scale), array_ops.ones(
+[right], dtype=dtypes.int32)),
+0)
 scale = array_ops.reshape(scale, pad)
 batch_ndims = ndims - 2 + right
 # For safety, explicitly zero-out the upper triangular part.
@@ -1713,9 +1714,11 @@ class SoftmaxCentered(Bijector):
 y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
 ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
 else array_ops.rank(y))
-y = array_ops.pad(y, paddings=array_ops.concat(0, (
-array_ops.zeros((ndims - 1, 2), dtype=dtypes.int32),
-[[0, 1]])))
+y = array_ops.pad(y,
+paddings=array_ops.concat_v2(
+(array_ops.zeros(
+(ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
+0))

 # Set shape hints.
 if x.get_shape().ndims is not None:
@@ -1756,12 +1759,14 @@ class SoftmaxCentered(Bijector):
 depth=ndims,
 on_value=shape[-1]-np.array(1, dtype=shape.dtype),
 dtype=shape.dtype)
-size = array_ops.concat(0, (shape[:-1], np.asarray([1], dtype=shape.dtype)))
+size = array_ops.concat_v2(
+(shape[:-1], np.asarray(
+[1], dtype=shape.dtype)), 0)
 log_normalization = -array_ops.strided_slice(x, begin, begin + size)

 # Here we slice out all but the last coordinate; see above for idea.
 begin = array_ops.zeros_like(shape)
-size = array_ops.concat(0, (shape[:-1], [shape[-1]-1]))
+size = array_ops.concat_v2((shape[:-1], [shape[-1] - 1]), 0)
 x = array_ops.strided_slice(x, begin, begin + size)

 x += log_normalization
@@ -189,7 +189,7 @@ class Categorical(distribution.Distribution):
 samples = math_ops.cast(samples, self.dtype)
 ret = array_ops.reshape(
 array_ops.transpose(samples),
-array_ops.concat(0, ([n], self.batch_shape())))
+array_ops.concat_v2(([n], self.batch_shape()), 0))
 return ret

 def _log_prob(self, k):
@@ -238,7 +238,7 @@ class Dirichlet(distribution.Distribution):
 math_ops.cast(self.event_shape()[0], self.dtype)))
 if self.allow_nan_stats:
 nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
-shape = array_ops.concat(0, (self.batch_shape(), self.event_shape()))
+shape = array_ops.concat_v2((self.batch_shape(), self.event_shape()), 0)
 return array_ops.where(
 math_ops.greater(self.alpha, 1.),
 mode,
@@ -544,8 +544,9 @@ class Distribution(_BaseDistribution):
 return self.sample_n(sample_shape, seed, **condition_kwargs)
 sample_shape, total = self._expand_sample_shape(sample_shape)
 samples = self.sample_n(total, seed, **condition_kwargs)
-output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
-array_ops.shape(samples), [1], [-1])])
+output_shape = array_ops.concat_v2(
+[sample_shape, array_ops.slice(array_ops.shape(samples), [1], [-1])],
+0)
 output = array_ops.reshape(samples, output_shape)
 output.set_shape(tensor_util.constant_value_as_shape(
 sample_shape).concatenate(samples.get_shape()[1:]))
@@ -126,12 +126,12 @@ def same_dynamic_shape(a, b):
 # static shape inference may break the equality comparison between
 # shape(a) and shape(b) in math_ops.equal.
 lambda: math_ops.reduce_all(math_ops.equal(
-array_ops.concat(0, (
+array_ops.concat_v2((
 array_ops.shape(a),
-array_ops.shape(b))),
-array_ops.concat(0, (
+array_ops.shape(b)), 0),
+array_ops.concat_v2((
 array_ops.shape(b),
-array_ops.shape(a))))),
+array_ops.shape(a)), 0))),
 lambda: constant_op.constant(False))


@@ -371,7 +371,7 @@ def rotate_transpose(x, shift, name="rotate_transpose"):
 ndims - math_ops.mod(shift, ndims))
 first = math_ops.range(0, shift)
 last = math_ops.range(shift, ndims)
-perm = array_ops.concat(0, (last, first))
+perm = array_ops.concat_v2((last, first), 0)
 return array_ops.transpose(x, perm=perm)

@@ -426,9 +426,9 @@ def pick_vector(cond,
 % (true_vector.name, true_vector.dtype,
 false_vector.name, false_vector.dtype))
 n = array_ops.shape(true_vector)[0]
-return array_ops.slice(array_ops.concat(0, (true_vector, false_vector)),
-[array_ops.where(cond, 0, n)],
-[array_ops.where(cond, n, -1)])
+return array_ops.slice(
+array_ops.concat_v2((true_vector, false_vector), 0),
+[array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])


 def gen_new_seed(seed, salt):
@@ -557,7 +557,7 @@ def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
 # Gather up, reshape, and return.
 y = array_ops.reshape(x, [-1, d])
 y = array_ops.gather_nd(y, idx)
-y = array_ops.reshape(y, array_ops.concat(0, [batch_shape, [n, n]]))
+y = array_ops.reshape(y, array_ops.concat_v2([batch_shape, [n, n]], 0))
 y = array_ops.matrix_band_part(y, -1, 0)
 y.set_shape(y.get_shape().merge_with(final_shape))
 return y
@@ -89,7 +89,7 @@ class Exponential(gamma.Gamma):
 return self._lam

 def _sample_n(self, n, seed=None):
-shape = array_ops.concat(0, ([n], array_ops.shape(self._lam)))
+shape = array_ops.concat_v2(([n], array_ops.shape(self._lam)), 0)
 # Sample uniformly-at-random from the open-interval (0, 1).
 sampled = random_ops.random_uniform(
 shape,
@@ -158,7 +158,7 @@ class _Gumbel(distribution.Distribution):
 return tensor_shape.scalar()

 def _sample_n(self, n, seed=None):
-shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
+shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
 np_dtype = self.dtype.as_numpy_dtype()
 minval = np.nextafter(np_dtype(0), np_dtype(1))
 uniform = random_ops.random_uniform(shape=shape,
@@ -125,7 +125,7 @@ class Laplace(distribution.Distribution):
 return tensor_shape.scalar()

 def _sample_n(self, n, seed=None):
-shape = array_ops.concat(0, ([n], self.batch_shape()))
+shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
 # Sample uniformly-at-random from the open-interval (-1, 1).
 uniform_samples = random_ops.random_uniform(
 shape=shape,
@@ -157,7 +157,7 @@ class _Logistic(distribution.Distribution):
 return tensor_shape.scalar()

 def _sample_n(self, n, seed=None):
-shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
+shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
 np_dtype = self.dtype.as_numpy_dtype()
 minval = np.nextafter(np_dtype(0), np_dtype(1))
 uniform = random_ops.random_uniform(shape=shape,
@@ -330,7 +330,7 @@ class Mixture(distribution.Distribution):
 partitioned_batch_indices[c])
 samples_class_c = array_ops.reshape(
 samples_class_c,
-array_ops.concat(0, ([n_class * batch_size], event_shape)))
+array_ops.concat_v2(([n_class * batch_size], event_shape), 0))
 samples_class_c = array_ops.gather(
 samples_class_c, lookup_partitioned_batch_indices,
 name="samples_class_c_gather")
@@ -341,8 +341,8 @@ class Mixture(distribution.Distribution):
 indices=partitioned_samples_indices, data=samples_class)
 # Reshape back to proper sample, batch, and event shape.
 ret = array_ops.reshape(lhs_flat_ret,
-array_ops.concat(0, (samples_shape,
-self.event_shape())))
+array_ops.concat_v2((samples_shape,
+self.event_shape()), 0))
 ret.set_shape(
 tensor_shape.TensorShape(static_samples_shape).concatenate(
 self.get_event_shape()))
@@ -229,7 +229,7 @@ class _MultivariateNormalOperatorPD(distribution.Distribution):

 def _sample_n(self, n, seed=None):
 # Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
-shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
+shape = array_ops.concat_v2([self._cov.vector_shape(), [n]], 0)
 white_samples = random_ops.random_normal(shape=shape,
 mean=0.,
 stddev=1.,
@@ -239,9 +239,9 @@ class _MultivariateNormalOperatorPD(distribution.Distribution):
 correlated_samples = self._cov.sqrt_matmul(white_samples)

 # Move the last dimension to the front
-perm = array_ops.concat(0, (
-array_ops.pack([array_ops.rank(correlated_samples) - 1]),
-math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
+perm = array_ops.concat_v2(
+(array_ops.pack([array_ops.rank(correlated_samples) - 1]),
+math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)

 # TODO(ebrevdo): Once we get a proper tensor contraction op,
 # perform the inner product using that instead of batch_matmul
@@ -157,7 +157,7 @@ class Normal(distribution.Distribution):
 return tensor_shape.scalar()

 def _sample_n(self, n, seed=None):
-shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
+shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
 sampled = random_ops.random_normal(
 shape=shape, mean=0, stddev=1, dtype=self.mu.dtype, seed=seed)
 return sampled * self.sigma + self.mu
@@ -186,7 +186,7 @@ class _OneHotCategorical(distribution.Distribution):
 return self.logits.get_shape().with_rank_at_least(1)[-1:]

 def _sample_n(self, n, seed=None):
-sample_shape = array_ops.concat(0, ([n], array_ops.shape(self.logits)))
+sample_shape = array_ops.concat_v2(([n], array_ops.shape(self.logits)), 0)
 logits = self.logits
 if logits.get_shape().ndims == 2:
 logits_2d = logits
@@ -256,7 +256,7 @@ def _kl_categorical_categorical(a, b, name=None):
 """
 with ops.name_scope(
 name, "kl_categorical_categorical", [a.logits, b.logits]):
 # sum(p*ln(p/q))
 return math_ops.reduce_sum(
 nn_ops.softmax(a.logits)*(nn_ops.log_softmax(a.logits)
 - nn_ops.log_softmax(b.logits)), reduction_indices=[-1])
@@ -400,8 +400,8 @@ class OperatorPDBase(object):
 # Derived classes get this "for free" once .shape() is implemented.
 with ops.name_scope(self.name):
 with ops.name_scope(name, values=self.inputs):
-return array_ops.concat(
-0, (self.batch_shape(), [self.vector_space_dimension()]))
+return array_ops.concat_v2(
+(self.batch_shape(), [self.vector_space_dimension()]), 0)

 def vector_space_dimension(self, name="vector_space_dimension"):
 """Dimension of vector space on which this acts. The `k` in `R^k`.
@@ -675,12 +675,12 @@ def _flip_matrix_to_vector_dynamic(mat, batch_shape):
 """Flip matrix to vector with dynamic shapes."""
 mat_rank = array_ops.rank(mat)
 k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
-final_shape = array_ops.concat(0, (batch_shape, [k]))
+final_shape = array_ops.concat_v2((batch_shape, [k]), 0)

 # mat.shape = matrix_batch_shape + [k, M]
 # Permutation corresponding to [M] + matrix_batch_shape + [k]
-perm = array_ops.concat(
-0, ([mat_rank - 1], math_ops.range(0, mat_rank - 1)))
+perm = array_ops.concat_v2(
+([mat_rank - 1], math_ops.range(0, mat_rank - 1)), 0)
 mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
 vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
 return vector
@@ -751,12 +751,12 @@ def _flip_vector_to_matrix_dynamic(vec, batch_shape):
 # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
 condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
 k = array_ops.gather(vec_shape, vec_rank - 1)
-new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))
+new_shape = array_ops.concat_v2((batch_shape, [k], condensed_shape), 0)

 def _flip_front_dims_to_back():
 # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
-perm = array_ops.concat(
-0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
+perm = array_ops.concat_v2(
+(math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
 return array_ops.transpose(vec, perm=perm)

 x_flipped = control_flow_ops.cond(
@@ -789,8 +789,8 @@ def _flip_vector_to_matrix_static(vec, batch_shape):

 def _flip_front_dims_to_back():
 # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
-perm = array_ops.concat(
-0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
+perm = array_ops.concat_v2(
+(math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
 return array_ops.transpose(vec, perm=perm)

 if 0 < m:
@@ -82,7 +82,7 @@ class OperatorPDDiagBase(operator_pd.OperatorPDBase):
 def _shape(self):
 d_shape = array_ops.shape(self._diag)
 k = array_ops.gather(d_shape, array_ops.size(d_shape) - 1)
-return array_ops.concat(0, (d_shape, [k]))
+return array_ops.concat_v2((d_shape, [k]), 0)

 @abc.abstractmethod
 def _batch_log_det(self):
@@ -147,7 +147,7 @@ class OperatorPDSqrtVDVTUpdate(operator_pd.OperatorPDBase):
 v_rank = array_ops.rank(v)
 v_batch_shape = array_ops.strided_slice(v_shape, [0], [v_rank - 2])
 r = array_ops.gather(v_shape, v_rank - 1) # Last dim of v
-id_shape = array_ops.concat(0, (v_batch_shape, [r, r]))
+id_shape = array_ops.concat_v2((v_batch_shape, [r, r]), 0)
 return operator_pd_identity.OperatorPDIdentity(
 id_shape, v.dtype, verify_pd=self._verify_pd)

@@ -242,7 +242,7 @@ class _ExpRelaxedOneHotCategorical(distribution.Distribution):
 return self.logits.get_shape().with_rank_at_least(1)[-1:]

 def _sample_n(self, n, seed=None):
-sample_shape = array_ops.concat(0, ([n], array_ops.shape(self.logits)))
+sample_shape = array_ops.concat_v2(([n], array_ops.shape(self.logits)), 0)
 logits = self.logits * array_ops.ones(sample_shape)
 if logits.get_shape().ndims == 2:
 logits_2d = logits
@@ -381,7 +381,7 @@ class _DistributionShape(object):
 self._event_ndims_is_0, (1,), event_shape)
 batch_shape = distribution_util.pick_vector(
 self._batch_ndims_is_0, (1,), batch_shape)
-new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
+new_shape = array_ops.concat_v2(((-1,), batch_shape, event_shape), 0)
 x = array_ops.reshape(x, shape=new_shape)
 x = distribution_util.rotate_transpose(x, shift=-1)
 return x, sample_shape
@@ -425,7 +425,8 @@ class _DistributionShape(object):
 event_start = array_ops.where(
 self._batch_ndims_is_0, 2, 1 + self.batch_ndims)
 event_shape = array_ops.slice(s, (event_start,), (self.event_ndims,))
-new_shape = array_ops.concat(0, (sample_shape, batch_shape, event_shape))
+new_shape = array_ops.concat_v2(
+(sample_shape, batch_shape, event_shape), 0)
 x = array_ops.reshape(x, shape=new_shape)
 return x

@@ -182,7 +182,7 @@ class StudentT(distribution.Distribution):
 def _sample_n(self, n, seed=None):
 # The sampling method comes from the well known fact that if X ~ Normal(0,
 # 1), and Z ~ Chi2(df), then X / sqrt(Z / df) ~ StudentT(df).
-shape = array_ops.concat(0, ([n], self.batch_shape()))
+shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
 normal_sample = random_ops.random_normal(
 shape, dtype=self.dtype, seed=seed)
 half = constant_op.constant(0.5, self.dtype)
@@ -214,7 +214,7 @@ class StudentT(distribution.Distribution):
 def _entropy(self):
 u = array_ops.expand_dims(self.df * self._ones(), -1)
 v = array_ops.expand_dims(self._ones(), -1)
-beta_arg = array_ops.concat(len(u.get_shape()) - 1, [u, v]) / 2
+beta_arg = array_ops.concat_v2([u, v], len(u.get_shape()) - 1) / 2
 half_df = 0.5 * self.df
 return ((0.5 + half_df) * (math_ops.digamma(0.5 + half_df) -
 math_ops.digamma(half_df)) +
@@ -136,7 +136,7 @@ class Uniform(distribution.Distribution):
 return tensor_shape.scalar()

 def _sample_n(self, n, seed=None):
-shape = array_ops.concat(0, ([n], self.batch_shape()))
+shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
 samples = random_ops.random_uniform(shape=shape,
 dtype=self.dtype,
 seed=seed)
@@ -198,7 +198,7 @@ class _WishartOperatorPD(distribution.Distribution):
 batch_ndims = array_ops.shape(batch_shape)[0]

 ndims = batch_ndims + 3 # sample_ndims=1, event_ndims=2
-shape = array_ops.concat(0, ((n,), batch_shape, event_shape))
+shape = array_ops.concat_v2(((n,), batch_shape, event_shape), 0)

 # Complexity: O(nbk^2)
 x = random_ops.random_normal(shape=shape,
@@ -226,9 +226,9 @@ class _WishartOperatorPD(distribution.Distribution):

 # Make batch-op ready.
 # Complexity: O(nbk^2)
-perm = array_ops.concat(0, (math_ops.range(1, ndims), (0,)))
+perm = array_ops.concat_v2((math_ops.range(1, ndims), (0,)), 0)
 x = array_ops.transpose(x, perm)
-shape = array_ops.concat(0, (batch_shape, (event_shape[0], -1)))
+shape = array_ops.concat_v2((batch_shape, (event_shape[0], -1)), 0)
 x = array_ops.reshape(x, shape)

 # Complexity: O(nbM) where M is the complexity of the operator solving a
@@ -239,9 +239,9 @@ class _WishartOperatorPD(distribution.Distribution):

 # Undo make batch-op ready.
 # Complexity: O(nbk^2)
-shape = array_ops.concat(0, (batch_shape, event_shape, (n,)))
+shape = array_ops.concat_v2((batch_shape, event_shape, (n,)), 0)
 x = array_ops.reshape(x, shape)
-perm = array_ops.concat(0, ((ndims-1,), math_ops.range(0, ndims-1)))
+perm = array_ops.concat_v2(((ndims - 1,), math_ops.range(0, ndims - 1)), 0)
 x = array_ops.transpose(x, perm)

 if not self.cholesky_input_output_matrices:
@@ -278,12 +278,13 @@ class _WishartOperatorPD(distribution.Distribution):

 # Complexity: O(nbk^2) since transpose must access every element.
 scale_sqrt_inv_x_sqrt = x_sqrt
-perm = array_ops.concat(0, (math_ops.range(sample_ndims, ndims),
-math_ops.range(0, sample_ndims)))
+perm = array_ops.concat_v2((math_ops.range(sample_ndims, ndims),
+math_ops.range(0, sample_ndims)), 0)
 scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)
-shape = array_ops.concat(
-0, (batch_shape,
-(math_ops.cast(self.dimension, dtype=dtypes.int32), -1)))
+shape = array_ops.concat_v2(
+(batch_shape, (math_ops.cast(
+self.dimension, dtype=dtypes.int32), -1)),
+0)
 scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)

 # Complexity: O(nbM*k) where M is the complexity of the operator solving
@@ -295,10 +296,10 @@ class _WishartOperatorPD(distribution.Distribution):

 # Undo make batch-op ready.
 # Complexity: O(nbk^2)
-shape = array_ops.concat(0, (batch_shape, event_shape, sample_shape))
+shape = array_ops.concat_v2((batch_shape, event_shape, sample_shape), 0)
 scale_sqrt_inv_x_sqrt = array_ops.reshape(scale_sqrt_inv_x_sqrt, shape)
-perm = array_ops.concat(0, (math_ops.range(ndims - sample_ndims, ndims),
-math_ops.range(0, ndims - sample_ndims)))
+perm = array_ops.concat_v2((math_ops.range(ndims - sample_ndims, ndims),
+math_ops.range(0, ndims - sample_ndims)), 0)
 scale_sqrt_inv_x_sqrt = array_ops.transpose(scale_sqrt_inv_x_sqrt, perm)

 # Write V = SS', X = LL'. Then:
@@ -350,10 +350,12 @@ class KMeans(object):
 num_unique_cluster_idx)
 # Shape to enable broadcasting count_updates and learning_rate to inp.
 # It extends the shape with 1's to match the rank of inp.
-broadcast_shape = tf.concat(
-0,
-[tf.reshape(num_unique_cluster_idx, [1]),
-tf.ones(tf.reshape(tf.rank(inp) - 1, [1]), dtype=tf.int32)])
+broadcast_shape = tf.concat_v2(
+[
+tf.reshape(num_unique_cluster_idx, [1]), tf.ones(
+tf.reshape(tf.rank(inp) - 1, [1]), dtype=tf.int32)
+],
+0)
 # Subtract k * x, see comment above.
 cluster_center_updates -= tf.cast(
 tf.reshape(count_updates, broadcast_shape),
@@ -765,9 +765,9 @@ class WALSModel(object):
 col_shape = [num_rows]
 right = embedding_ops.embedding_lookup(right_factors, gather_indices,
 partition_strategy='div')
-new_sp_indices = tf.concat(1, [row_ids, col_ids])
-new_sp_shape = (tf.concat(0, [row_shape, col_shape]) if transpose_input
-else tf.concat(0, [col_shape, row_shape]))
+new_sp_indices = tf.concat_v2([row_ids, col_ids], 1)
+new_sp_shape = (tf.concat_v2([row_shape, col_shape], 0) if transpose_input
+else tf.concat_v2([col_shape, row_shape], 0))
 new_sp_input = tf.SparseTensor(indices=new_sp_indices,
 values=sp_input.values,
 dense_shape=new_sp_shape)
@@ -169,7 +169,8 @@ class GMM(estimator.Estimator, TransformerMixin):

 def _parse_tensor_or_dict(self, features):
 if isinstance(features, dict):
-return array_ops.concat(1, [features[k] for k in sorted(features.keys())])
+return array_ops.concat_v2([features[k] for k in sorted(features.keys())],
+1)
 return features

 def _get_train_ops(self, features, _):
@@ -320,11 +320,12 @@ class GmmAlgorithm(object):
 tf.squeeze(shard, [0]), transpose_a=True), 1)
 self._w_mul_x.append(w_mul_x)
 # Partial covariances.
-x = tf.concat(0, [shard for _ in range(self._num_classes)])
+x = tf.concat_v2([shard for _ in range(self._num_classes)], 0)
 x_trans = tf.transpose(x, perm=[0, 2, 1])
-x_mul_w = tf.concat(0, [
+x_mul_w = tf.concat_v2([
 tf.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
-for k in range(self._num_classes)])
+for k in range(self._num_classes)
+], 0)
 self._w_mul_x2.append(tf.matmul(x_mul_w, x))

 def _define_maximization_operation(self, num_batches):
@@ -365,7 +366,7 @@ class GmmAlgorithm(object):
 new_covs.append(tf.expand_dims(new_cov, 0))
 elif self._covariance_type == DIAG_COVARIANCE:
 new_covs.append(tf.expand_dims(tf.diag_part(new_cov), 0))
-new_covs = tf.concat(0, new_covs)
+new_covs = tf.concat_v2(new_covs, 0)
 if 'c' in self._params:
 # Train operations don't need to take care of the means
 # because covariances already depend on it.
@@ -397,15 +398,15 @@ class GmmAlgorithm(object):
 diff, perm=[0, 2, 1]))))
 self._all_scores.append(
 tf.reshape(
-tf.concat(1, all_scores),
+tf.concat_v2(all_scores, 1),
 tf.stack([self._num_examples, self._num_classes])))

 # Distance to the associated class.
-self._all_scores = tf.concat(0, self._all_scores)
-assignments = tf.concat(0, self.assignments())
+self._all_scores = tf.concat_v2(self._all_scores, 0)
+assignments = tf.concat_v2(self.assignments(), 0)
 rows = tf.to_int64(tf.range(0, self._num_examples))
-indices = tf.concat(1, [tf.expand_dims(rows, 1),
-tf.expand_dims(assignments, 1)])
+indices = tf.concat_v2(
+[tf.expand_dims(rows, 1), tf.expand_dims(assignments, 1)], 1)
 self._scores = tf.gather_nd(self._all_scores, indices)

 def _define_loglikelihood_operation(self):
@@ -191,13 +191,13 @@ class GridRNNCell(rnn_cell.RNNCell):

 output_tensors = [new_output[i] for i in self._config.outputs]
 output = array_ops.zeros(
-[0, 0], dtype) if len(output_tensors) == 0 else array_ops.concat(
-1, output_tensors)
+[0, 0], dtype) if len(output_tensors) == 0 else array_ops.concat_v2(
+output_tensors, 1)

 state_tensors = [new_state[i] for i in self._config.recurrents]
 states = array_ops.zeros(
-[0, 0], dtype) if len(state_tensors) == 0 else array_ops.concat(
-1, state_tensors)
+[0, 0], dtype) if len(state_tensors) == 0 else array_ops.concat_v2(
+state_tensors, 1)

 return output, states

@@ -428,7 +428,7 @@ def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state,
 for d in conf.dims[:-1]:
 ls_cell_inputs[d.idx] = new_output[d.idx] if new_output[
 d.idx] is not None else m_prev[d.idx]
-cell_inputs = array_ops.concat(1, ls_cell_inputs)
+cell_inputs = array_ops.concat_v2(ls_cell_inputs, 1)
 else:
 cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0],
 m_prev[0].dtype)
@@ -438,9 +438,9 @@ def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state,
 for i in dim_indices:
 d = conf.dims[i]
 if d.non_recurrent_fn:
-linear_args = array_ops.concat(
-1, [cell_inputs, last_dim_output
-]) if conf.num_dims > 1 else last_dim_output
+linear_args = array_ops.concat_v2(
+[cell_inputs, last_dim_output],
+1) if conf.num_dims > 1 else last_dim_output
 with vs.variable_scope('non_recurrent' if conf.tied else
 'non_recurrent/cell_{}'.format(i)):
 if conf.tied and not (first_call and i == dim_indices[0]):
@@ -453,7 +453,7 @@ def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state,
 layers.initializers.xavier_initializer)
 else:
 if c_prev[i] is not None:
-cell_state = array_ops.concat(1, [c_prev[i], last_dim_output])
+cell_state = array_ops.concat_v2([c_prev[i], last_dim_output], 1)
 else:
 # for GRU/RNN, the state is just the previous output
 cell_state = last_dim_output
@@ -198,7 +198,7 @@ def concat(labeled_tensors, axis_name, name=None):

 concat_axis = core.concat_axes(concat_axis_list)
 concat_dimension = axis_names.index(axis_name)
-concat_tensor = array_ops.concat(concat_dimension, tensors, name=scope)
+concat_tensor = array_ops.concat_v2(tensors, concat_dimension, name=scope)
 values = list(axes_0.values())
 concat_axes = (values[:concat_dimension] + [concat_axis] +
 values[concat_dimension + 1:])
@@ -903,7 +903,7 @@ class WhereTest(Base):
 where_lt = ops.where(condition, x, y)

 golden_lt = core.LabeledTensor(
-tf.concat(0, [tf.ones(3), tf.zeros(2)]), ['x'])
+tf.concat_v2([tf.ones(3), tf.zeros(2)], 0), ['x'])
 self.assertLabeledTensorsEqual(where_lt, golden_lt)

 def test_mismatched_axes(self):
@@ -156,11 +156,14 @@ def safe_embedding_lookup_sparse(embedding_weights,
 name=scope)

 # Reshape back from linear ids back into higher-dimensional dense result.
-final_result = array_ops.reshape(result, array_ops.concat(0, [
-array_ops.slice(
-math_ops.cast(original_shape, dtypes.int32),
-[0], [original_rank - 1]),
-array_ops.slice(array_ops.shape(result), [1], [-1])]))
+final_result = array_ops.reshape(
+result,
+array_ops.concat_v2([
+array_ops.slice(
+math_ops.cast(original_shape, dtypes.int32), [0],
+[original_rank - 1]),
+array_ops.slice(array_ops.shape(result), [1], [-1])
+], 0))
 final_result.set_shape(tensor_shape.unknown_shape(
 (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
 return final_result
@@ -267,8 +270,8 @@ def scattered_embedding_lookup(params,
 result = embedding_ops.embedding_lookup(
 params, ids, partition_strategy="div", validate_indices=False)

-return array_ops.reshape(result, array_ops.concat(
-0, [values_shape, [dimension]]))
+return array_ops.reshape(
+result, array_ops.concat_v2([values_shape, [dimension]], 0))


 def scattered_embedding_lookup_sparse(params,
@@ -382,8 +385,8 @@ def embedding_lookup_unique(params, ids, name=None):
 unique_ids, idx = array_ops.unique(ids_flat)
 unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
 embeds_flat = array_ops.gather(unique_embeddings, idx)
-embed_shape = array_ops.concat(
-0, [shape, array_ops.shape(unique_embeddings)[1:]])
+embed_shape = array_ops.concat_v2(
+[shape, array_ops.shape(unique_embeddings)[1:]], 0)
 embeds = array_ops.reshape(embeds_flat, embed_shape)
 embeds.set_shape(ids.get_shape().concatenate(
 unique_embeddings.get_shape()[1:]))
@@ -181,7 +181,7 @@ def _input_from_feature_columns(columns_to_tensors,
 except ValueError as e:
 raise ValueError('Error creating input layer for column: {}.\n'
 '{}, {}'.format(column.name, e, ee))
-return array_ops.concat(output_rank - 1, output_tensors)
+return array_ops.concat_v2(output_tensors, output_rank - 1)


 def input_from_feature_columns(columns_to_tensors,
@@ -1202,8 +1202,8 @@ def _sparse_inner_flatten(inputs, new_rank):
 """Helper function for `inner_flatten`."""
 outer_dimensions = inputs.shape[:new_rank - 1]
 inner_dimensions = inputs.shape[new_rank - 1:]
-new_shape = array_ops.concat(0, (outer_dimensions,
-[math_ops.reduce_prod(inner_dimensions)]))
+new_shape = array_ops.concat_v2((outer_dimensions,
+[math_ops.reduce_prod(inner_dimensions)]), 0)
 flattened = sparse_ops.sparse_reshape(inputs, new_shape)
 return flattened

@@ -1215,7 +1215,7 @@ def _dense_inner_flatten(inputs, new_rank):
 with ops.control_dependencies([rank_assertion]):
 outer_dimensions = array_ops.strided_slice(
 array_ops.shape(inputs), [0], [new_rank - 1])
-new_shape = array_ops.concat(0, (outer_dimensions, [-1]))
+new_shape = array_ops.concat_v2((outer_dimensions, [-1]), 0)
 reshaped = array_ops.reshape(inputs, new_shape)

 # if `new_rank` is an integer, try to calculate new shape.
@@ -1975,7 +1975,7 @@ def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
 array_ops.strided_slice(array_ops.shape(inputs), [dim], [dim + 1]))
 if dim < (input_rank - 1):
 multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
-multiples = array_ops.concat(0, multiples)
+multiples = array_ops.concat_v2(multiples, 0)
 return math_ops.div(inputs, array_ops.tile(lengths, multiples))


@@ -302,7 +302,7 @@ class _MultiClassTargetColumn(_TargetColumn):

 def logits_to_predictions(self, logits, proba=False):
 if self.num_label_columns == 1:
-logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
+logits = array_ops.concat_v2([array_ops.zeros_like(logits), logits], 1)

 if proba:
 return nn.softmax(logits)
@@ -387,7 +387,7 @@ class _BinarySvmTargetColumn(_MultiClassTargetColumn):
 raise ValueError(
 "logits to probabilities is not supported for _BinarySvmTargetColumn")

-logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
+logits = array_ops.concat_v2([array_ops.zeros_like(logits), logits], 1)
 return math_ops.argmax(logits, 1)


@@ -169,8 +169,8 @@ def _concatenate_context_input(sequence_input, context_input):
 padded_length = array_ops.shape(sequence_input)[1]
 tiled_context_input = array_ops.tile(
 array_ops.expand_dims(context_input, 1),
-array_ops.concat(0, [[1], [padded_length], [1]]))
-return array_ops.concat(2, [sequence_input, tiled_context_input])
+array_ops.concat_v2([[1], [padded_length], [1]], 0))
+return array_ops.concat_v2([sequence_input, tiled_context_input], 2)


 def build_sequence_input(features,
@@ -352,7 +352,7 @@ def _multi_value_predictions(
 flattened_activations, proba=True)
 flat_predictions = math_ops.argmax(flat_probabilities, 1)
 if target_column.num_label_columns == 1:
-probability_shape = array_ops.concat(0, [activations_shape[:2], [2]])
+probability_shape = array_ops.concat_v2([activations_shape[:2], [2]], 0)
 else:
 probability_shape = activations_shape
 probabilities = array_ops.reshape(
@@ -87,7 +87,8 @@ def boston_eval_fn():
 features = tf.reshape(
 tf.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
 labels = tf.reshape(tf.constant(boston.target), [n_examples, 1])
-return tf.concat(0, [features, features]), tf.concat(0, [labels, labels])
+return tf.concat_v2([features, features], 0), tf.concat_v2([labels, labels],
+0)


 def linear_model_params_fn(features, labels, mode, params):
@@ -400,7 +400,7 @@ def _log_loss_with_two_classes(logits, labels):


 def _one_class_to_two_class_logits(logits):
-return array_ops.concat(1, (array_ops.zeros_like(logits), logits))
+return array_ops.concat_v2((array_ops.zeros_like(logits), logits), 1)


 class _BinaryLogisticHead(_Head):
@@ -231,8 +231,8 @@ class Seq2SeqTest(tf.test.TestCase):
 cell = tf.contrib.rnn.GRUCell(2)
 inp = [tf.constant(0.5, shape=[2, 2])] * 2
 enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
-attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
-for e in enc_outputs])
+attn_states = tf.concat_v2(
+[tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs], 1)
 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
 dec, mem = tf.contrib.legacy_seq2seq.attention_decoder(
 dec_inp, enc_state,
@@ -251,8 +251,8 @@ class Seq2SeqTest(tf.test.TestCase):
 cell = tf.contrib.rnn.GRUCell(2)
 inp = [tf.constant(0.5, shape=[2, 2])] * 2
 enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
-attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
-for e in enc_outputs])
+attn_states = tf.concat_v2(
+[tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs], 1)
 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
 dec, mem = tf.contrib.legacy_seq2seq.attention_decoder(
 dec_inp, enc_state,
@@ -313,8 +313,8 @@ class Seq2SeqTest(tf.test.TestCase):
 state_is_tuple=True)
 inp = [tf.constant(0.5, shape=[2, 2])] * 2
 enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
-attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
-for e in enc_outputs])
+attn_states = tf.concat_v2(
+[tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs], 1)
 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
 dec, mem = tf.contrib.legacy_seq2seq.attention_decoder(
 dec_inp, enc_state,
@@ -340,8 +340,9 @@ class Seq2SeqTest(tf.test.TestCase):
 state_is_tuple=True)
 inp = tf.constant(0.5, shape=[2, 2, 2])
 enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
-attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
-for e in enc_outputs])
+attn_states = tf.concat_v2(
+[tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs],
+1)
 dec_inp = [tf.constant(0.4, shape=[2, 2])] * 3
 dec, mem = tf.contrib.legacy_seq2seq.attention_decoder(
 dec_inp, enc_state,
@@ -364,8 +365,8 @@ class Seq2SeqTest(tf.test.TestCase):
 inp = [tf.constant(0.5, shape=[2, 2])] * 2
 cell = tf.contrib.rnn.GRUCell(2)
 enc_outputs, enc_state = tf.nn.rnn(cell, inp, dtype=tf.float32)
-attn_states = tf.concat(1, [tf.reshape(e, [-1, 1, cell.output_size])
-for e in enc_outputs])
+attn_states = tf.concat_v2(
+[tf.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs], 1)
 dec_inp = [tf.constant(i, tf.int32, shape=[2]) for i in range(3)]
 dec, mem = tf.contrib.legacy_seq2seq.embedding_attention_decoder(
 dec_inp, enc_state, attn_states, cell, num_symbols=4,
@@ -135,7 +135,7 @@ class LinearOperatorDiagTest(
 self.assertAllEqual((2, 1, 3, 3), operator.shape)

 # Create a batch matrix with the broadcast shape of operator.
-diag_broadcast = tf.concat(1, (diag, diag))
+diag_broadcast = tf.concat_v2((diag, diag), 1)
 mat = tf.matrix_diag(diag_broadcast)
 self.assertAllEqual((2, 2, 3, 3), mat.get_shape()) # being pedantic.

@@ -169,7 +169,7 @@ class LinearOperatorDiag(linear_operator.LinearOperator):
 def _shape_dynamic(self):
 d_shape = array_ops.shape(self._diag)
 k = d_shape[-1]
-return array_ops.concat(0, (d_shape, [k]))
+return array_ops.concat_v2((d_shape, [k]), 0)

 def _assert_non_singular(self):
 return linear_operator_util.assert_no_entries_with_modulus_zero(
@@ -31,7 +31,7 @@ class LinearOperatorDerivedClassTest(tf.test.TestCase):
 test methods to work.
 """

 # Absolute/relative tolerance for tests.
 _atol = {
 tf.float16: 1e-3, tf.float32: 1e-6, tf.float64: 1e-12, tf.complex64: 1e-6,
 tf.complex128: 1e-12}
@@ -218,7 +218,7 @@ class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
 else:
 batch_shape = operator.batch_shape_dynamic()
 n = operator.domain_dimension_dynamic()
-rhs_shape = tf.concat(0, (batch_shape, [n, r]))
+rhs_shape = tf.concat_v2((batch_shape, [n, r]), 0)

 x = tf.random_normal(shape=rhs_shape, dtype=operator.dtype.real_dtype)
 if operator.dtype.is_complex:
@ -1438,13 +1438,12 @@ def expand_and_tile(tensor, multiple, dim=0, name=None):
|
|||||||
array_ops.size(tensor.dense_shape) + dim, [1])
|
array_ops.size(tensor.dense_shape) + dim, [1])
|
||||||
else:
|
else:
|
||||||
expand_dims = [dim]
|
expand_dims = [dim]
|
||||||
expanded_shape = array_ops.concat(
|
expanded_shape = array_ops.concat_v2(
|
||||||
0, (array_ops.strided_slice(
|
(array_ops.strided_slice(tensor.dense_shape, [0], expand_dims),
|
||||||
tensor.dense_shape, [0], expand_dims),
|
[1],
|
||||||
[1],
|
array_ops.strided_slice(
|
||||||
array_ops.strided_slice(
|
tensor.dense_shape, expand_dims, [-1], end_mask=1 << 0)),
|
||||||
tensor.dense_shape, expand_dims, [-1], end_mask=1 << 0)),
|
0, name='expanded_shape')
|
||||||
name='expanded_shape')
|
|
||||||
expanded = sparse_ops.sparse_reshape(
|
expanded = sparse_ops.sparse_reshape(
|
||||||
tensor, shape=expanded_shape, name='expand')
|
tensor, shape=expanded_shape, name='expand')
|
||||||
if multiple == 1:
|
if multiple == 1:
|
||||||
@ -1458,8 +1457,8 @@ def expand_and_tile(tensor, multiple, dim=0, name=None):
|
|||||||
if multiple == 1:
|
if multiple == 1:
|
||||||
return expanded
|
return expanded
|
||||||
ones = array_ops.ones_like(array_ops.shape(tensor))
|
ones = array_ops.ones_like(array_ops.shape(tensor))
|
||||||
tile_multiples = array_ops.concat(
|
tile_multiples = array_ops.concat_v2(
|
||||||
0, (ones[:dim], (multiple,), ones[dim:]), name='multiples')
|
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
|
||||||
return array_ops.tile(expanded, tile_multiples, name=scope)
|
return array_ops.tile(expanded, tile_multiples, name=scope)
|
||||||
|
|
||||||
|
|
||||||
|
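The expand_and_tile change above follows the same reordering. A hedged sketch of how the multiples vector for array_ops.tile is assembled (dense case; dim and multiple are illustrative):

import tensorflow as tf

tensor = tf.zeros([3, 4])
dim, multiple = 0, 5
ones = tf.ones_like(tf.shape(tensor))
# Old: tf.concat(0, (ones[:dim], (multiple,), ones[dim:])).
tile_multiples = tf.concat_v2((ones[:dim], (multiple,), ones[dim:]), 0,
                              name='multiples')   # [5, 1, 1] for the expanded tensor
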
@ -4356,12 +4356,14 @@ class StreamingMeanIOUTest(tf.test.TestCase):
|
|||||||
self.assertAlmostEqual(desired_output, miou.eval())
|
self.assertAlmostEqual(desired_output, miou.eval())
|
||||||
|
|
||||||
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
|
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
|
||||||
predictions = tf.concat(0,
|
predictions = tf.concat_v2(
|
||||||
[tf.constant(0, shape=[5]),
|
[tf.constant(
|
||||||
tf.constant(1, shape=[5])])
|
0, shape=[5]), tf.constant(
|
||||||
labels = tf.concat(0,
|
1, shape=[5])], 0)
|
||||||
[tf.constant(0, shape=[3]),
|
labels = tf.concat_v2(
|
||||||
tf.constant(1, shape=[7])])
|
[tf.constant(
|
||||||
|
0, shape=[3]), tf.constant(
|
||||||
|
1, shape=[7])], 0)
|
||||||
num_classes = 2
|
num_classes = 2
|
||||||
with self.test_session() as sess:
|
with self.test_session() as sess:
|
||||||
miou, update_op = metrics.streaming_mean_iou(
|
miou, update_op = metrics.streaming_mean_iou(
|
||||||
@ -4395,14 +4397,23 @@ class StreamingMeanIOUTest(tf.test.TestCase):
|
|||||||
self.assertEqual(0., miou.eval())
|
self.assertEqual(0., miou.eval())
|
||||||
|
|
||||||
def testResultsWithSomeMissing(self):
|
def testResultsWithSomeMissing(self):
|
||||||
predictions = tf.concat(0, [tf.constant(0, shape=[5]),
|
predictions = tf.concat_v2(
|
||||||
tf.constant(1, shape=[5])])
|
[tf.constant(
|
||||||
labels = tf.concat(0, [tf.constant(0, shape=[3]),
|
0, shape=[5]), tf.constant(
|
||||||
tf.constant(1, shape=[7])])
|
1, shape=[5])], 0)
|
||||||
|
labels = tf.concat_v2(
|
||||||
|
[tf.constant(
|
||||||
|
0, shape=[3]), tf.constant(
|
||||||
|
1, shape=[7])], 0)
|
||||||
num_classes = 2
|
num_classes = 2
|
||||||
weights = tf.concat(0, [tf.constant(0, shape=[1]),
|
weights = tf.concat_v2(
|
||||||
tf.constant(1, shape=[8]),
|
[
|
||||||
tf.constant(0, shape=[1])])
|
tf.constant(
|
||||||
|
0, shape=[1]), tf.constant(
|
||||||
|
1, shape=[8]), tf.constant(
|
||||||
|
0, shape=[1])
|
||||||
|
],
|
||||||
|
0)
|
||||||
with self.test_session() as sess:
|
with self.test_session() as sess:
|
||||||
miou, update_op = metrics.streaming_mean_iou(
|
miou, update_op = metrics.streaming_mean_iou(
|
||||||
predictions, labels, num_classes, weights=weights)
|
predictions, labels, num_classes, weights=weights)
|
||||||
|
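The mean-IOU test hunks above concatenate constant segments along axis 0; the equivalent calls after migration look like this (a sketch, not the full test):

import tensorflow as tf

predictions = tf.concat_v2(
    [tf.constant(0, shape=[5]), tf.constant(1, shape=[5])], 0)
labels = tf.concat_v2(
    [tf.constant(0, shape=[3]), tf.constant(1, shape=[7])], 0)
# Both are length-10 int32 vectors, as with the old tf.concat(0, ...) calls.
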
@ -85,7 +85,7 @@ def horizontal_lstm(images, num_filters_out, scope=None):
|
|||||||
lstm1d.ndlstm_base(sequence,
|
lstm1d.ndlstm_base(sequence,
|
||||||
num_filters_out - num_filters_out // 2,
|
num_filters_out - num_filters_out // 2,
|
||||||
reverse=1))
|
reverse=1))
|
||||||
output_sequence = tf.concat(2, [hidden_sequence_lr, hidden_sequence_rl])
|
output_sequence = tf.concat_v2([hidden_sequence_lr, hidden_sequence_rl], 2)
|
||||||
output = sequence_to_images(output_sequence, batch_size)
|
output = sequence_to_images(output_sequence, batch_size)
|
||||||
return output
|
return output
|
||||||
|
|
||||||
|
@ -88,7 +88,7 @@ def one_hot_mask(labels, num_classes, scope=None):
|
|||||||
sparse_labels = tf.to_int32(tf.reshape(labels, [-1, 1]))
|
sparse_labels = tf.to_int32(tf.reshape(labels, [-1, 1]))
|
||||||
sparse_size, _ = _shape(sparse_labels)
|
sparse_size, _ = _shape(sparse_labels)
|
||||||
indices = tf.reshape(tf.range(0, sparse_size, 1), [-1, 1])
|
indices = tf.reshape(tf.range(0, sparse_size, 1), [-1, 1])
|
||||||
concated = tf.concat(1, [indices, sparse_labels])
|
concated = tf.concat_v2([indices, sparse_labels], 1)
|
||||||
dense_result = tf.sparse_to_dense(concated, [sparse_size, num_classes], 1.0,
|
dense_result = tf.sparse_to_dense(concated, [sparse_size, num_classes], 1.0,
|
||||||
0.0)
|
0.0)
|
||||||
result = tf.reshape(dense_result, [height, width, num_classes])
|
result = tf.reshape(dense_result, [height, width, num_classes])
|
||||||
|
@ -203,7 +203,7 @@ class ExternalOptimizerInterface(object):
|
|||||||
return array_ops.reshape(tensors[0], [-1])
|
return array_ops.reshape(tensors[0], [-1])
|
||||||
else:
|
else:
|
||||||
flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
|
flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
|
||||||
return array_ops.concat(0, flattened)
|
return array_ops.concat_v2(flattened, 0)
|
||||||
|
|
||||||
def _make_eval_func(self, tensors, session, feed_dict, fetches,
|
def _make_eval_func(self, tensors, session, feed_dict, fetches,
|
||||||
callback=None):
|
callback=None):
|
||||||
|
@ -109,7 +109,7 @@ class FusedRnnCellTest(tf.test.TestCase):
|
|||||||
fw_outputs, fw_state = fused_cell(inputs, dtype=tf.float64, scope="fw")
|
fw_outputs, fw_state = fused_cell(inputs, dtype=tf.float64, scope="fw")
|
||||||
bw_outputs, bw_state = fused_bw_cell(
|
bw_outputs, bw_state = fused_bw_cell(
|
||||||
inputs, dtype=tf.float64, scope="bw")
|
inputs, dtype=tf.float64, scope="bw")
|
||||||
outputs = tf.concat(2, [fw_outputs, bw_outputs])
|
outputs = tf.concat_v2([fw_outputs, bw_outputs], 2)
|
||||||
fused_vars = [v for v in tf.trainable_variables()
|
fused_vars = [v for v in tf.trainable_variables()
|
||||||
if v.name.startswith("fused/")]
|
if v.name.startswith("fused/")]
|
||||||
sess.run([tf.global_variables_initializer()])
|
sess.run([tf.global_variables_initializer()])
|
||||||
|
@ -483,8 +483,8 @@ class RNNCellTest(tf.test.TestCase):
|
|||||||
dtype=np.float32), dtype=tf.float32)
|
dtype=np.float32), dtype=tf.float32)
|
||||||
output, state = cell(inputs, zero_state)
|
output, state = cell(inputs, zero_state)
|
||||||
if state_is_tuple:
|
if state_is_tuple:
|
||||||
concat_state = tf.concat(
|
concat_state = tf.concat_v2(
|
||||||
1, [state[0][0], state[0][1], state[1], state[2]])
|
[state[0][0], state[0][1], state[1], state[2]], 1)
|
||||||
else:
|
else:
|
||||||
concat_state = state
|
concat_state = state
|
||||||
sess.run(tf.global_variables_initializer())
|
sess.run(tf.global_variables_initializer())
|
||||||
@ -540,14 +540,15 @@ class RNNCellTest(tf.test.TestCase):
|
|||||||
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed+4)
|
(batch_size, attn_length * num_units), 0.0, 1.0, seed=seed+4)
|
||||||
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
|
zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
|
||||||
if not state_is_tuple:
|
if not state_is_tuple:
|
||||||
zero_state = tf.concat(1,
|
zero_state = tf.concat_v2([
|
||||||
[zero_state[0][0], zero_state[0][1],
|
zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
|
||||||
zero_state[1], zero_state[2]])
|
], 1)
|
||||||
inputs = tf.random_uniform(
|
inputs = tf.random_uniform(
|
||||||
(batch_size, num_units), 0.0, 1.0, seed=seed+5)
|
(batch_size, num_units), 0.0, 1.0, seed=seed+5)
|
||||||
output, state = cell(inputs, zero_state)
|
output, state = cell(inputs, zero_state)
|
||||||
if state_is_tuple:
|
if state_is_tuple:
|
||||||
state = tf.concat(1, [state[0][0], state[0][1], state[1], state[2]])
|
state = tf.concat_v2([state[0][0], state[0][1], state[1], state[2]],
|
||||||
|
1)
|
||||||
sess.run(tf.global_variables_initializer())
|
sess.run(tf.global_variables_initializer())
|
||||||
self.assertAllClose(sess.run(output), expected_output)
|
self.assertAllClose(sess.run(output), expected_output)
|
||||||
self.assertAllClose(sess.run(state), expected_state)
|
self.assertAllClose(sess.run(state), expected_state)
|
||||||
|
@ -82,11 +82,11 @@ def _GRUBlockCellGrad(op, *grad):
|
|||||||
d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = _gru_ops_so.gru_block_cell_grad(
|
d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = _gru_ops_so.gru_block_cell_grad(
|
||||||
x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)
|
x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)
|
||||||
|
|
||||||
x_h_prev = array_ops.concat(1, [x, h_prev])
|
x_h_prev = array_ops.concat_v2([x, h_prev], 1)
|
||||||
d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
|
d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
|
||||||
d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)
|
d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)
|
||||||
|
|
||||||
x_h_prevr = array_ops.concat(1, [x, h_prev * r])
|
x_h_prevr = array_ops.concat_v2([x, h_prev * r], 1)
|
||||||
d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
|
d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
|
||||||
d_b_c = nn_ops.bias_add_grad(d_c_bar)
|
d_b_c = nn_ops.bias_add_grad(d_c_bar)
|
||||||
|
|
||||||
|
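In the GRU gradient hunk above, only the concat calls change; the surrounding matmuls are untouched. A sketch with made-up shapes, assuming the transitional API:

import tensorflow as tf

x = tf.zeros([8, 16])        # [batch, input_size]
h_prev = tf.zeros([8, 32])   # [batch, num_units]
r = tf.ones([8, 32])         # reset gate
x_h_prev = tf.concat_v2([x, h_prev], 1)       # was tf.concat(1, [x, h_prev])
x_h_prevr = tf.concat_v2([x, h_prev * r], 1)  # reset-gated variant, shape [8, 48]
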
@ -277,7 +277,7 @@ def _LSTMBlockCellGrad(op, *grad):
|
|||||||
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
|
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
|
||||||
|
|
||||||
# Backprop from dicfo to w.
|
# Backprop from dicfo to w.
|
||||||
xh = array_ops.concat(1, [x, h_prev])
|
xh = array_ops.concat_v2([x, h_prev], 1)
|
||||||
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
|
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
|
||||||
w_grad.get_shape().merge_with(w.get_shape())
|
w_grad.get_shape().merge_with(w.get_shape())
|
||||||
|
|
||||||
@ -527,10 +527,10 @@ class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
|
|||||||
# correctly, since we want to access the last valid state at
|
# correctly, since we want to access the last valid state at
|
||||||
# sequence_length - 1, which can even be -1, corresponding to the
|
# sequence_length - 1, which can even be -1, corresponding to the
|
||||||
# initial state.
|
# initial state.
|
||||||
mod_cell_states = array_ops.concat(
|
mod_cell_states = array_ops.concat_v2(
|
||||||
0, [array_ops.expand_dims(initial_cell_state, [0]), cell_states])
|
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
|
||||||
mod_outputs = array_ops.concat(
|
mod_outputs = array_ops.concat_v2(
|
||||||
0, [array_ops.expand_dims(initial_output, [0]), outputs])
|
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
|
||||||
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
|
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
|
||||||
batch_size)
|
batch_size)
|
||||||
final_output = self._gather_states(mod_outputs, sequence_length,
|
final_output = self._gather_states(mod_outputs, sequence_length,
|
||||||
|
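The LSTMBlockWrapper hunk above prepends the initial state so that index sequence_length - 1 (which may be -1) is always valid. A sketch with illustrative time-major shapes:

import tensorflow as tf

initial_output = tf.zeros([4, 8])          # [batch, units]
outputs = tf.zeros([10, 4, 8])             # [time, batch, units]
mod_outputs = tf.concat_v2(
    [tf.expand_dims(initial_output, 0), outputs], 0)   # [11, 4, 8]
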
@ -212,7 +212,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
|
|||||||
sequence_length=sequence_length,
|
sequence_length=sequence_length,
|
||||||
dtype=dtype)
|
dtype=dtype)
|
||||||
# Concat the outputs to create the new input.
|
# Concat the outputs to create the new input.
|
||||||
prev_layer = tf.concat(2, outputs)
|
prev_layer = tf.concat_v2(outputs, 2)
|
||||||
states_fw.append(state_fw)
|
states_fw.append(state_fw)
|
||||||
states_bw.append(state_bw)
|
states_bw.append(state_bw)
|
||||||
|
|
||||||
|
@ -47,7 +47,7 @@ def _get_concat_variable(name, shape, dtype, num_shards):
|
|||||||
if value.name == concat_full_name:
|
if value.name == concat_full_name:
|
||||||
return value
|
return value
|
||||||
|
|
||||||
concat_variable = array_ops.concat(0, sharded_variable, name=concat_name)
|
concat_variable = array_ops.concat_v2(sharded_variable, 0, name=concat_name)
|
||||||
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
|
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
|
||||||
concat_variable)
|
concat_variable)
|
||||||
return concat_variable
|
return concat_variable
|
||||||
@ -131,7 +131,7 @@ class CoupledInputForgetGateLSTMCell(rnn_cell.RNNCell):
|
|||||||
if not state_is_tuple:
|
if not state_is_tuple:
|
||||||
logging.warn(
|
logging.warn(
|
||||||
"%s: Using a concatenated state is slower and will soon be "
|
"%s: Using a concatenated state is slower and will soon be "
|
||||||
"deprecated. Use state_is_tuple=True." % self)
|
"deprecated. Use state_is_tuple=True.", self)
|
||||||
self._num_units = num_units
|
self._num_units = num_units
|
||||||
self._use_peepholes = use_peepholes
|
self._use_peepholes = use_peepholes
|
||||||
self._initializer = initializer
|
self._initializer = initializer
|
||||||
@ -212,7 +212,7 @@ class CoupledInputForgetGateLSTMCell(rnn_cell.RNNCell):
|
|||||||
initializer=init_ops.zeros_initializer, dtype=dtype)
|
initializer=init_ops.zeros_initializer, dtype=dtype)
|
||||||
|
|
||||||
# j = new_input, f = forget_gate, o = output_gate
|
# j = new_input, f = forget_gate, o = output_gate
|
||||||
cell_inputs = array_ops.concat(1, [inputs, m_prev])
|
cell_inputs = array_ops.concat_v2([inputs, m_prev], 1)
|
||||||
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
|
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
|
||||||
j, f, o = array_ops.split(1, 3, lstm_matrix)
|
j, f, o = array_ops.split(1, 3, lstm_matrix)
|
||||||
|
|
||||||
@ -245,8 +245,8 @@ class CoupledInputForgetGateLSTMCell(rnn_cell.RNNCell):
|
|||||||
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
|
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
|
||||||
# pylint: enable=invalid-unary-operand-type
|
# pylint: enable=invalid-unary-operand-type
|
||||||
|
|
||||||
new_state = (rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple
|
new_state = (rnn_cell.LSTMStateTuple(c, m) if self._state_is_tuple else
|
||||||
else array_ops.concat(1, [c, m]))
|
array_ops.concat_v2([c, m], 1))
|
||||||
return m, new_state
|
return m, new_state
|
||||||
|
|
||||||
|
|
||||||
@ -356,8 +356,8 @@ class TimeFreqLSTMCell(rnn_cell.RNNCell):
|
|||||||
m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
|
m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
|
||||||
[-1, self._num_units])
|
[-1, self._num_units])
|
||||||
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
|
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
|
||||||
cell_inputs = array_ops.concat(1, [freq_inputs[fq], m_prev,
|
cell_inputs = array_ops.concat_v2(
|
||||||
m_prev_freq])
|
[freq_inputs[fq], m_prev, m_prev_freq], 1)
|
||||||
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
|
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
|
||||||
i, j, f, o = array_ops.split(1, 4, lstm_matrix)
|
i, j, f, o = array_ops.split(1, 4, lstm_matrix)
|
||||||
|
|
||||||
@ -378,11 +378,11 @@ class TimeFreqLSTMCell(rnn_cell.RNNCell):
|
|||||||
m = sigmoid(o) * tanh(c)
|
m = sigmoid(o) * tanh(c)
|
||||||
m_prev_freq = m
|
m_prev_freq = m
|
||||||
if fq == 0:
|
if fq == 0:
|
||||||
state_out = array_ops.concat(1, [c, m])
|
state_out = array_ops.concat_v2([c, m], 1)
|
||||||
m_out = m
|
m_out = m
|
||||||
else:
|
else:
|
||||||
state_out = array_ops.concat(1, [state_out, c, m])
|
state_out = array_ops.concat_v2([state_out, c, m], 1)
|
||||||
m_out = array_ops.concat(1, [m_out, m])
|
m_out = array_ops.concat_v2([m_out, m], 1)
|
||||||
return m_out, state_out
|
return m_out, state_out
|
||||||
|
|
||||||
def _make_tf_features(self, input_feat):
|
def _make_tf_features(self, input_feat):
|
||||||
@ -560,8 +560,8 @@ class GridLSTMCell(rnn_cell.RNNCell):
|
|||||||
if self._state_is_tuple:
|
if self._state_is_tuple:
|
||||||
state_out = self._state_tuple_type(*state_out_lst)
|
state_out = self._state_tuple_type(*state_out_lst)
|
||||||
else:
|
else:
|
||||||
state_out = array_ops.concat(1, state_out_lst)
|
state_out = array_ops.concat_v2(state_out_lst, 1)
|
||||||
m_out = array_ops.concat(1, m_out_lst)
|
m_out = array_ops.concat_v2(m_out_lst, 1)
|
||||||
return m_out, state_out
|
return m_out, state_out
|
||||||
|
|
||||||
def _compute(self, freq_inputs, block, state, batch_size,
|
def _compute(self, freq_inputs, block, state, batch_size,
|
||||||
@ -655,8 +655,8 @@ class GridLSTMCell(rnn_cell.RNNCell):
|
|||||||
[-1, self._num_units])
|
[-1, self._num_units])
|
||||||
|
|
||||||
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
|
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
|
||||||
cell_inputs = array_ops.concat(1, [freq_inputs[freq_index], m_prev_time,
|
cell_inputs = array_ops.concat_v2(
|
||||||
m_prev_freq])
|
[freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
|
||||||
|
|
||||||
# F-LSTM
|
# F-LSTM
|
||||||
lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
|
lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
|
||||||
@ -994,7 +994,7 @@ class BidirectionalGridLSTMCell(GridLSTMCell):
|
|||||||
bwd_state_out_lst.extend(bwd_state_out_lst_current)
|
bwd_state_out_lst.extend(bwd_state_out_lst_current)
|
||||||
state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
|
state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
|
||||||
# Outputs are always concatenated, as they are never used separately.
|
# Outputs are always concatenated, as they are never used separately.
|
||||||
m_out = array_ops.concat(1, fwd_m_out_lst + bwd_m_out_lst)
|
m_out = array_ops.concat_v2(fwd_m_out_lst + bwd_m_out_lst, 1)
|
||||||
return m_out, state_out
|
return m_out, state_out
|
||||||
|
|
||||||
|
|
||||||
@ -1045,7 +1045,7 @@ class AttentionCellWrapper(rnn_cell.RNNCell):
|
|||||||
if not state_is_tuple:
|
if not state_is_tuple:
|
||||||
logging.warn(
|
logging.warn(
|
||||||
"%s: Using a concatenated state is slower and will soon be "
|
"%s: Using a concatenated state is slower and will soon be "
|
||||||
"deprecated. Use state_is_tuple=True." % self)
|
"deprecated. Use state_is_tuple=True.", self)
|
||||||
if attn_size is None:
|
if attn_size is None:
|
||||||
attn_size = cell.output_size
|
attn_size = cell.output_size
|
||||||
if attn_vec_size is None:
|
if attn_vec_size is None:
|
||||||
@ -1091,19 +1091,19 @@ class AttentionCellWrapper(rnn_cell.RNNCell):
|
|||||||
inputs = _linear([inputs, attns], input_size, True)
|
inputs = _linear([inputs, attns], input_size, True)
|
||||||
lstm_output, new_state = self._cell(inputs, state)
|
lstm_output, new_state = self._cell(inputs, state)
|
||||||
if self._state_is_tuple:
|
if self._state_is_tuple:
|
||||||
new_state_cat = array_ops.concat(1, nest.flatten(new_state))
|
new_state_cat = array_ops.concat_v2(nest.flatten(new_state), 1)
|
||||||
else:
|
else:
|
||||||
new_state_cat = new_state
|
new_state_cat = new_state
|
||||||
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
|
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
|
||||||
with vs.variable_scope("attn_output_projection"):
|
with vs.variable_scope("attn_output_projection"):
|
||||||
output = _linear([lstm_output, new_attns], self._attn_size, True)
|
output = _linear([lstm_output, new_attns], self._attn_size, True)
|
||||||
new_attn_states = array_ops.concat(1, [new_attn_states,
|
new_attn_states = array_ops.concat_v2(
|
||||||
array_ops.expand_dims(output, 1)])
|
[new_attn_states, array_ops.expand_dims(output, 1)], 1)
|
||||||
new_attn_states = array_ops.reshape(
|
new_attn_states = array_ops.reshape(
|
||||||
new_attn_states, [-1, self._attn_length * self._attn_size])
|
new_attn_states, [-1, self._attn_length * self._attn_size])
|
||||||
new_state = (new_state, new_attns, new_attn_states)
|
new_state = (new_state, new_attns, new_attn_states)
|
||||||
if not self._state_is_tuple:
|
if not self._state_is_tuple:
|
||||||
new_state = array_ops.concat(1, list(new_state))
|
new_state = array_ops.concat_v2(list(new_state), 1)
|
||||||
return output, new_state
|
return output, new_state
|
||||||
|
|
||||||
def _attention(self, query, attn_states):
|
def _attention(self, query, attn_states):
|
||||||
@ -1218,7 +1218,7 @@ class LayerNormBasicLSTMCell(rnn_cell.RNNCell):
|
|||||||
|
|
||||||
with vs.variable_scope(scope or "layer_norm_basic_lstm_cell"):
|
with vs.variable_scope(scope or "layer_norm_basic_lstm_cell"):
|
||||||
c, h = state
|
c, h = state
|
||||||
args = array_ops.concat(1, [inputs, h])
|
args = array_ops.concat_v2([inputs, h], 1)
|
||||||
concat = self._linear(args)
|
concat = self._linear(args)
|
||||||
|
|
||||||
i, j, f, o = array_ops.split(1, 4, concat)
|
i, j, f, o = array_ops.split(1, 4, concat)
|
||||||
|
@ -137,7 +137,7 @@ class BoundingBox(ItemHandler):
|
|||||||
side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
|
side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
|
||||||
sides.append(side)
|
sides.append(side)
|
||||||
|
|
||||||
bounding_box = array_ops.concat(0, sides)
|
bounding_box = array_ops.concat_v2(sides, 0)
|
||||||
return array_ops.transpose(bounding_box)
|
return array_ops.transpose(bounding_box)
|
||||||
|
|
||||||
|
|
||||||
@ -252,8 +252,8 @@ class SparseTensor(ItemHandler):
|
|||||||
ids = math_ops.to_int64(indices.values)
|
ids = math_ops.to_int64(indices.values)
|
||||||
indices_columns_to_preserve = array_ops.slice(
|
indices_columns_to_preserve = array_ops.slice(
|
||||||
indices.indices, [0, 0], array_ops.pack([-1, rank - 1]))
|
indices.indices, [0, 0], array_ops.pack([-1, rank - 1]))
|
||||||
new_indices = array_ops.concat(1, [indices_columns_to_preserve,
|
new_indices = array_ops.concat_v2(
|
||||||
array_ops.reshape(ids, [-1, 1])])
|
[indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)
|
||||||
|
|
||||||
tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
|
tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
|
||||||
if self._densify:
|
if self._densify:
|
||||||
|
@ -91,7 +91,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -108,7 +108,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -130,7 +130,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -147,7 +147,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -164,7 +164,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -181,7 +181,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -198,7 +198,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -220,7 +220,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
|
|
||||||
@ -237,7 +237,7 @@ def inception_v1_base(inputs,
|
|||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
|
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if final_endpoint == end_point: return net, end_points
|
if final_endpoint == end_point: return net, end_points
|
||||||
raise ValueError('Unknown final endpoint %s' % final_endpoint)
|
raise ValueError('Unknown final endpoint %s' % final_endpoint)
|
||||||
|
@ -143,7 +143,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(32), [1, 1],
|
branch_3, depth(32), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# 28 x 28 x 256
|
# 28 x 28 x 256
|
||||||
@ -173,7 +173,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(64), [1, 1],
|
branch_3, depth(64), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# 28 x 28 x 320
|
# 28 x 28 x 320
|
||||||
@ -198,7 +198,7 @@ def inception_v2_base(inputs,
|
|||||||
with tf.variable_scope('Branch_2'):
|
with tf.variable_scope('Branch_2'):
|
||||||
branch_2 = slim.max_pool2d(
|
branch_2 = slim.max_pool2d(
|
||||||
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
|
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2])
|
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# 14 x 14 x 576
|
# 14 x 14 x 576
|
||||||
@ -228,7 +228,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(128), [1, 1],
|
branch_3, depth(128), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# 14 x 14 x 576
|
# 14 x 14 x 576
|
||||||
@ -258,7 +258,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(128), [1, 1],
|
branch_3, depth(128), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# 14 x 14 x 576
|
# 14 x 14 x 576
|
||||||
@ -288,7 +288,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(96), [1, 1],
|
branch_3, depth(96), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -319,7 +319,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(96), [1, 1],
|
branch_3, depth(96), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# 14 x 14 x 576
|
# 14 x 14 x 576
|
||||||
@ -344,7 +344,7 @@ def inception_v2_base(inputs,
|
|||||||
with tf.variable_scope('Branch_2'):
|
with tf.variable_scope('Branch_2'):
|
||||||
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
|
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
|
||||||
scope='MaxPool_1a_3x3')
|
scope='MaxPool_1a_3x3')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2])
|
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# 7 x 7 x 1024
|
# 7 x 7 x 1024
|
||||||
@ -374,7 +374,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(128), [1, 1],
|
branch_3, depth(128), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -405,7 +405,7 @@ def inception_v2_base(inputs,
|
|||||||
branch_3, depth(128), [1, 1],
|
branch_3, depth(128), [1, 1],
|
||||||
weights_initializer=trunc_normal(0.1),
|
weights_initializer=trunc_normal(0.1),
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
raise ValueError('Unknown final endpoint %s' % final_endpoint)
|
raise ValueError('Unknown final endpoint %s' % final_endpoint)
|
||||||
|
@ -156,7 +156,7 @@ def inception_v3_base(inputs,
|
|||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
|
branch_3 = slim.conv2d(branch_3, depth(32), [1, 1],
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -180,7 +180,7 @@ def inception_v3_base(inputs,
|
|||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
|
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -203,7 +203,7 @@ def inception_v3_base(inputs,
|
|||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
|
branch_3 = slim.conv2d(branch_3, depth(64), [1, 1],
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -222,7 +222,7 @@ def inception_v3_base(inputs,
|
|||||||
with tf.variable_scope('Branch_2'):
|
with tf.variable_scope('Branch_2'):
|
||||||
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
|
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
|
||||||
scope='MaxPool_1a_3x3')
|
scope='MaxPool_1a_3x3')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2])
|
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -251,7 +251,7 @@ def inception_v3_base(inputs,
|
|||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -280,7 +280,7 @@ def inception_v3_base(inputs,
|
|||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# mixed_6: 17 x 17 x 768.
|
# mixed_6: 17 x 17 x 768.
|
||||||
@ -308,7 +308,7 @@ def inception_v3_base(inputs,
|
|||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -337,7 +337,7 @@ def inception_v3_base(inputs,
|
|||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
branch_3 = slim.conv2d(branch_3, depth(192), [1, 1],
|
||||||
scope='Conv2d_0b_1x1')
|
scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -359,7 +359,7 @@ def inception_v3_base(inputs,
|
|||||||
with tf.variable_scope('Branch_2'):
|
with tf.variable_scope('Branch_2'):
|
||||||
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
|
branch_2 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
|
||||||
scope='MaxPool_1a_3x3')
|
scope='MaxPool_1a_3x3')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2])
|
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
# mixed_9: 8 x 8 x 2048.
|
# mixed_9: 8 x 8 x 2048.
|
||||||
@ -369,21 +369,31 @@ def inception_v3_base(inputs,
|
|||||||
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
|
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
|
||||||
with tf.variable_scope('Branch_1'):
|
with tf.variable_scope('Branch_1'):
|
||||||
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
|
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
|
||||||
branch_1 = tf.concat(3, [
|
branch_1 = tf.concat_v2(
|
||||||
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
|
[
|
||||||
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')])
|
slim.conv2d(
|
||||||
|
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
|
||||||
|
slim.conv2d(
|
||||||
|
branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')
|
||||||
|
],
|
||||||
|
3)
|
||||||
with tf.variable_scope('Branch_2'):
|
with tf.variable_scope('Branch_2'):
|
||||||
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
|
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
|
||||||
branch_2 = slim.conv2d(
|
branch_2 = slim.conv2d(
|
||||||
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
|
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
|
||||||
branch_2 = tf.concat(3, [
|
branch_2 = tf.concat_v2(
|
||||||
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
|
[
|
||||||
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
|
slim.conv2d(
|
||||||
|
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
|
||||||
|
slim.conv2d(
|
||||||
|
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
|
||||||
|
],
|
||||||
|
3)
|
||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(
|
branch_3 = slim.conv2d(
|
||||||
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
|
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
|
|
||||||
@ -394,21 +404,31 @@ def inception_v3_base(inputs,
|
|||||||
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
|
branch_0 = slim.conv2d(net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
|
||||||
with tf.variable_scope('Branch_1'):
|
with tf.variable_scope('Branch_1'):
|
||||||
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
|
branch_1 = slim.conv2d(net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
|
||||||
branch_1 = tf.concat(3, [
|
branch_1 = tf.concat_v2(
|
||||||
slim.conv2d(branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
|
[
|
||||||
slim.conv2d(branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')])
|
slim.conv2d(
|
||||||
|
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
|
||||||
|
slim.conv2d(
|
||||||
|
branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')
|
||||||
|
],
|
||||||
|
3)
|
||||||
with tf.variable_scope('Branch_2'):
|
with tf.variable_scope('Branch_2'):
|
||||||
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
|
branch_2 = slim.conv2d(net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
|
||||||
branch_2 = slim.conv2d(
|
branch_2 = slim.conv2d(
|
||||||
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
|
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
|
||||||
branch_2 = tf.concat(3, [
|
branch_2 = tf.concat_v2(
|
||||||
slim.conv2d(branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
|
[
|
||||||
slim.conv2d(branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')])
|
slim.conv2d(
|
||||||
|
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
|
||||||
|
slim.conv2d(
|
||||||
|
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
|
||||||
|
],
|
||||||
|
3)
|
||||||
with tf.variable_scope('Branch_3'):
|
with tf.variable_scope('Branch_3'):
|
||||||
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
|
||||||
branch_3 = slim.conv2d(
|
branch_3 = slim.conv2d(
|
||||||
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
|
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
|
||||||
net = tf.concat(3, [branch_0, branch_1, branch_2, branch_3])
|
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
|
||||||
end_points[end_point] = net
|
end_points[end_point] = net
|
||||||
if end_point == final_endpoint: return net, end_points
|
if end_point == final_endpoint: return net, end_points
|
||||||
raise ValueError('Unknown final endpoint %s' % final_endpoint)
|
raise ValueError('Unknown final endpoint %s' % final_endpoint)
|
||||||
|
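All of the inception hunks above merge NHWC branch outputs along the channel axis (3); only the argument order changes. A sketch with illustrative feature-map shapes:

import tensorflow as tf

branch_0 = tf.zeros([1, 28, 28, 64])
branch_1 = tf.zeros([1, 28, 28, 96])
branch_2 = tf.zeros([1, 28, 28, 32])
branch_3 = tf.zeros([1, 28, 28, 32])
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)   # [1, 28, 28, 224]
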
@ -228,4 +228,5 @@ def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
|
|||||||
beta = tf.expand_dims(beta[:-1], 0)
|
beta = tf.expand_dims(beta[:-1], 0)
|
||||||
shape = tf.shape(matrix)
|
shape = tf.shape(matrix)
|
||||||
zero_column = tf.expand_dims(tf.zeros(shape[:1], dtype=matrix.dtype), 1)
|
zero_column = tf.expand_dims(tf.zeros(shape[:1], dtype=matrix.dtype), 1)
|
||||||
return matrix * alpha + tf.concat(1, [zero_column, matrix[:, :-1] * beta])
|
return matrix * alpha + tf.concat_v2([zero_column, matrix[:, :-1] * beta],
|
||||||
|
1)
|
||||||
|
@ -60,7 +60,7 @@ class Conc(specs_lib.Composable):
|
|||||||
|
|
||||||
def funcall(self, x):
|
def funcall(self, x):
|
||||||
outputs = [f.funcall(x) for f in self.funs]
|
outputs = [f.funcall(x) for f in self.funs]
|
||||||
return tf.concat(self.dim, outputs)
|
return tf.concat_v2(outputs, self.dim)
|
||||||
|
|
||||||
|
|
||||||
External = specs_lib.External
|
External = specs_lib.External
|
||||||
|
@ -97,7 +97,7 @@ def ParseDataTensorOrDict(data):
|
|||||||
if is_sparse:
|
if is_sparse:
|
||||||
return sparse_ops.sparse_concat(1, features), data_spec
|
return sparse_ops.sparse_concat(1, features), data_spec
|
||||||
else:
|
else:
|
||||||
return array_ops.concat(1, features), data_spec
|
return array_ops.concat_v2(features, 1), data_spec
|
||||||
else:
|
else:
|
||||||
return (data, [constants.DATA_FLOAT])
|
return (data, [constants.DATA_FLOAT])
|
||||||
|
|
||||||
@ -118,10 +118,15 @@ def ParseLabelTensorOrDict(labels):
|
|||||||
A 2-D tensor for labels/outputs.
|
A 2-D tensor for labels/outputs.
|
||||||
"""
|
"""
|
||||||
if isinstance(labels, dict):
|
if isinstance(labels, dict):
|
||||||
return math_ops.to_float(array_ops.concat(
|
return math_ops.to_float(
|
||||||
1, [sparse_ops.sparse_tensor_to_dense(labels[k], default_value=-1)
|
array_ops.concat_v2(
|
||||||
if isinstance(labels, sparse_tensor.SparseTensor)
|
[
|
||||||
else labels[k] for k in sorted(labels.keys())]))
|
sparse_ops.sparse_tensor_to_dense(
|
||||||
|
labels[k], default_value=-1) if isinstance(
|
||||||
|
labels, sparse_tensor.SparseTensor) else labels[k]
|
||||||
|
for k in sorted(labels.keys())
|
||||||
|
],
|
||||||
|
1))
|
||||||
else:
|
else:
|
||||||
if isinstance(labels, sparse_tensor.SparseTensor):
|
if isinstance(labels, sparse_tensor.SparseTensor):
|
||||||
return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
|
return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
|
||||||
|
@ -76,7 +76,7 @@ class FlattenedFullyConnectedLayer(hybrid_layer.HybridLayer):
|
|||||||
nn_activations[-1],
|
nn_activations[-1],
|
||||||
self.params.layer_size))
|
self.params.layer_size))
|
||||||
|
|
||||||
nn_activations_tensor = array_ops.concat(
|
nn_activations_tensor = array_ops.concat_v2(
|
||||||
1, nn_activations, name="flattened_nn_activations")
|
nn_activations, 1, name="flattened_nn_activations")
|
||||||
|
|
||||||
return nn_activations_tensor
|
return nn_activations_tensor
|
||||||
|
@ -335,8 +335,8 @@ class RandomForestGraphs(object):
|
|||||||
|
|
||||||
def _bag_features(self, tree_num, input_data):
|
def _bag_features(self, tree_num, input_data):
|
||||||
split_data = array_ops.split(1, self.params.num_features, input_data)
|
split_data = array_ops.split(1, self.params.num_features, input_data)
|
||||||
return array_ops.concat(
|
return array_ops.concat_v2(
|
||||||
1, [split_data[ind] for ind in self.params.bagged_features[tree_num]])
|
[split_data[ind] for ind in self.params.bagged_features[tree_num]], 1)
|
||||||
|
|
||||||
def training_graph(self,
|
def training_graph(self,
|
||||||
input_data,
|
input_data,
|
||||||
@ -808,8 +808,8 @@ class RandomTreeGraphs(object):
|
|||||||
state_ops.scatter_update(self.variables.accumulator_to_node_map,
|
state_ops.scatter_update(self.variables.accumulator_to_node_map,
|
||||||
a2n_map_updates[0], a2n_map_updates[1]))
|
a2n_map_updates[0], a2n_map_updates[1]))
|
||||||
|
|
||||||
cleared_and_allocated_accumulators = array_ops.concat(
|
cleared_and_allocated_accumulators = array_ops.concat_v2(
|
||||||
0, [accumulators_cleared, accumulators_allocated])
|
[accumulators_cleared, accumulators_allocated], 0)
|
||||||
|
|
||||||
# Calculate values to put into scatter update for candidate counts.
|
# Calculate values to put into scatter update for candidate counts.
|
||||||
# Candidate split counts are always reset back to 0 for both cleared
|
# Candidate split counts are always reset back to 0 for both cleared
|
||||||
@ -839,7 +839,7 @@ class RandomTreeGraphs(object):
|
|||||||
array_ops.zeros_like(accumulators_allocated,
|
array_ops.zeros_like(accumulators_allocated,
|
||||||
dtype=dtypes.float32), 1),
|
dtype=dtypes.float32), 1),
|
||||||
[1, self.params.num_output_columns])
|
[1, self.params.num_output_columns])
|
||||||
accumulator_updates = array_ops.concat(0, [total_cleared, total_reset])
|
accumulator_updates = array_ops.concat_v2([total_cleared, total_reset], 0)
|
||||||
updates.append(state_ops.scatter_update(
|
updates.append(state_ops.scatter_update(
|
||||||
self.variables.accumulator_sums,
|
self.variables.accumulator_sums,
|
||||||
cleared_and_allocated_accumulators, accumulator_updates))
|
cleared_and_allocated_accumulators, accumulator_updates))
|
||||||
|
@ -113,9 +113,10 @@ class TopN(object):
|
|||||||
shortlist_ids_to_remove, new_length = self.ops.top_n_remove(self.sl_ids,
|
shortlist_ids_to_remove, new_length = self.ops.top_n_remove(self.sl_ids,
|
||||||
ids)
|
ids)
|
||||||
u1 = tf.scatter_update(
|
u1 = tf.scatter_update(
|
||||||
self.sl_ids, tf.concat(0, [[0], shortlist_ids_to_remove]),
|
self.sl_ids,
|
||||||
tf.concat(0, [new_length,
|
tf.concat_v2([[0], shortlist_ids_to_remove], 0),
|
||||||
tf.ones_like(shortlist_ids_to_remove) * -1]))
|
tf.concat_v2([new_length, tf.ones_like(shortlist_ids_to_remove) * -1],
|
||||||
|
0))
|
||||||
u2 = tf.scatter_update(
|
u2 = tf.scatter_update(
|
||||||
self.sl_scores,
|
self.sl_scores,
|
||||||
shortlist_ids_to_remove,
|
shortlist_ids_to_remove,
|
||||||
@ -133,9 +134,9 @@ class TopN(object):
|
|||||||
new_length = tf.reduce_sum(
|
new_length = tf.reduce_sum(
|
||||||
tf.to_int32(tf.greater(new_scores, tf.float32.min)))
|
tf.to_int32(tf.greater(new_scores, tf.float32.min)))
|
||||||
u1 = self.sl_ids.assign(
|
u1 = self.sl_ids.assign(
|
||||||
tf.to_int64(tf.concat(0, [[new_length], new_ids])))
|
tf.to_int64(tf.concat_v2([[new_length], new_ids], 0)))
|
||||||
u2 = self.sl_scores.assign(
|
u2 = self.sl_scores.assign(
|
||||||
tf.concat(0, [[smallest_new_score], new_scores]))
|
tf.concat_v2([[smallest_new_score], new_scores], 0))
|
||||||
self.last_ops = [u1, u2]
|
self.last_ops = [u1, u2]
|
||||||
return tf.group(u1, u2)
|
return tf.group(u1, u2)
|
||||||
|
|
||||||
|
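A rough sketch of the TopN shortlist update above (hypothetical values; slot 0 of sl_ids stores the shortlist length, and removed slots are overwritten with -1):

import tensorflow as tf

shortlist_ids_to_remove = tf.constant([3, 7])
new_length = tf.constant([5])
indices = tf.concat_v2([[0], shortlist_ids_to_remove], 0)              # [0, 3, 7]
updates = tf.concat_v2(
    [new_length, tf.ones_like(shortlist_ids_to_remove) * -1], 0)       # [5, -1, -1]
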
@ -114,7 +114,7 @@ def resample_at_rate(inputs, rates, scope=None, seed=None, back_prop=False):
|
|||||||
# concatenating zero-size TensorArrays" limitation:
|
# concatenating zero-size TensorArrays" limitation:
|
||||||
def _empty_tensor_like(t):
|
def _empty_tensor_like(t):
|
||||||
result = array_ops.zeros(
|
result = array_ops.zeros(
|
||||||
shape=(array_ops.concat(0, [[0], array_ops.shape(t)[1:]])),
|
shape=(array_ops.concat_v2([[0], array_ops.shape(t)[1:]], 0)),
|
||||||
dtype=t.dtype)
|
dtype=t.dtype)
|
||||||
if t.get_shape().ndims is not None:
|
if t.get_shape().ndims is not None:
|
||||||
# preserve known shapes
|
# preserve known shapes
|
||||||
|
@ -1065,38 +1065,52 @@ class SequenceQueueingStateSaver(object):
|
|||||||
":",
|
":",
|
||||||
self._key],
|
self._key],
|
||||||
name="StringJoinCurrentKeys")
|
name="StringJoinCurrentKeys")
|
||||||
next_keys = array_ops.concat(
|
next_keys = array_ops.concat_v2(
|
||||||
0, [array_ops.slice(current_keys, [1], [-1]),
|
[
|
||||||
array_ops.expand_dims(string_ops.string_join(
|
array_ops.slice(current_keys, [1], [-1]), array_ops.expand_dims(
|
||||||
["STOP:", self._key], name="StringJoinStop"), 0)],
|
string_ops.string_join(
|
||||||
|
["STOP:", self._key], name="StringJoinStop"),
|
||||||
|
0)
|
||||||
|
],
|
||||||
|
0,
|
||||||
name="concat_next_keys")
|
name="concat_next_keys")
|
||||||
reshaped_sequences = collections.OrderedDict(
|
reshaped_sequences = collections.OrderedDict((
|
||||||
(k, _check_dimensions(
|
k,
|
||||||
|
_check_dimensions(
|
||||||
# Reshape sequences to sequence_count rows
|
# Reshape sequences to sequence_count rows
|
||||||
array_ops.reshape(
|
array_ops.reshape(
|
||||||
v, array_ops.concat(
|
v,
|
||||||
0, [array_ops.expand_dims(sequence_count, 0),
|
array_ops.concat_v2(
|
||||||
|
[
|
||||||
|
array_ops.expand_dims(sequence_count, 0),
|
||||||
array_ops.expand_dims(self._num_unroll, 0),
|
array_ops.expand_dims(self._num_unroll, 0),
|
||||||
v.get_shape().as_list()[1:]],
|
v.get_shape().as_list()[1:]
|
||||||
|
],
|
||||||
|
0,
|
||||||
name="concat_sequences_%s" % k),
|
name="concat_sequences_%s" % k),
|
||||||
name="reshape_sequences_%s" % k),
|
name="reshape_sequences_%s" % k),
|
||||||
[0, 1] + list(range(2, v.get_shape().ndims + 1)),
|
[0, 1] + list(range(2, v.get_shape().ndims + 1)),
|
||||||
[sequence_count, self._num_unroll] + v.get_shape().as_list()[1:],
|
[sequence_count, self._num_unroll] + v.get_shape().as_list()[1:],
|
||||||
debug_prefix="reshaped_sequences_%s" % k))
|
debug_prefix="reshaped_sequences_%s" %
|
||||||
for k, v in self._sorted_sequences.items())
|
k)) for k, v in self._sorted_sequences.items())
|
||||||
expanded_context = collections.OrderedDict(
|
expanded_context = collections.OrderedDict(
|
||||||
(k, _check_dimensions(
|
(
|
||||||
# Copy context to be sequence_count rows
|
k,
|
||||||
array_ops.tile(
|
_check_dimensions(
|
||||||
array_ops.expand_dims(v, 0),
|
# Copy context to be sequence_count rows
|
||||||
array_ops.concat(
|
array_ops.tile(
|
||||||
0, [array_ops.expand_dims(sequence_count, 0),
|
array_ops.expand_dims(v, 0),
|
||||||
[1] * v.get_shape().ndims],
|
array_ops.concat_v2(
|
||||||
name="concat_context_%s" % k),
|
[
|
||||||
name="tile_context_%s" % k),
|
array_ops.expand_dims(sequence_count, 0),
|
||||||
[0] + list(range(1, v.get_shape().ndims + 1)),
|
[1] * v.get_shape().ndims
|
||||||
[sequence_count] + v.get_shape().as_list(),
|
],
|
||||||
debug_prefix="expanded_context_%s" % k))
|
0,
|
||||||
|
name="concat_context_%s" % k),
|
||||||
|
name="tile_context_%s" % k),
|
||||||
|
[0] + list(range(1, v.get_shape().ndims + 1)),
|
||||||
|
[sequence_count] + v.get_shape().as_list(),
|
||||||
|
debug_prefix="expanded_context_%s" % k))
|
||||||
for k, v in self._sorted_context.items())
|
for k, v in self._sorted_context.items())
|
||||||
|
|
||||||
# Storing into the barrier, for each current_key:
|
# Storing into the barrier, for each current_key:
|
||||||
@ -1474,12 +1488,12 @@ def _padding(sequences, num_unroll):
|
|||||||
# the shape of the paddings that we concat with the original value will be
|
# the shape of the paddings that we concat with the original value will be
|
||||||
# [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
|
# [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
|
||||||
# tf.shape(value)[tf.rank(value) - 1])]
|
# tf.shape(value)[tf.rank(value) - 1])]
|
||||||
padding_shape = array_ops.concat(0, (
|
padding_shape = array_ops.concat_v2((num_paddings,
|
||||||
num_paddings, array_ops.shape(value)[1:]))
|
array_ops.shape(value)[1:]), 0)
|
||||||
# 2. fill padding shape with dummies
|
# 2. fill padding shape with dummies
|
||||||
dummy = array_ops.constant("" if value.dtype == dtypes.string else 0,
|
dummy = array_ops.constant("" if value.dtype == dtypes.string else 0,
|
||||||
dtype=value.dtype)
|
dtype=value.dtype)
|
||||||
paddings = array_ops.fill(dims=padding_shape, value=dummy)
|
paddings = array_ops.fill(dims=padding_shape, value=dummy)
|
||||||
# 3. concat values with paddings
|
# 3. concat values with paddings
|
||||||
padded_sequences[key] = array_ops.concat(0, [value, paddings])
|
padded_sequences[key] = array_ops.concat_v2([value, paddings], 0)
|
||||||
return length, padded_sequences
|
return length, padded_sequences
|
||||||
|
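For context on the _padding hunk above: the updated code builds a padding shape by concatenating the number of padding rows with the trailing dimensions of the value, fills that shape with a dummy element, and appends the padding along axis 0. A standalone sketch under simplified assumptions (the real helper walks a dict of sequences and derives the padding count from num_unroll; pad_along_axis0 and its arguments here are hypothetical):

import tensorflow as tf

def pad_along_axis0(value, num_paddings):
  # Padding shape is [num_paddings, tf.shape(value)[1], ..., tf.shape(value)[-1]].
  padding_shape = tf.concat_v2(([num_paddings], tf.shape(value)[1:]), 0)
  dummy = tf.constant("" if value.dtype == tf.string else 0, dtype=value.dtype)
  paddings = tf.fill(dims=padding_shape, value=dummy)
  # Append the dummy rows after the real data.
  return tf.concat_v2([value, paddings], 0)

padded = pad_along_axis0(tf.ones([5, 3]), num_paddings=3)  # shape (8, 3)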
@ -573,10 +573,10 @@
|
|||||||
" with tf.control_dependencies([saved_output.assign(output),\n",
|
" with tf.control_dependencies([saved_output.assign(output),\n",
|
||||||
" saved_state.assign(state)]):\n",
|
" saved_state.assign(state)]):\n",
|
||||||
" # Classifier.\n",
|
" # Classifier.\n",
|
||||||
" logits = tf.nn.xw_plus_b(tf.concat(0, outputs), w, b)\n",
|
" logits = tf.nn.xw_plus_b(tf.concat_v2(outputs, 0), w, b)\n",
|
||||||
" loss = tf.reduce_mean(\n",
|
" loss = tf.reduce_mean(\n",
|
||||||
" tf.nn.softmax_cross_entropy_with_logits(\n",
|
" tf.nn.softmax_cross_entropy_with_logits(\n",
|
||||||
" logits, tf.concat(0, train_labels)))\n",
|
" logits, tf.concat_v2(train_labels, 0)))\n",
|
||||||
"\n",
|
"\n",
|
||||||
" # Optimizer.\n",
|
" # Optimizer.\n",
|
||||||
" global_step = tf.Variable(0)\n",
|
" global_step = tf.Variable(0)\n",
|
||||||
@ -1066,4 +1066,4 @@
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
@ -124,7 +124,7 @@ def average_gradients(tower_grads):
|
|||||||
grads.append(expanded_g)
|
grads.append(expanded_g)
|
||||||
|
|
||||||
# Average over the 'tower' dimension.
|
# Average over the 'tower' dimension.
|
||||||
grad = tf.concat(0, grads)
|
grad = tf.concat_v2(grads, 0)
|
||||||
grad = tf.reduce_mean(grad, 0)
|
grad = tf.reduce_mean(grad, 0)
|
||||||
|
|
||||||
# Keep in mind that the Variables are redundant because they are shared
|
# Keep in mind that the Variables are redundant because they are shared
|
||||||
|
@ -141,7 +141,7 @@ class PTBModel(object):
|
|||||||
(cell_output, state) = cell(inputs[:, time_step, :], state)
|
(cell_output, state) = cell(inputs[:, time_step, :], state)
|
||||||
outputs.append(cell_output)
|
outputs.append(cell_output)
|
||||||
|
|
||||||
output = tf.reshape(tf.concat(1, outputs), [-1, size])
|
output = tf.reshape(tf.concat_v2(outputs, 1), [-1, size])
|
||||||
softmax_w = tf.get_variable(
|
softmax_w = tf.get_variable(
|
||||||
"softmax_w", [size, vocab_size], dtype=data_type())
|
"softmax_w", [size, vocab_size], dtype=data_type())
|
||||||
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
|
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
|
||||||
|
@ -637,7 +637,7 @@ class UnrollLSTMTest(tf.test.TestCase):
|
|||||||
# Helper to construct a LSTM cell graph.
|
# Helper to construct a LSTM cell graph.
|
||||||
@classmethod
|
@classmethod
|
||||||
def LSTMCell(cls, x, mprev, cprev, weights):
|
def LSTMCell(cls, x, mprev, cprev, weights):
|
||||||
xm = tf.concat(1, [x, mprev])
|
xm = tf.concat_v2([x, mprev], 1)
|
||||||
i_i, i_g, f_g, o_g = tf.split(1, 4, tf.matmul(xm, weights))
|
i_i, i_g, f_g, o_g = tf.split(1, 4, tf.matmul(xm, weights))
|
||||||
new_c = tf.sigmoid(f_g) * cprev + tf.sigmoid(i_g) * tf.tanh(i_i)
|
new_c = tf.sigmoid(f_g) * cprev + tf.sigmoid(i_g) * tf.tanh(i_i)
|
||||||
new_c = tf.clip_by_value(new_c, -50.0, 50.0)
|
new_c = tf.clip_by_value(new_c, -50.0, 50.0)
|
||||||
|
@ -553,7 +553,7 @@ class ConstantValueTest(tf.test.TestCase):
|
|||||||
self.assertAllClose(np_val, tf.contrib.util.constant_value(tf_val))
|
self.assertAllClose(np_val, tf.contrib.util.constant_value(tf_val))
|
||||||
|
|
||||||
def testUnknown(self):
|
def testUnknown(self):
|
||||||
tf_val = gen_state_ops._variable(shape=[3, 4, 7], dtype=tf.float32,
|
tf_val = gen_state_ops._variable(shape=[3, 4, 7], dtype=tf.float32,
|
||||||
name="tf_val", container="", shared_name="")
|
name="tf_val", container="", shared_name="")
|
||||||
self.assertIs(None, tf.contrib.util.constant_value(tf_val))
|
self.assertIs(None, tf.contrib.util.constant_value(tf_val))
|
||||||
|
|
||||||
@ -607,21 +607,18 @@ class ConstantValueTest(tf.test.TestCase):
|
|||||||
|
|
||||||
def testConcat(self):
|
def testConcat(self):
|
||||||
np_val = np.random.rand(3, 4, 7).astype(np.float32)
|
np_val = np.random.rand(3, 4, 7).astype(np.float32)
|
||||||
tf_val = tf.concat(
|
tf_val = tf.concat_v2(
|
||||||
0, [np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]])
|
[np_val[0:1, :, :], np_val[1:2, :, :], np_val[2:3, :, :]], 0)
|
||||||
c_val = tf.contrib.util.constant_value(tf_val)
|
c_val = tf.contrib.util.constant_value(tf_val)
|
||||||
self.assertAllClose(np_val, c_val)
|
self.assertAllClose(np_val, c_val)
|
||||||
|
|
||||||
tf_val = tf.concat(
|
tf_val = tf.concat_v2([np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]],
|
||||||
tf.placeholder(tf.int32),
|
tf.placeholder(tf.int32))
|
||||||
[np_val[0, :, :], np_val[1, :, :], np_val[2, :, :]])
|
|
||||||
c_val = tf.contrib.util.constant_value(tf_val)
|
c_val = tf.contrib.util.constant_value(tf_val)
|
||||||
self.assertIs(None, c_val)
|
self.assertIs(None, c_val)
|
||||||
|
|
||||||
tf_val = tf.concat(
|
tf_val = tf.concat_v2(
|
||||||
1,
|
[np_val[0, :, :], tf.placeholder(tf.float32), np_val[2, :, :]], 1)
|
||||||
[np_val[0, :, :], tf.placeholder(tf.float32),
|
|
||||||
np_val[2, :, :]])
|
|
||||||
c_val = tf.contrib.util.constant_value(tf_val)
|
c_val = tf.contrib.util.constant_value(tf_val)
|
||||||
self.assertIs(None, c_val)
|
self.assertIs(None, c_val)
|
||||||
|
|
||||||
@ -660,12 +657,13 @@ class ConstantValueAsShapeTest(tf.test.TestCase):
|
|||||||
self.assertEqual([16, 37, None], c_val.as_list())
|
self.assertEqual([16, 37, None], c_val.as_list())
|
||||||
|
|
||||||
def testConcat(self):
|
def testConcat(self):
|
||||||
tf_val = tf.concat(0, [[16, 37], tf.placeholder(tf.int32, shape=(2,))])
|
tf_val = tf.concat_v2([[16, 37], tf.placeholder(tf.int32, shape=(2,))], 0)
|
||||||
c_val = tensor_util.constant_value_as_shape(tf_val)
|
c_val = tensor_util.constant_value_as_shape(tf_val)
|
||||||
self.assertEqual([16, 37, None, None], c_val.as_list())
|
self.assertEqual([16, 37, None, None], c_val.as_list())
|
||||||
|
|
||||||
tf_val = tf.concat(0,
|
tf_val = tf.concat_v2(
|
||||||
[[16, 37], tf.placeholder(tf.int32, shape=(1,)), [48]])
|
[[16, 37], tf.placeholder(
|
||||||
|
tf.int32, shape=(1,)), [48]], 0)
|
||||||
c_val = tensor_util.constant_value_as_shape(tf_val)
|
c_val = tensor_util.constant_value_as_shape(tf_val)
|
||||||
self.assertEqual([16, 37, None, 48], c_val.as_list())
|
self.assertEqual([16, 37, None, 48], c_val.as_list())
|
||||||
|
|
||||||
|
@ -63,11 +63,13 @@ class ConfusionMatrixTest(tf.test.TestCase):
|
|||||||
neg = tf.random_normal([20], mean=m_neg, stddev=s, dtype=tf.float32)
|
neg = tf.random_normal([20], mean=m_neg, stddev=s, dtype=tf.float32)
|
||||||
pos = tf.random_normal([20], mean=m_pos, stddev=s, dtype=tf.float32)
|
pos = tf.random_normal([20], mean=m_pos, stddev=s, dtype=tf.float32)
|
||||||
|
|
||||||
data = tf.concat(0, [neg, pos])
|
data = tf.concat_v2([neg, pos], 0)
|
||||||
data = tf.cast(tf.round(data), tf_dtype)
|
data = tf.cast(tf.round(data), tf_dtype)
|
||||||
data = tf.minimum(tf.maximum(data, 0), 1)
|
data = tf.minimum(tf.maximum(data, 0), 1)
|
||||||
lab = tf.concat(0, [tf.zeros([20], dtype=tf_dtype),
|
lab = tf.concat_v2(
|
||||||
tf.ones([20], dtype=tf_dtype)])
|
[tf.zeros(
|
||||||
|
[20], dtype=tf_dtype), tf.ones(
|
||||||
|
[20], dtype=tf_dtype)], 0)
|
||||||
|
|
||||||
cm = tf.confusion_matrix(
|
cm = tf.confusion_matrix(
|
||||||
lab, data, dtype=tf_dtype, num_classes=2)
|
lab, data, dtype=tf_dtype, num_classes=2)
|
||||||
|
@ -646,7 +646,7 @@ class ControlFlowTest(tf.test.TestCase):
|
|||||||
def compute(i, c, o):
|
def compute(i, c, o):
|
||||||
c = tf.strided_slice(x, tf.expand_dims(i, 0),
|
c = tf.strided_slice(x, tf.expand_dims(i, 0),
|
||||||
[1] + tf.expand_dims(i, 0))
|
[1] + tf.expand_dims(i, 0))
|
||||||
o = tf.concat(0, [o, c])
|
o = tf.concat_v2([o, c], 0)
|
||||||
i = tf.add(i, 1)
|
i = tf.add(i, 1)
|
||||||
return [i, c, o]
|
return [i, c, o]
|
||||||
|
|
||||||
@ -726,7 +726,7 @@ class ControlFlowTest(tf.test.TestCase):
|
|||||||
c = lambda i, j: tf.less(i, 2)
|
c = lambda i, j: tf.less(i, 2)
|
||||||
def b(i, j):
|
def b(i, j):
|
||||||
new_i = tf.add(i, 1)
|
new_i = tf.add(i, 1)
|
||||||
new_j = tf.concat(0, [j, j])
|
new_j = tf.concat_v2([j, j], 0)
|
||||||
return [new_i, new_j]
|
return [new_i, new_j]
|
||||||
r = tf.while_loop(c, b, [i, m],
|
r = tf.while_loop(c, b, [i, m],
|
||||||
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
|
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
|
||||||
@ -1626,7 +1626,7 @@ class ControlFlowTest(tf.test.TestCase):
|
|||||||
return i < 2
|
return i < 2
|
||||||
|
|
||||||
def body(i, h):
|
def body(i, h):
|
||||||
return i+1, tf.concat(0, [h, x])
|
return i + 1, tf.concat_v2([h, x], 0)
|
||||||
|
|
||||||
_, h = tf.while_loop(
|
_, h = tf.while_loop(
|
||||||
condition, body, [i0, h0],
|
condition, body, [i0, h0],
|
||||||
|
@ -146,7 +146,7 @@ def _EmbeddingParamsAsPartitionedVariable(num_shards, vocab_size,
|
|||||||
partitioned_variable = tf.get_variable(
|
partitioned_variable = tf.get_variable(
|
||||||
"p",
|
"p",
|
||||||
shape=[vocab_size] + shape,
|
shape=[vocab_size] + shape,
|
||||||
initializer=tf.concat(0, [params[p_i.name] for p_i in p]),
|
initializer=tf.concat_v2([params[p_i.name] for p_i in p], 0),
|
||||||
partitioner=tf.min_max_variable_partitioner(
|
partitioner=tf.min_max_variable_partitioner(
|
||||||
max_partitions=num_shards, min_slice_size=1))
|
max_partitions=num_shards, min_slice_size=1))
|
||||||
return p, partitioned_variable, params, feed_dict
|
return p, partitioned_variable, params, feed_dict
|
||||||
|
@ -28,7 +28,7 @@ class LargeConcatOpTest(tf.test.TestCase):
|
|||||||
with tf.device("/cpu:0"):
|
with tf.device("/cpu:0"):
|
||||||
a = tf.ones([2**31 + 6], dtype=tf.int8)
|
a = tf.ones([2**31 + 6], dtype=tf.int8)
|
||||||
b = tf.zeros([1024], dtype=tf.int8)
|
b = tf.zeros([1024], dtype=tf.int8)
|
||||||
onezeros = tf.concat(0, [a, b])
|
onezeros = tf.concat_v2([a, b], 0)
|
||||||
with self.test_session(use_gpu=False):
|
with self.test_session(use_gpu=False):
|
||||||
# TODO(dga): Add more depth to this test to validate correctness,
|
# TODO(dga): Add more depth to this test to validate correctness,
|
||||||
# not just non-crashingness, once other large tensor fixes have gone in.
|
# not just non-crashingness, once other large tensor fixes have gone in.
|
||||||
|
@ -3300,12 +3300,14 @@ class MeanIOUTest(tf.test.TestCase):
|
|||||||
self.assertAlmostEqual(desired_output, miou.eval())
|
self.assertAlmostEqual(desired_output, miou.eval())
|
||||||
|
|
||||||
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
|
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
|
||||||
predictions = tf.concat(0,
|
predictions = tf.concat_v2(
|
||||||
[tf.constant(0, shape=[5]),
|
[tf.constant(
|
||||||
tf.constant(1, shape=[5])])
|
0, shape=[5]), tf.constant(
|
||||||
labels = tf.concat(0,
|
1, shape=[5])], 0)
|
||||||
[tf.constant(0, shape=[3]),
|
labels = tf.concat_v2(
|
||||||
tf.constant(1, shape=[7])])
|
[tf.constant(
|
||||||
|
0, shape=[3]), tf.constant(
|
||||||
|
1, shape=[7])], 0)
|
||||||
num_classes = 2
|
num_classes = 2
|
||||||
with self.test_session() as sess:
|
with self.test_session() as sess:
|
||||||
miou, update_op = metrics.mean_iou(
|
miou, update_op = metrics.mean_iou(
|
||||||
@ -3339,14 +3341,23 @@ class MeanIOUTest(tf.test.TestCase):
|
|||||||
self.assertEqual(0., miou.eval())
|
self.assertEqual(0., miou.eval())
|
||||||
|
|
||||||
def testResultsWithSomeMissing(self):
|
def testResultsWithSomeMissing(self):
|
||||||
predictions = tf.concat(0, [tf.constant(0, shape=[5]),
|
predictions = tf.concat_v2(
|
||||||
tf.constant(1, shape=[5])])
|
[tf.constant(
|
||||||
labels = tf.concat(0, [tf.constant(0, shape=[3]),
|
0, shape=[5]), tf.constant(
|
||||||
tf.constant(1, shape=[7])])
|
1, shape=[5])], 0)
|
||||||
|
labels = tf.concat_v2(
|
||||||
|
[tf.constant(
|
||||||
|
0, shape=[3]), tf.constant(
|
||||||
|
1, shape=[7])], 0)
|
||||||
num_classes = 2
|
num_classes = 2
|
||||||
weights = tf.concat(0, [tf.constant(0, shape=[1]),
|
weights = tf.concat_v2(
|
||||||
tf.constant(1, shape=[8]),
|
[
|
||||||
tf.constant(0, shape=[1])])
|
tf.constant(
|
||||||
|
0, shape=[1]), tf.constant(
|
||||||
|
1, shape=[8]), tf.constant(
|
||||||
|
0, shape=[1])
|
||||||
|
],
|
||||||
|
0)
|
||||||
with self.test_session() as sess:
|
with self.test_session() as sess:
|
||||||
miou, update_op = metrics.mean_iou(
|
miou, update_op = metrics.mean_iou(
|
||||||
labels, predictions, num_classes, weights=weights)
|
labels, predictions, num_classes, weights=weights)
|
||||||
|
@ -250,7 +250,7 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
|
|||||||
rnd_par = tf.constant([1, 2, 3, 4])
|
rnd_par = tf.constant([1, 2, 3, 4])
|
||||||
vs = tf.create_partitioned_variables([4], [4], rnd_par)
|
vs = tf.create_partitioned_variables([4], [4], rnd_par)
|
||||||
tf.global_variables_initializer().run()
|
tf.global_variables_initializer().run()
|
||||||
val = tf.concat(0, vs).eval()
|
val = tf.concat_v2(vs, 0).eval()
|
||||||
rnd = rnd_par.eval()
|
rnd = rnd_par.eval()
|
||||||
self.assertAllClose(rnd, val)
|
self.assertAllClose(rnd, val)
|
||||||
self.assertEqual([tf.int32] * 4, [v.dtype.base_dtype for v in vs])
|
self.assertEqual([tf.int32] * 4, [v.dtype.base_dtype for v in vs])
|
||||||
@ -261,7 +261,7 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
|
|||||||
rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
|
rnd_par = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
|
||||||
vs = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
|
vs = tf.create_partitioned_variables([2, 4], [1, 2], rnd_par)
|
||||||
tf.global_variables_initializer().run()
|
tf.global_variables_initializer().run()
|
||||||
val = tf.concat(1, vs).eval()
|
val = tf.concat_v2(vs, 1).eval()
|
||||||
rnd = rnd_par.eval()
|
rnd = rnd_par.eval()
|
||||||
self.assertAllClose(rnd, val)
|
self.assertAllClose(rnd, val)
|
||||||
self.assertEqual([tf.int32] * 2, [v.dtype.base_dtype for v in vs])
|
self.assertEqual([tf.int32] * 2, [v.dtype.base_dtype for v in vs])
|
||||||
@ -323,7 +323,7 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
|
|||||||
vs = tf.create_partitioned_variables(
|
vs = tf.create_partitioned_variables(
|
||||||
rnd.get_shape(), [1, 10], rnd.initialized_value())
|
rnd.get_shape(), [1, 10], rnd.initialized_value())
|
||||||
tf.global_variables_initializer().run()
|
tf.global_variables_initializer().run()
|
||||||
val = tf.concat(1, vs).eval()
|
val = tf.concat_v2(vs, 1).eval()
|
||||||
rnd = rnd.eval()
|
rnd = rnd.eval()
|
||||||
self.assertAllClose(rnd, val)
|
self.assertAllClose(rnd, val)
|
||||||
self.assertEqual([tf.float32] * 10, [v.dtype.base_dtype for v in vs])
|
self.assertEqual([tf.float32] * 10, [v.dtype.base_dtype for v in vs])
|
||||||
@ -372,7 +372,7 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
|
|||||||
"20 43 0,20:27,8",
|
"20 43 0,20:27,8",
|
||||||
"20 43 0,20:35,8"]]
|
"20 43 0,20:35,8"]]
|
||||||
for i, vs in enumerate(var_lists):
|
for i, vs in enumerate(var_lists):
|
||||||
var_val = tf.concat(1, vs).eval()
|
var_val = tf.concat_v2(vs, 1).eval()
|
||||||
self.assertAllClose(rnd_val, var_val)
|
self.assertAllClose(rnd_val, var_val)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
[tf.float64] * len(vs), [v.dtype.base_dtype for v in vs])
|
[tf.float64] * len(vs), [v.dtype.base_dtype for v in vs])
|
||||||
@ -385,7 +385,7 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
|
|||||||
vs = tf.create_partitioned_variables(
|
vs = tf.create_partitioned_variables(
|
||||||
rnd.get_shape(), [1, 1], rnd.initialized_value())
|
rnd.get_shape(), [1, 1], rnd.initialized_value())
|
||||||
tf.global_variables_initializer().run()
|
tf.global_variables_initializer().run()
|
||||||
val = tf.concat(0, vs).eval()
|
val = tf.concat_v2(vs, 0).eval()
|
||||||
rnd = rnd.eval()
|
rnd = rnd.eval()
|
||||||
self.assertAllClose(rnd, val)
|
self.assertAllClose(rnd, val)
|
||||||
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
|
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
|
||||||
@ -396,7 +396,7 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
|
|||||||
vs = tf.create_partitioned_variables(
|
vs = tf.create_partitioned_variables(
|
||||||
rnd.get_shape(), [10, 1], rnd.initialized_value())
|
rnd.get_shape(), [10, 1], rnd.initialized_value())
|
||||||
tf.global_variables_initializer().run()
|
tf.global_variables_initializer().run()
|
||||||
val = tf.concat(0, vs).eval()
|
val = tf.concat_v2(vs, 0).eval()
|
||||||
rnd = rnd.eval()
|
rnd = rnd.eval()
|
||||||
self.assertAllClose(rnd, val)
|
self.assertAllClose(rnd, val)
|
||||||
self._TestSaveSpec(vs, ["10 43 0,1:0,43",
|
self._TestSaveSpec(vs, ["10 43 0,1:0,43",
|
||||||
@ -420,7 +420,7 @@ class PartitionedVariablesTestCase(tf.test.TestCase):
|
|||||||
slice0 = _IotaInitializer([5, 5])
|
slice0 = _IotaInitializer([5, 5])
|
||||||
slice1 = _IotaInitializer([4, 5])
|
slice1 = _IotaInitializer([4, 5])
|
||||||
slice2 = _IotaInitializer([4, 5])
|
slice2 = _IotaInitializer([4, 5])
|
||||||
val = tf.concat(0, vs).eval()
|
val = tf.concat_v2(vs, 0).eval()
|
||||||
self.assertAllClose(slice0 + slice1 + slice2, val)
|
self.assertAllClose(slice0 + slice1 + slice2, val)
|
||||||
self._TestSaveSpec(vs, ["13 5 0,5:0,5",
|
self._TestSaveSpec(vs, ["13 5 0,5:0,5",
|
||||||
"13 5 5,4:0,5",
|
"13 5 5,4:0,5",
|
||||||
|
@ -127,9 +127,11 @@ class ReshapeTest(tf.test.TestCase):
|
|||||||
y = tf.reshape(x, [tf.placeholder(tf.int32), 37])
|
y = tf.reshape(x, [tf.placeholder(tf.int32), 37])
|
||||||
self.assertEqual([None, 37], y.get_shape().as_list())
|
self.assertEqual([None, 37], y.get_shape().as_list())
|
||||||
|
|
||||||
# Unknown input shape, partial new shape using `tf.concat()`.
|
# Unknown input shape, partial new shape using `tf.concat_v2()`.
|
||||||
y = tf.reshape(x, tf.concat(0, [tf.placeholder(tf.int32, shape=(2,)),
|
y = tf.reshape(
|
||||||
[37, 42]]))
|
x, tf.concat_v2(
|
||||||
|
[tf.placeholder(
|
||||||
|
tf.int32, shape=(2,)), [37, 42]], 0))
|
||||||
self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
|
self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
|
||||||
|
|
||||||
# Unknown input shape, partial new shape using `tf.shape()`.
|
# Unknown input shape, partial new shape using `tf.shape()`.
|
||||||
|
@ -530,7 +530,7 @@ class BidirectionalRNNTest(tf.test.TestCase):
|
|||||||
dtype=tf.float32,
|
dtype=tf.float32,
|
||||||
time_major=use_time_major,
|
time_major=use_time_major,
|
||||||
scope=scope)
|
scope=scope)
|
||||||
outputs = tf.concat(2, outputs)
|
outputs = tf.concat_v2(outputs, 2)
|
||||||
state_fw, state_bw = states
|
state_fw, state_bw = states
|
||||||
outputs_shape = [None, max_length, 2 * num_units]
|
outputs_shape = [None, max_length, 2 * num_units]
|
||||||
if use_shape:
|
if use_shape:
|
||||||
|
@ -81,10 +81,10 @@ def _GetSvdOpTest(dtype_, shape_, use_static_shape_):
|
|||||||
if full_matrices:
|
if full_matrices:
|
||||||
if m > n:
|
if m > n:
|
||||||
zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
|
zeros = tf.zeros(batch_shape + (m - n, n), dtype=dtype_)
|
||||||
diag_s = tf.concat(a.ndim - 2, [diag_s, zeros])
|
diag_s = tf.concat_v2([diag_s, zeros], a.ndim - 2)
|
||||||
elif n > m:
|
elif n > m:
|
||||||
zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
|
zeros = tf.zeros(batch_shape + (m, n - m), dtype=dtype_)
|
||||||
diag_s = tf.concat(a.ndim - 1, [diag_s, zeros])
|
diag_s = tf.concat_v2([diag_s, zeros], a.ndim - 1)
|
||||||
a_recon = tf.matmul(u, diag_s)
|
a_recon = tf.matmul(u, diag_s)
|
||||||
a_recon = tf.matmul(a_recon, v, adjoint_b=True)
|
a_recon = tf.matmul(a_recon, v, adjoint_b=True)
|
||||||
self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
|
self.assertAllClose(a_recon.eval(), a, rtol=tol, atol=tol)
|
||||||
|
@ -60,7 +60,7 @@ def build_graph(device, input_shape, variable, num_inputs, axis, grad):
|
|||||||
]) for _ in range(num_inputs)
|
]) for _ in range(num_inputs)
|
||||||
]
|
]
|
||||||
|
|
||||||
outputs = [tf.concat(axis, inputs) for _ in range(100)]
|
outputs = [tf.concat_v2(inputs, axis) for _ in range(100)]
|
||||||
if grad:
|
if grad:
|
||||||
return tf.group(*list(
|
return tf.group(*list(
|
||||||
itertools.chain.from_iterable(
|
itertools.chain.from_iterable(
|
||||||
|
@ -2286,7 +2286,7 @@ class WhileContext(ControlFlowContext):
|
|||||||
if self.outer_context: self.outer_context.Exit()
|
if self.outer_context: self.outer_context.Exit()
|
||||||
else:
|
else:
|
||||||
values_shape = array_ops.shape_internal(op.inputs[0], optimize=False)[1:]
|
values_shape = array_ops.shape_internal(op.inputs[0], optimize=False)[1:]
|
||||||
values_shape = array_ops.concat(0, [[1], values_shape])
|
values_shape = array_ops.concat_v2([[1], values_shape], 0)
|
||||||
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
|
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
|
||||||
indices_acc = constant_op.constant([0], indices.dtype)
|
indices_acc = constant_op.constant([0], indices.dtype)
|
||||||
shape_acc = None
|
shape_acc = None
|
||||||
@ -2317,8 +2317,10 @@ class WhileContext(ControlFlowContext):
|
|||||||
switch_acc = [switch(x, self._pivot) for x in merge_acc]
|
switch_acc = [switch(x, self._pivot) for x in merge_acc]
|
||||||
|
|
||||||
# The actual accumulation.
|
# The actual accumulation.
|
||||||
acc_indexed_slices = [array_ops.concat(0, [xa[1], xv])
|
acc_indexed_slices = [
|
||||||
for xa, xv in zip(switch_acc[:2], [indices, values])]
|
array_ops.concat_v2([xa[1], xv], 0)
|
||||||
|
for xa, xv in zip(switch_acc[:2], [indices, values])
|
||||||
|
]
|
||||||
if shape_acc is not None:
|
if shape_acc is not None:
|
||||||
# For the shape we just keep the maximum
|
# For the shape we just keep the maximum
|
||||||
acc_indexed_slices.append(
|
acc_indexed_slices.append(
|
||||||
@ -2600,7 +2602,7 @@ def while_loop(cond, body, loop_vars, shape_invariants=None,
|
|||||||
i0 = tf.constant(0)
|
i0 = tf.constant(0)
|
||||||
m0 = tf.ones([2, 2])
|
m0 = tf.ones([2, 2])
|
||||||
c = lambda i, m: i < 10
|
c = lambda i, m: i < 10
|
||||||
b = lambda i, m: [i+1, tf.concat(0, [m, m])]
|
b = lambda i, m: [i+1, tf.concat_v2([m, m], 0)]
|
||||||
tf.while_loop(
|
tf.while_loop(
|
||||||
c, b, loop_vars=[i0, m0],
|
c, b, loop_vars=[i0, m0],
|
||||||
shape_invariants=[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
|
shape_invariants=[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
|
||||||
|
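The while_loop docstring fragment above can be exercised end to end; a small self-contained version (using the public tf.TensorShape in place of the internal tensor_shape module, with the accumulator's growing first dimension declared unknown in the shape invariants):

import tensorflow as tf

i0 = tf.constant(0)
m0 = tf.ones([2, 2])
c = lambda i, m: i < 10
# The accumulator doubles its first dimension on every iteration.
b = lambda i, m: [i + 1, tf.concat_v2([m, m], 0)]
_, m_final = tf.while_loop(
    c, b, loop_vars=[i0, m0],
    shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])

with tf.Session() as sess:
  print(sess.run(m_final).shape)  # (2048, 2) after ten doublings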
@ -179,15 +179,19 @@ def embedding_lookup(params, ids, partition_strategy="mod", name=None,
|
|||||||
for p in params[1:]:
|
for p in params[1:]:
|
||||||
element_shape = element_shape.merge_with(p.get_shape()[1:])
|
element_shape = element_shape.merge_with(p.get_shape()[1:])
|
||||||
if element_shape.is_fully_defined():
|
if element_shape.is_fully_defined():
|
||||||
ret = array_ops.reshape(ret, array_ops.concat(0, [
|
ret = array_ops.reshape(ret,
|
||||||
array_ops.shape(ids), element_shape]))
|
array_ops.concat_v2(
|
||||||
|
[array_ops.shape(ids), element_shape], 0))
|
||||||
else:
|
else:
|
||||||
# It's important that we compute params[0].shape on the right device
|
# It's important that we compute params[0].shape on the right device
|
||||||
# to avoid data motion.
|
# to avoid data motion.
|
||||||
with ops.colocate_with(params[0]):
|
with ops.colocate_with(params[0]):
|
||||||
params_shape = array_ops.shape(params[0])
|
params_shape = array_ops.shape(params[0])
|
||||||
ret = array_ops.reshape(ret, array_ops.concat(0, [
|
ret = array_ops.reshape(ret,
|
||||||
array_ops.shape(ids), array_ops.slice(params_shape, [1], [-1])]))
|
array_ops.concat_v2([
|
||||||
|
array_ops.shape(ids),
|
||||||
|
array_ops.slice(params_shape, [1], [-1])
|
||||||
|
], 0))
|
||||||
# output shape = ids.shape + params[*].shape[1:]
|
# output shape = ids.shape + params[*].shape[1:]
|
||||||
# Normally the reshape is sufficient, but setting shape explicitly
|
# Normally the reshape is sufficient, but setting shape explicitly
|
||||||
# teaches shape inference that params[1:].get_shape() matters.
|
# teaches shape inference that params[1:].get_shape() matters.
|
||||||
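The reshape above enforces the documented invariant output shape = ids.shape + params[*].shape[1:] by concatenating the two shape vectors. A hedged illustration with made-up sizes and a plain tf.gather standing in for the partitioned lookup:

import tensorflow as tf

params = tf.random_uniform([100, 8])        # hypothetical 100-row table, dim 8
ids = tf.constant([[1, 2, 3], [4, 5, 6]])   # arbitrary 2-D ids

flat = tf.gather(params, tf.reshape(ids, [-1]))
# New shape = ids.shape + params.shape[1:], assembled with concat_v2.
out_shape = tf.concat_v2([tf.shape(ids), tf.shape(params)[1:]], 0)
result = tf.reshape(flat, out_shape)

with tf.Session() as sess:
  print(sess.run(result).shape)  # (2, 3, 8)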
@ -315,8 +319,8 @@ def embedding_lookup_sparse(params, sp_ids, sp_weights,
|
|||||||
# Reshape weights to allow broadcast
|
# Reshape weights to allow broadcast
|
||||||
ones = array_ops.fill(
|
ones = array_ops.fill(
|
||||||
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
|
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
|
||||||
bcast_weights_shape = array_ops.concat(0, [
|
bcast_weights_shape = array_ops.concat_v2(
|
||||||
array_ops.shape(weights), ones])
|
[array_ops.shape(weights), ones], 0)
|
||||||
|
|
||||||
orig_weights_shape = weights.get_shape()
|
orig_weights_shape = weights.get_shape()
|
||||||
weights = array_ops.reshape(weights, bcast_weights_shape)
|
weights = array_ops.reshape(weights, bcast_weights_shape)
|
||||||
|
@ -770,8 +770,8 @@ def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
|
|||||||
# Form IndexedSlices out of the concatenated values and
|
# Form IndexedSlices out of the concatenated values and
|
||||||
# indices.
|
# indices.
|
||||||
out_grads[i] = ops.IndexedSlices(
|
out_grads[i] = ops.IndexedSlices(
|
||||||
array_ops.concat(0, [x.values for x in out_grad]),
|
array_ops.concat_v2([x.values for x in out_grad], 0),
|
||||||
array_ops.concat(0, [x.indices for x in out_grad]),
|
array_ops.concat_v2([x.indices for x in out_grad], 0),
|
||||||
out_grad[0].dense_shape)
|
out_grad[0].dense_shape)
|
||||||
else:
|
else:
|
||||||
out_grads[i] = []
|
out_grads[i] = []
|
||||||
|
@ -111,9 +111,9 @@ class GradientsTest(test_util.TensorFlowTestCase):
|
|||||||
t2 = constant(2.0)
|
t2 = constant(2.0)
|
||||||
t3 = array_ops.pack([t1, t2])
|
t3 = array_ops.pack([t1, t2])
|
||||||
t4 = constant([1.0])
|
t4 = constant([1.0])
|
||||||
t5 = array_ops.concat(0, [t4, t3])
|
t5 = array_ops.concat_v2([t4, t3], 0)
|
||||||
t6 = constant([2.0])
|
t6 = constant([2.0])
|
||||||
t7 = array_ops.concat(0, [t5, t6])
|
t7 = array_ops.concat_v2([t5, t6], 0)
|
||||||
self._assertOpListEqual([t7.op, t5.op, t4.op],
|
self._assertOpListEqual([t7.op, t5.op, t4.op],
|
||||||
_OpsBetween(g, [t7.op], [t4.op]))
|
_OpsBetween(g, [t7.op], [t4.op]))
|
||||||
|
|
||||||
@ -122,10 +122,10 @@ class GradientsTest(test_util.TensorFlowTestCase):
|
|||||||
t1 = constant(1.0)
|
t1 = constant(1.0)
|
||||||
t2 = constant(2.0)
|
t2 = constant(2.0)
|
||||||
t3 = array_ops.pack([t1, t2])
|
t3 = array_ops.pack([t1, t2])
|
||||||
t4 = array_ops.concat(0, [t3, t3, t3])
|
t4 = array_ops.concat_v2([t3, t3, t3], 0)
|
||||||
t5 = constant([1.0])
|
t5 = constant([1.0])
|
||||||
t6 = array_ops.concat(0, [t4, t5])
|
t6 = array_ops.concat_v2([t4, t5], 0)
|
||||||
t7 = array_ops.concat(0, [t6, t3])
|
t7 = array_ops.concat_v2([t6, t3], 0)
|
||||||
self._assertOpListEqual([t6.op, t4.op, t3.op],
|
self._assertOpListEqual([t6.op, t4.op, t3.op],
|
||||||
_OpsBetween(g, [t6.op], [t3.op]))
|
_OpsBetween(g, [t6.op], [t3.op]))
|
||||||
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
|
self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
|
||||||
|
@ -1017,7 +1017,7 @@ def grayscale_to_rgb(images, name=None):
|
|||||||
shape_list = (
|
shape_list = (
|
||||||
[array_ops.ones(rank_1,
|
[array_ops.ones(rank_1,
|
||||||
dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)])
|
dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)])
|
||||||
multiples = array_ops.concat(0, shape_list)
|
multiples = array_ops.concat_v2(shape_list, 0)
|
||||||
rgb = array_ops.tile(images, multiples, name=name)
|
rgb = array_ops.tile(images, multiples, name=name)
|
||||||
rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
|
rgb.set_shape(images.get_shape()[:-1].concatenate([3]))
|
||||||
return rgb
|
return rgb
|
||||||
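In the grayscale_to_rgb hunk, the tile multiples are ones for every leading dimension followed by a 3 for the channel axis, joined with concat_v2. A short sketch assuming a rank-3 HWC image:

import tensorflow as tf

gray = tf.zeros([4, 4, 1])  # hypothetical H x W x 1 grayscale image

# Ones for all but the last dimension, then 3 to replicate the channel.
multiples = tf.concat_v2(
    [tf.ones(tf.expand_dims(tf.rank(gray) - 1, 0), dtype=tf.int32),
     tf.constant([3])], 0)
rgb = tf.tile(gray, multiples)

with tf.Session() as sess:
  print(sess.run(rgb).shape)  # (4, 4, 3)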
@ -1100,7 +1100,7 @@ def adjust_hue(image, delta, name=None):
|
|||||||
# floating point number since delta is [-0.5, 0.5].
|
# floating point number since delta is [-0.5, 0.5].
|
||||||
hue = math_ops.mod(hue + (delta + 1.), 1.)
|
hue = math_ops.mod(hue + (delta + 1.), 1.)
|
||||||
|
|
||||||
hsv_altered = array_ops.concat(2, [hue, saturation, value])
|
hsv_altered = array_ops.concat_v2([hue, saturation, value], 2)
|
||||||
rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)
|
rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)
|
||||||
else:
|
else:
|
||||||
rgb_altered = gen_image_ops.adjust_hue(flt_image, delta)
|
rgb_altered = gen_image_ops.adjust_hue(flt_image, delta)
|
||||||
@ -1176,7 +1176,7 @@ def adjust_saturation(image, saturation_factor, name=None):
|
|||||||
saturation *= saturation_factor
|
saturation *= saturation_factor
|
||||||
saturation = clip_ops.clip_by_value(saturation, 0.0, 1.0)
|
saturation = clip_ops.clip_by_value(saturation, 0.0, 1.0)
|
||||||
|
|
||||||
hsv_altered = array_ops.concat(2, [hue, saturation, value])
|
hsv_altered = array_ops.concat_v2([hue, saturation, value], 2)
|
||||||
rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)
|
rgb_altered = gen_image_ops.hsv_to_rgb(hsv_altered)
|
||||||
|
|
||||||
return convert_image_dtype(rgb_altered, orig_dtype)
|
return convert_image_dtype(rgb_altered, orig_dtype)
|
||||||
|
@ -50,7 +50,7 @@ def _MatrixDeterminantGrad(op, grad):
|
|||||||
c = op.outputs[0]
|
c = op.outputs[0]
|
||||||
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
|
a_adj_inv = linalg_ops.matrix_inverse(a, adjoint=True)
|
||||||
multipliers = array_ops.reshape(
|
multipliers = array_ops.reshape(
|
||||||
grad * c, array_ops.concat(0, [array_ops.shape(c), [1, 1]]))
|
grad * c, array_ops.concat_v2([array_ops.shape(c), [1, 1]], 0))
|
||||||
return multipliers * a_adj_inv
|
return multipliers * a_adj_inv
|
||||||
|
|
||||||
|
|
||||||
|
@ -122,13 +122,13 @@ def eye(
|
|||||||
diag_size = num_rows
|
diag_size = num_rows
|
||||||
else:
|
else:
|
||||||
diag_size = math_ops.minimum(num_rows, num_columns)
|
diag_size = math_ops.minimum(num_rows, num_columns)
|
||||||
diag_shape = array_ops.concat(0, (batch_shape, [diag_size]))
|
diag_shape = array_ops.concat_v2((batch_shape, [diag_size]), 0)
|
||||||
diag_ones = array_ops.ones(diag_shape, dtype=dtype)
|
diag_ones = array_ops.ones(diag_shape, dtype=dtype)
|
||||||
|
|
||||||
if num_columns is None:
|
if num_columns is None:
|
||||||
return array_ops.matrix_diag(diag_ones)
|
return array_ops.matrix_diag(diag_ones)
|
||||||
else:
|
else:
|
||||||
shape = array_ops.concat(0, (batch_shape, [num_rows, num_columns]))
|
shape = array_ops.concat_v2((batch_shape, [num_rows, num_columns]), 0)
|
||||||
zero_matrix = array_ops.zeros(shape, dtype=dtype)
|
zero_matrix = array_ops.zeros(shape, dtype=dtype)
|
||||||
return array_ops.matrix_set_diag(zero_matrix, diag_ones)
|
return array_ops.matrix_set_diag(zero_matrix, diag_ones)
|
||||||
|
|
||||||
|
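The eye hunk composes shapes the same way: batch shape plus [diag_size] for the diagonal, and batch shape plus [num_rows, num_columns] for the rectangular zero matrix that receives the diagonal. A rough equivalent with the public API and a concrete, invented batch shape:

import tensorflow as tf

batch_shape = [2]            # hypothetical: two matrices in the batch
num_rows, num_columns = 3, 4
diag_size = min(num_rows, num_columns)

diag_ones = tf.ones(tf.concat_v2((batch_shape, [diag_size]), 0))
full_shape = tf.concat_v2((batch_shape, [num_rows, num_columns]), 0)
eye_batch = tf.matrix_set_diag(tf.zeros(full_shape), diag_ones)

with tf.Session() as sess:
  print(sess.run(eye_batch).shape)  # (2, 3, 4), ones on each main diagonal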
@ -127,7 +127,7 @@ def _ProdGrad(op, grad):
|
|||||||
reduced = math_ops.cast(reduction_indices, dtypes.int32)
|
reduced = math_ops.cast(reduction_indices, dtypes.int32)
|
||||||
idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
|
idx = math_ops.range(0, array_ops.rank(op.inputs[0]))
|
||||||
other, _ = array_ops.setdiff1d(idx, reduced)
|
other, _ = array_ops.setdiff1d(idx, reduced)
|
||||||
perm = array_ops.concat(0, [reduced, other])
|
perm = array_ops.concat_v2([reduced, other], 0)
|
||||||
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
|
reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
|
||||||
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
|
other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
|
||||||
permuted = array_ops.transpose(op.inputs[0], perm)
|
permuted = array_ops.transpose(op.inputs[0], perm)
|
||||||
@ -155,9 +155,10 @@ def _SegmentSumGrad(op, grad):
|
|||||||
def _SegmentMeanGrad(op, grad):
|
def _SegmentMeanGrad(op, grad):
|
||||||
"""Gradient for SegmentMean."""
|
"""Gradient for SegmentMean."""
|
||||||
input_rank = array_ops.rank(op.inputs[0])
|
input_rank = array_ops.rank(op.inputs[0])
|
||||||
ones_shape = array_ops.concat(
|
ones_shape = array_ops.concat_v2([
|
||||||
0, [array_ops.shape(op.inputs[1]),
|
array_ops.shape(op.inputs[1]),
|
||||||
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)])
|
array_ops.fill(array_ops.expand_dims(input_rank - 1, 0), 1)
|
||||||
|
], 0)
|
||||||
ones = array_ops.fill(ones_shape,
|
ones = array_ops.fill(ones_shape,
|
||||||
constant_op.constant(1, dtype=grad.dtype))
|
constant_op.constant(1, dtype=grad.dtype))
|
||||||
scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))
|
scaled_grad = math_ops.div(grad, math_ops.segment_sum(ones, op.inputs[1]))
|
||||||
|
@ -98,14 +98,14 @@ class MinOrMaxGradientTest(tf.test.TestCase):
|
|||||||
|
|
||||||
def testMinGradient(self):
|
def testMinGradient(self):
|
||||||
inputs = tf.constant([1.0], dtype=tf.float32)
|
inputs = tf.constant([1.0], dtype=tf.float32)
|
||||||
outputs = tf.reduce_min(tf.concat(0, [inputs, inputs]))
|
outputs = tf.reduce_min(tf.concat_v2([inputs, inputs], 0))
|
||||||
with self.test_session():
|
with self.test_session():
|
||||||
error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
|
error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
|
||||||
self.assertLess(error, 1e-4)
|
self.assertLess(error, 1e-4)
|
||||||
|
|
||||||
def testMaxGradient(self):
|
def testMaxGradient(self):
|
||||||
inputs = tf.constant([1.0], dtype=tf.float32)
|
inputs = tf.constant([1.0], dtype=tf.float32)
|
||||||
outputs = tf.reduce_max(tf.concat(0, [inputs, inputs]))
|
outputs = tf.reduce_max(tf.concat_v2([inputs, inputs], 0))
|
||||||
with self.test_session():
|
with self.test_session():
|
||||||
error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
|
error = tf.test.compute_gradient_error(inputs, [1], outputs, [])
|
||||||
self.assertLess(error, 1e-4)
|
self.assertLess(error, 1e-4)
|
||||||
@ -131,7 +131,7 @@ class SegmentMinOrMaxGradientTest(tf.test.TestCase):
|
|||||||
|
|
||||||
def testSegmentMinGradientWithTies(self):
|
def testSegmentMinGradientWithTies(self):
|
||||||
inputs = tf.constant([1.0], dtype=tf.float32)
|
inputs = tf.constant([1.0], dtype=tf.float32)
|
||||||
data = tf.concat(0, [inputs, inputs])
|
data = tf.concat_v2([inputs, inputs], 0)
|
||||||
segment_ids = tf.constant([0, 0], dtype=tf.int64)
|
segment_ids = tf.constant([0, 0], dtype=tf.int64)
|
||||||
segment_min = tf.segment_min(data, segment_ids)
|
segment_min = tf.segment_min(data, segment_ids)
|
||||||
with self.test_session():
|
with self.test_session():
|
||||||
@ -140,7 +140,7 @@ class SegmentMinOrMaxGradientTest(tf.test.TestCase):
|
|||||||
|
|
||||||
def testSegmentMaxGradientWithTies(self):
|
def testSegmentMaxGradientWithTies(self):
|
||||||
inputs = tf.constant([1.0], dtype=tf.float32)
|
inputs = tf.constant([1.0], dtype=tf.float32)
|
||||||
data = tf.concat(0, [inputs, inputs])
|
data = tf.concat_v2([inputs, inputs], 0)
|
||||||
segment_ids = tf.constant([0, 0], dtype=tf.int64)
|
segment_ids = tf.constant([0, 0], dtype=tf.int64)
|
||||||
segment_max = tf.segment_max(data, segment_ids)
|
segment_max = tf.segment_max(data, segment_ids)
|
||||||
with self.test_session():
|
with self.test_session():
|
||||||
|
@ -1992,9 +1992,10 @@ def _expand_and_tile(tensor, multiple, dim=0, name=None):
|
|||||||
array_ops.size(tensor.shape) + dim, [1])
|
array_ops.size(tensor.shape) + dim, [1])
|
||||||
else:
|
else:
|
||||||
expand_dims = [dim]
|
expand_dims = [dim]
|
||||||
expanded_shape = array_ops.concat(
|
expanded_shape = array_ops.concat_v2(
|
||||||
0, (array_ops.slice(tensor.shape, [0], expand_dims), [1],
|
(array_ops.slice(tensor.shape, [0], expand_dims), [1],
|
||||||
array_ops.slice(tensor.shape, expand_dims, [-1])),
|
array_ops.slice(tensor.shape, expand_dims, [-1])),
|
||||||
|
0,
|
||||||
name='expanded_shape')
|
name='expanded_shape')
|
||||||
expanded = sparse_ops.sparse_reshape(
|
expanded = sparse_ops.sparse_reshape(
|
||||||
tensor, shape=expanded_shape, name='expand')
|
tensor, shape=expanded_shape, name='expand')
|
||||||
@ -2009,8 +2010,8 @@ def _expand_and_tile(tensor, multiple, dim=0, name=None):
|
|||||||
if multiple == 1:
|
if multiple == 1:
|
||||||
return expanded
|
return expanded
|
||||||
ones = array_ops.ones_like(array_ops.shape(tensor))
|
ones = array_ops.ones_like(array_ops.shape(tensor))
|
||||||
tile_multiples = array_ops.concat(
|
tile_multiples = array_ops.concat_v2(
|
||||||
0, (ones[:dim], (multiple,), ones[dim:]), name='multiples')
|
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
|
||||||
return array_ops.tile(expanded, tile_multiples, name=scope)
|
return array_ops.tile(expanded, tile_multiples, name=scope)
|
||||||
|
|
||||||
|
|
||||||
|
@ -226,14 +226,15 @@ def _BiasAddGradGrad(op, received_grad):
|
|||||||
bias_shape = array_ops.shape(received_grad)
|
bias_shape = array_ops.shape(received_grad)
|
||||||
|
|
||||||
if data_format == b"NCHW":
|
if data_format == b"NCHW":
|
||||||
expanded_shape = array_ops.concat(
|
expanded_shape = array_ops.concat_v2([
|
||||||
0,
|
array_ops.ones_like(shape[:-3]), bias_shape, array_ops.ones_like(shape[
|
||||||
[array_ops.ones_like(shape[:-3]), bias_shape, array_ops.ones_like(shape[-2:])]
|
-2:])
|
||||||
)
|
], 0)
|
||||||
tile_mults = array_ops.concat(0, [shape[:-3], [1], shape[-2:]])
|
tile_mults = array_ops.concat_v2([shape[:-3], [1], shape[-2:]], 0)
|
||||||
else:
|
else:
|
||||||
expanded_shape = array_ops.concat(0, [array_ops.ones_like(shape[:-1]), bias_shape])
|
expanded_shape = array_ops.concat_v2(
|
||||||
tile_mults = array_ops.concat(0, [shape[:-1], [1]])
|
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
|
||||||
|
tile_mults = array_ops.concat_v2([shape[:-1], [1]], 0)
|
||||||
|
|
||||||
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
|
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
|
||||||
return array_ops.tile(expanded_grad, tile_mults)
|
return array_ops.tile(expanded_grad, tile_mults)
|
||||||
|
@ -889,7 +889,7 @@ def _compute_sampled_logits(weights,
|
|||||||
|
|
||||||
# labels_flat is a [batch_size * num_true] tensor
|
# labels_flat is a [batch_size * num_true] tensor
|
||||||
# sampled is a [num_sampled] int tensor
|
# sampled is a [num_sampled] int tensor
|
||||||
all_ids = array_ops.concat(0, [labels_flat, sampled])
|
all_ids = array_ops.concat_v2([labels_flat, sampled], 0)
|
||||||
|
|
||||||
# weights shape is [num_classes, dim]
|
# weights shape is [num_classes, dim]
|
||||||
all_w = embedding_ops.embedding_lookup(
|
all_w = embedding_ops.embedding_lookup(
|
||||||
@ -905,14 +905,14 @@ def _compute_sampled_logits(weights,
|
|||||||
# true_w shape is [batch_size * num_true, dim]
|
# true_w shape is [batch_size * num_true, dim]
|
||||||
# row_wise_dots is [batch_size, num_true, dim]
|
# row_wise_dots is [batch_size, num_true, dim]
|
||||||
dim = array_ops.shape(true_w)[1:2]
|
dim = array_ops.shape(true_w)[1:2]
|
||||||
new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim])
|
new_true_w_shape = array_ops.concat_v2([[-1, num_true], dim], 0)
|
||||||
row_wise_dots = math_ops.mul(
|
row_wise_dots = math_ops.mul(
|
||||||
array_ops.expand_dims(inputs, 1),
|
array_ops.expand_dims(inputs, 1),
|
||||||
array_ops.reshape(true_w, new_true_w_shape))
|
array_ops.reshape(true_w, new_true_w_shape))
|
||||||
# We want the row-wise dot plus biases which yields a
|
# We want the row-wise dot plus biases which yields a
|
||||||
# [batch_size, num_true] tensor of true_logits.
|
# [batch_size, num_true] tensor of true_logits.
|
||||||
dots_as_matrix = array_ops.reshape(row_wise_dots,
|
dots_as_matrix = array_ops.reshape(row_wise_dots,
|
||||||
array_ops.concat(0, [[-1], dim]))
|
array_ops.concat_v2([[-1], dim], 0))
|
||||||
true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
|
true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
|
||||||
true_b = array_ops.reshape(true_b, [-1, num_true])
|
true_b = array_ops.reshape(true_b, [-1, num_true])
|
||||||
true_logits += true_b
|
true_logits += true_b
|
||||||
@ -940,12 +940,12 @@ def _compute_sampled_logits(weights,
|
|||||||
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
|
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
|
||||||
acc_ids_2d_int32 = array_ops.reshape(
|
acc_ids_2d_int32 = array_ops.reshape(
|
||||||
math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
|
math_ops.cast(acc_ids, dtypes.int32), [-1, 1])
|
||||||
sparse_indices = array_ops.concat(1, [acc_indices_2d, acc_ids_2d_int32],
|
sparse_indices = array_ops.concat_v2([acc_indices_2d, acc_ids_2d_int32],
|
||||||
"sparse_indices")
|
1, "sparse_indices")
|
||||||
# Create sampled_logits_shape = [batch_size, num_sampled]
|
# Create sampled_logits_shape = [batch_size, num_sampled]
|
||||||
sampled_logits_shape = array_ops.concat(
|
sampled_logits_shape = array_ops.concat_v2(
|
||||||
0,
|
[array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)],
|
||||||
[array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)])
|
0)
|
||||||
if sampled_logits.dtype != acc_weights.dtype:
|
if sampled_logits.dtype != acc_weights.dtype:
|
||||||
acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
|
acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
|
||||||
sampled_logits += sparse_ops.sparse_to_dense(
|
sampled_logits += sparse_ops.sparse_to_dense(
|
||||||
@ -961,14 +961,14 @@ def _compute_sampled_logits(weights,
|
|||||||
sampled_logits -= math_ops.log(sampled_expected_count)
|
sampled_logits -= math_ops.log(sampled_expected_count)
|
||||||
|
|
||||||
# Construct output logits and labels. The true labels/logits start at col 0.
|
# Construct output logits and labels. The true labels/logits start at col 0.
|
||||||
out_logits = array_ops.concat(1, [true_logits, sampled_logits])
|
out_logits = array_ops.concat_v2([true_logits, sampled_logits], 1)
|
||||||
# true_logits is a float tensor, ones_like(true_logits) is a float tensor
|
# true_logits is a float tensor, ones_like(true_logits) is a float tensor
|
||||||
# of ones. We then divide by num_true to ensure the per-example labels sum
|
# of ones. We then divide by num_true to ensure the per-example labels sum
|
||||||
# to 1.0, i.e. form a proper probability distribution.
|
# to 1.0, i.e. form a proper probability distribution.
|
||||||
out_labels = array_ops.concat(1, [
|
out_labels = array_ops.concat_v2([
|
||||||
array_ops.ones_like(true_logits) / num_true,
|
array_ops.ones_like(true_logits) / num_true,
|
||||||
array_ops.zeros_like(sampled_logits)
|
array_ops.zeros_like(sampled_logits)
|
||||||
])
|
], 1)
|
||||||
|
|
||||||
return out_logits, out_labels
|
return out_logits, out_labels
|
||||||
|
|
||||||
|
@ -408,7 +408,7 @@ def with_space_to_batch(input, dilation_rate, padding, op, filter_shape=None, #
|
|||||||
if const_orig is not None:
|
if const_orig is not None:
|
||||||
return np.concatenate(parts)
|
return np.concatenate(parts)
|
||||||
else:
|
else:
|
||||||
return array_ops.concat(0, parts)
|
return array_ops.concat_v2(parts, 0)
|
||||||
|
|
||||||
dilation_rate = adjust(dilation_rate, 1)
|
dilation_rate = adjust(dilation_rate, 1)
|
||||||
paddings = adjust(paddings, 0)
|
paddings = adjust(paddings, 0)
|
||||||
@ -1363,7 +1363,7 @@ def crelu(features, name=None):
|
|||||||
"""
|
"""
|
||||||
with ops.name_scope(name, "CRelu", [features]) as name:
|
with ops.name_scope(name, "CRelu", [features]) as name:
|
||||||
features = ops.convert_to_tensor(features, name="features")
|
features = ops.convert_to_tensor(features, name="features")
|
||||||
c = array_ops.concat(-1, [features, -features], name=name)
|
c = array_ops.concat_v2([features, -features], -1, name=name)
|
||||||
return gen_nn_ops.relu(c)
|
return gen_nn_ops.relu(c)
|
||||||
|
|
||||||
|
|
||||||
@ -1388,7 +1388,8 @@ def _flatten_outer_dims(logits):
|
|||||||
rank = array_ops.rank(logits)
|
rank = array_ops.rank(logits)
|
||||||
last_dim_size = array_ops.slice(
|
last_dim_size = array_ops.slice(
|
||||||
array_ops.shape(logits), [math_ops.sub(rank, 1)], [1])
|
array_ops.shape(logits), [math_ops.sub(rank, 1)], [1])
|
||||||
output = array_ops.reshape(logits, array_ops.concat(0, [[-1], last_dim_size]))
|
output = array_ops.reshape(logits,
|
||||||
|
array_ops.concat_v2([[-1], last_dim_size], 0))
|
||||||
|
|
||||||
# Set output shape if known.
|
# Set output shape if known.
|
||||||
shape = logits.get_shape()
|
shape = logits.get_shape()
|
||||||
@ -1432,9 +1433,12 @@ def _softmax(logits, compute_op, dim=-1, name=None):
|
|||||||
"""
|
"""
|
||||||
def _swap_axis(logits, dim_index, last_index):
|
def _swap_axis(logits, dim_index, last_index):
|
||||||
"""Swaps logits's dim_index and last_index."""
|
"""Swaps logits's dim_index and last_index."""
|
||||||
return array_ops.transpose(logits, array_ops.concat(
|
return array_ops.transpose(logits,
|
||||||
0, [math_ops.range(dim_index), [last_index],
|
array_ops.concat_v2([
|
||||||
math_ops.range(dim_index + 1, last_index), [dim_index]]))
|
math_ops.range(dim_index), [last_index],
|
||||||
|
math_ops.range(dim_index + 1, last_index),
|
||||||
|
[dim_index]
|
||||||
|
], 0))
|
||||||
|
|
||||||
logits = ops.convert_to_tensor(logits)
|
logits = ops.convert_to_tensor(logits)
|
||||||
if logits.get_shape().ndims is 2 and dim is -1:
|
if logits.get_shape().ndims is 2 and dim is -1:
|
||||||
@ -1574,9 +1578,12 @@ def softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
|
|||||||
# Move the dim to the end if dim is not the last dimension.
|
# Move the dim to the end if dim is not the last dimension.
|
||||||
if dim is not -1:
|
if dim is not -1:
|
||||||
def _move_dim_to_end(tensor, dim_index, rank):
|
def _move_dim_to_end(tensor, dim_index, rank):
|
||||||
return array_ops.transpose(tensor, array_ops.concat(
|
return array_ops.transpose(tensor,
|
||||||
0, [math_ops.range(dim_index), math_ops.range(dim_index + 1, rank),
|
array_ops.concat_v2([
|
||||||
[dim_index]]))
|
math_ops.range(dim_index),
|
||||||
|
math_ops.range(dim_index + 1, rank),
|
||||||
|
[dim_index]
|
||||||
|
], 0))
|
||||||
|
|
||||||
precise_logits = _move_dim_to_end(precise_logits, dim, input_rank)
|
precise_logits = _move_dim_to_end(precise_logits, dim, input_rank)
|
||||||
labels = _move_dim_to_end(labels, dim, input_rank)
|
labels = _move_dim_to_end(labels, dim, input_rank)
|
||||||
|
@ -420,8 +420,8 @@ def random_gamma(shape,
|
|||||||
name: Optional name for the operation.
|
name: Optional name for the operation.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
samples: a `Tensor` of shape `tf.concat(shape, tf.shape(alpha + beta))` with
|
samples: a `Tensor` of shape `tf.concat_v2([shape, tf.shape(alpha + beta)], 0)`
|
||||||
values of type `dtype`.
|
with values of type `dtype`.
|
||||||
"""
|
"""
|
||||||
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
|
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
|
||||||
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
|
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
|
||||||
|
@ -561,8 +561,9 @@ def bidirectional_rnn(cell_fw, cell_bw, inputs,
|
|||||||
flat_output_fw = nest.flatten(output_fw)
|
flat_output_fw = nest.flatten(output_fw)
|
||||||
flat_output_bw = nest.flatten(output_bw)
|
flat_output_bw = nest.flatten(output_bw)
|
||||||
|
|
||||||
flat_outputs = tuple(array_ops.concat(1, [fw, bw])
|
flat_outputs = tuple(
|
||||||
for fw, bw in zip(flat_output_fw, flat_output_bw))
|
array_ops.concat_v2([fw, bw], 1)
|
||||||
|
for fw, bw in zip(flat_output_fw, flat_output_bw))
|
||||||
|
|
||||||
outputs = nest.pack_sequence_as(structure=output_fw,
|
outputs = nest.pack_sequence_as(structure=output_fw,
|
||||||
flat_sequence=flat_outputs)
|
flat_sequence=flat_outputs)
|
||||||
@ -643,7 +644,7 @@ def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
|
|||||||
It returns a tuple instead of a single concatenated `Tensor`, unlike
|
It returns a tuple instead of a single concatenated `Tensor`, unlike
|
||||||
in the `bidirectional_rnn`. If the concatenated one is preferred,
|
in the `bidirectional_rnn`. If the concatenated one is preferred,
|
||||||
the forward and backward outputs can be concatenated as
|
the forward and backward outputs can be concatenated as
|
||||||
`tf.concat(2, outputs)`.
|
`tf.concat_v2(outputs, 2)`.
|
||||||
output_states: A tuple (output_state_fw, output_state_bw) containing
|
output_states: A tuple (output_state_fw, output_state_bw) containing
|
||||||
the forward and the backward final states of bidirectional rnn.
|
the forward and the backward final states of bidirectional rnn.
|
||||||
|
|
||||||
|
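As the bidirectional_dynamic_rnn docstring above notes, the forward and backward outputs come back as a tuple and can be merged along the depth axis with tf.concat_v2(outputs, 2). A toy shape check using dummy tensors in place of real RNN outputs:

import tensorflow as tf

batch, time, units = 5, 7, 16               # hypothetical sizes
output_fw = tf.zeros([batch, time, units])  # stand-in for the forward output
output_bw = tf.zeros([batch, time, units])  # stand-in for the backward output

merged = tf.concat_v2((output_fw, output_bw), 2)

with tf.Session() as sess:
  print(sess.run(merged).shape)  # (5, 7, 32): depth doubles after merging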
@ -301,7 +301,7 @@ class BasicLSTMCell(RNNCell):
|
|||||||
if self._state_is_tuple:
|
if self._state_is_tuple:
|
||||||
new_state = LSTMStateTuple(new_c, new_h)
|
new_state = LSTMStateTuple(new_c, new_h)
|
||||||
else:
|
else:
|
||||||
new_state = array_ops.concat(1, [new_c, new_h])
|
new_state = array_ops.concat_v2([new_c, new_h], 1)
|
||||||
return new_h, new_state
|
return new_h, new_state
|
||||||
|
|
||||||
|
|
||||||
@ -493,8 +493,8 @@ class LSTMCell(RNNCell):
|
|||||||
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
|
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
|
||||||
# pylint: enable=invalid-unary-operand-type
|
# pylint: enable=invalid-unary-operand-type
|
||||||
|
|
||||||
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple
|
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
|
||||||
else array_ops.concat(1, [c, m]))
|
array_ops.concat_v2([c, m], 1))
|
||||||
return m, new_state
|
return m, new_state
|
||||||
|
|
||||||
|
|
||||||
@ -766,8 +766,8 @@ class MultiRNNCell(RNNCell):
|
|||||||
cur_state_pos += cell.state_size
|
cur_state_pos += cell.state_size
|
||||||
cur_inp, new_state = cell(cur_inp, cur_state)
|
cur_inp, new_state = cell(cur_inp, cur_state)
|
||||||
new_states.append(new_state)
|
new_states.append(new_state)
|
||||||
new_states = (tuple(new_states) if self._state_is_tuple
|
new_states = (tuple(new_states) if self._state_is_tuple else
|
||||||
else array_ops.concat(1, new_states))
|
array_ops.concat_v2(new_states, 1))
|
||||||
return cur_inp, new_states
|
return cur_inp, new_states
|
||||||
|
|
||||||
|
|
||||||
@ -860,7 +860,7 @@ def _linear(args, output_size, bias, bias_start=0.0, scope=None):
|
|||||||
if len(args) == 1:
|
if len(args) == 1:
|
||||||
res = math_ops.matmul(args[0], weights)
|
res = math_ops.matmul(args[0], weights)
|
||||||
else:
|
else:
|
||||||
res = math_ops.matmul(array_ops.concat(1, args), weights)
|
res = math_ops.matmul(array_ops.concat_v2(args, 1), weights)
|
||||||
if not bias:
|
if not bias:
|
||||||
return res
|
return res
|
||||||
with vs.variable_scope(outer_scope) as inner_scope:
|
with vs.variable_scope(outer_scope) as inner_scope:
|
||||||
|
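Several rnn_cell hunks above pack the cell state into a single tensor when state_is_tuple is False by concatenating c and h (or the per-layer states) along axis 1. A minimal sketch of packing and unpacking, with invented sizes and the old tf.split(split_dim, num_split, value) signature that this codebase still uses:

import tensorflow as tf

batch, num_units = 3, 8           # hypothetical sizes
c = tf.zeros([batch, num_units])  # cell state
h = tf.zeros([batch, num_units])  # hidden state

# Legacy non-tuple state: concatenate along the feature axis.
packed = tf.concat_v2([c, h], 1)         # shape (3, 16)
# Recover the two halves with a split along the same axis.
c_back, h_back = tf.split(1, 2, packed)  # each (3, 8)

with tf.Session() as sess:
  print(sess.run(packed).shape, sess.run(c_back).shape)  # (3, 16) (3, 8)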
@ -610,7 +610,7 @@ def attention_decoder(decoder_inputs,
|
|||||||
ndims = q.get_shape().ndims
|
ndims = q.get_shape().ndims
|
||||||
if ndims:
|
if ndims:
|
||||||
assert ndims == 2
|
assert ndims == 2
|
||||||
query = array_ops.concat(1, query_list)
|
query = array_ops.concat_v2(query_list, 1)
|
||||||
for a in xrange(num_heads):
|
for a in xrange(num_heads):
|
||||||
with variable_scope.variable_scope("Attention_%d" % a):
|
with variable_scope.variable_scope("Attention_%d" % a):
|
||||||
y = linear(query, attention_vec_size, True)
|
y = linear(query, attention_vec_size, True)
|
||||||
@ -820,7 +820,7 @@ def embedding_attention_seq2seq(encoder_inputs,
|
|||||||
# First calculate a concatenation of encoder outputs to put attention on.
|
# First calculate a concatenation of encoder outputs to put attention on.
|
||||||
top_states = [array_ops.reshape(e, [-1, 1, cell.output_size])
|
top_states = [array_ops.reshape(e, [-1, 1, cell.output_size])
|
||||||
for e in encoder_outputs]
|
for e in encoder_outputs]
|
||||||
attention_states = array_ops.concat(1, top_states)
|
attention_states = array_ops.concat_v2(top_states, 1)
|
||||||
|
|
||||||
# Decoder.
|
# Decoder.
|
||||||
output_size = None
|
output_size = None
|
||||||
|
@ -191,15 +191,13 @@ def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
|
|||||||
y_shape = math_ops.to_int64(array_ops.shape(y))
|
y_shape = math_ops.to_int64(array_ops.shape(y))
|
||||||
num_added_dims = array_ops.expand_dims(
|
num_added_dims = array_ops.expand_dims(
|
||||||
array_ops.size(x_shape) - array_ops.size(y_shape), 0)
|
array_ops.size(x_shape) - array_ops.size(y_shape), 0)
|
||||||
augmented_y_shape = array_ops.concat(0, [array_ops.ones(num_added_dims,
|
augmented_y_shape = array_ops.concat_v2(
|
||||||
ops.dtypes.int64),
|
[array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)
|
||||||
y_shape])
|
|
||||||
|
|
||||||
scaling = x_shape // augmented_y_shape
|
scaling = x_shape // augmented_y_shape
|
||||||
scaled_indices = x_indices // scaling
|
scaled_indices = x_indices // scaling
|
||||||
scaled_indices = array_ops.slice(scaled_indices,
|
scaled_indices = array_ops.slice(
|
||||||
array_ops.concat(0, [[0], num_added_dims]),
|
scaled_indices, array_ops.concat_v2([[0], num_added_dims], 0), [-1, -1])
|
||||||
[-1, -1])
|
|
||||||
dense_vals = array_ops.gather_nd(y, scaled_indices)
|
dense_vals = array_ops.gather_nd(y, scaled_indices)
|
||||||
|
|
||||||
if is_mul:
|
if is_mul:
|
||||||
|
@@ -228,13 +228,14 @@ def sparse_concat(axis,

   if expand_nonconcat_dim:
     max_shape = math_ops.reduce_max(
-        array_ops.concat(0, [array_ops.reshape(shape, [1, -1])
-                             for shape in shapes]), 0)
-    shapes = [array_ops.concat(0, [
-        max_shape[:axis], shape[-1:] if axis == -1 else
-        shape[axis:axis + 1], [] if axis == -1 else
-        max_shape[axis + 1:]
-    ]) for shape in shapes]
+        array_ops.concat_v2(
+            [array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
+    shapes = [
+        array_ops.concat_v2([
+            max_shape[:axis], shape[-1:] if axis == -1 else
+            shape[axis:axis + 1], [] if axis == -1 else max_shape[axis + 1:]
+        ], 0) for shape in shapes
+    ]

   output_ind, output_val, output_shape = (gen_sparse_ops._sparse_concat(
       inds, vals, shapes, axis, name=name))
@@ -855,15 +856,15 @@ def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
     # Slice off the last dimension of indices, then tack on the ids
     indices_columns_to_preserve = array_ops.slice(
         sp_ids.indices, [0, 0], array_ops.pack([-1, rank - 1]))
-    new_indices = array_ops.concat(1, [indices_columns_to_preserve,
-                                       array_ops.reshape(ids, [-1, 1])])
+    new_indices = array_ops.concat_v2(
+        [indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)

     new_values = sp_values.values
-    new_shape = array_ops.concat(
-        0,
-        [array_ops.slice(
-            sp_ids.dense_shape, [0], array_ops.expand_dims(rank - 1, 0)),
-         math_ops.cast(array_ops.pack([vocab_size]), dtypes.int64)])
+    new_shape = array_ops.concat_v2([
+        array_ops.slice(sp_ids.dense_shape, [0],
+                        array_ops.expand_dims(rank - 1, 0)),
+        math_ops.cast(array_ops.pack([vocab_size]), dtypes.int64)
+    ], 0)

     result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
     return result if already_sorted else sparse_reorder(result)
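In sparse_merge, each sparse entry keeps its leading index columns, the looked-up vocabulary id becomes the new last index column, and the last dimension of the dense shape is replaced by vocab_size. A rough NumPy analogue of that index and shape rewrite, with made-up values:

import numpy as np

indices = np.array([[0, 0], [0, 1], [1, 0]])   # (row, position) pairs (illustrative)
ids = np.array([7, 2, 5])                      # vocabulary ids, one per entry
dense_shape = np.array([2, 3])
vocab_size = 10
rank = indices.shape[1]

# Keep the first rank - 1 index columns, append the ids as the last column.
new_indices = np.concatenate([indices[:, :rank - 1], ids.reshape(-1, 1)], axis=1)
# -> [[0, 7], [0, 2], [1, 5]]

# Drop the last dense dimension and append the vocabulary size.
new_shape = np.concatenate([dense_shape[:rank - 1], [vocab_size]])
# -> [2, 10]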
@@ -1059,16 +1060,17 @@ def sparse_fill_empty_rows(sp_input, default_value, name=None):
         False)

     empty_row_indices_as_column = array_ops.reshape(empty_row_indices, [-1, 1])
-    additional_indices = array_ops.concat(
-        1, [empty_row_indices_as_column,
-            array_ops.zeros_like(empty_row_indices_as_column)])
+    additional_indices = array_ops.concat_v2([
+        empty_row_indices_as_column,
+        array_ops.zeros_like(empty_row_indices_as_column)
+    ], 1)
     additional_values = array_ops.fill(
         array_ops.shape(empty_row_indices), default_value)

-    all_indices_unordered = array_ops.concat(0, [sp_input.indices,
-                                                 additional_indices])
-    all_values_unordered = array_ops.concat(0, [sp_input.values,
-                                                additional_values])
+    all_indices_unordered = array_ops.concat_v2(
+        [sp_input.indices, additional_indices], 0)
+    all_values_unordered = array_ops.concat_v2(
+        [sp_input.values, additional_values], 0)
     sp_unordered_output = sparse_tensor.SparseTensor(
         all_indices_unordered,
         all_values_unordered, sp_input.dense_shape)
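The fill step above appends one (row, 0) index and one default value per empty row, then concatenates these with the existing entries before re-sorting. A rough NumPy analogue of the index construction, with made-up row numbers:

import numpy as np

empty_row_indices = np.array([1, 3])     # rows with no entries (illustrative)
default_value = 0.0

col = empty_row_indices.reshape(-1, 1)
# One new entry at column 0 for each empty row.
additional_indices = np.concatenate([col, np.zeros_like(col)], axis=1)
# -> [[1, 0], [3, 0]]
additional_values = np.full(empty_row_indices.shape, default_value)
# -> [0.0, 0.0]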
@@ -1000,7 +1000,7 @@ class PartitionedVariable(object):
     partition_ix = partition_axes[0]

     with ops.name_scope(self._name + "/ConcatPartitions/"):
-      concatenated = array_ops.concat(partition_ix, self._variable_list)
+      concatenated = array_ops.concat_v2(self._variable_list, partition_ix)

     with ops.name_scope(None):
       return array_ops.identity(concatenated, name=self._name)
@@ -137,10 +137,10 @@ class Scaffold(object):
           default_init_op)
     if self._ready_op is None:
       def default_ready_op():
-        return array_ops.concat(
-            0,
-            [variables.report_uninitialized_variables(),
-             resources.report_uninitialized_resources()])
+        return array_ops.concat_v2([
+            variables.report_uninitialized_variables(),
+            resources.report_uninitialized_resources()
+        ], 0)
       self._ready_op = Scaffold.get_or_default(
           'ready_op', ops.GraphKeys.READY_OP,
           default_ready_op)