Change references to TensorArray.pack and TensorArray.unpack to TensorArray.stack
and TensorArray.unstack, since pack and unpack are being deprecated. Also switch a
few references to tf.pack/tf.unpack to tf.stack/tf.unstack.

Change: 141619037
parent 6da1cfa157
commit 35a58c8141
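The rename is mechanical: `TensorArray.unpack`/`TensorArray.pack` become `TensorArray.unstack`/`TensorArray.stack`, and the free functions `tf.pack`/`tf.unpack` become `tf.stack`/`tf.unstack`, with unchanged semantics. Below is a minimal before/after sketch of the migration; it is not part of this change and assumes a TensorFlow 1.x graph-mode session where the new names are available.

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # shape (3, 2)

# Old, deprecated spellings:
#   ta = tf.TensorArray(dtype=tf.float32, size=3).unpack(x)
#   y = ta.pack()
#   parts = tf.unpack(x)

# New spellings used by this change:
ta = tf.TensorArray(dtype=tf.float32, size=3).unstack(x)  # one (2,) element per row
y = ta.stack()          # read the elements back as a single (3, 2) tensor
parts = tf.unstack(x)   # list of three (2,) tensors
z = tf.stack(parts)     # re-join along a new leading axis

with tf.Session() as sess:
  y_val, z_val = sess.run([y, z])  # both equal x
```

The sketch uses only the new names; whether the old names still resolve as deprecated aliases depends on the TensorFlow release, so treat it as illustrative only.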
@@ -781,7 +781,7 @@ class RawRNNTest(tf.test.TestCase):
 dtype=tf.float32)
 sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
 inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
-inputs_ta = inputs_ta.unpack(inputs)
+inputs_ta = inputs_ta.unstack(inputs)

 cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)

@@ -804,7 +804,7 @@ class RawRNNTest(tf.test.TestCase):

 outputs_ta, final_state, _ = tf.nn.raw_rnn(
 cell, loop_fn, scope=reuse_scope)
-outputs = outputs_ta.pack()
+outputs = outputs_ta.stack()

 reuse_scope.reuse_variables()
 outputs_dynamic_rnn, final_state_dynamic_rnn = tf.nn.dynamic_rnn(
@@ -874,7 +874,7 @@ class RawRNNTest(tf.test.TestCase):

 inputs = np.random.randn(max_time, batch_size, input_depth)
 inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
-inputs_ta = inputs_ta.unpack(inputs)
+inputs_ta = inputs_ta.unstack(inputs)

 cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)

@@ -909,7 +909,7 @@ class RawRNNTest(tf.test.TestCase):

 inputs = np.random.randn(max_time, batch_size, input_depth)
 inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
-inputs_ta = inputs_ta.unpack(inputs)
+inputs_ta = inputs_ta.unstack(inputs)

 cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
 def loop_fn(time_, cell_output, cell_state, loop_state):
@@ -935,7 +935,7 @@ class RawRNNTest(tf.test.TestCase):

 r = tf.nn.raw_rnn(cell, loop_fn)
 loop_state = r[-1]
-loop_state = loop_state.pack()
+loop_state = loop_state.stack()
 self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())

 def testEmitDifferentStructureThanCellOutput(self):
@@ -947,7 +947,7 @@ class RawRNNTest(tf.test.TestCase):

 inputs = np.random.randn(max_time, batch_size, input_depth)
 inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
-inputs_ta = inputs_ta.unpack(inputs)
+inputs_ta = inputs_ta.unstack(inputs)

 cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
 def loop_fn(time_, cell_output, cell_state, _):
@@ -972,7 +972,7 @@ class RawRNNTest(tf.test.TestCase):
 output_ta = r[0]
 self.assertEqual(2, len(output_ta))
 self.assertEqual([tf.int32, tf.int64], [ta.dtype for ta in output_ta])
-output = [ta.pack() for ta in output_ta]
+output = [ta.stack() for ta in output_ta]
 output_vals = sess.run(output)
 self.assertAllEqual(
 np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
@@ -1010,7 +1010,7 @@ class RawRNNTest(tf.test.TestCase):
 dtype=tf.float32)
 sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
 inputs_ta = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
-inputs_ta = inputs_ta.unpack(inputs)
+inputs_ta = inputs_ta.unstack(inputs)

 cell = tf.contrib.rnn.LSTMCell(num_units, state_is_tuple=True)
 def loop_fn(time_, cell_output, cell_state, unused_loop_state):
@@ -2977,7 +2977,7 @@ def case(pred_fn_pairs, default, exclusive=False, name="case"):
 return prev_case

 if exclusive:
-preds_c = array_ops.pack(preds, name="preds_c")
+preds_c = array_ops.stack(preds, name="preds_c")
 num_true_conditions = math_ops.reduce_sum(
 math_ops.cast(preds_c, dtypes.int32), name="num_true_conds")
 at_most_one_true_condition = math_ops.less(
@@ -192,7 +192,7 @@ class SwitchTestCase(TensorFlowTestCase):

 _, outputs = tf.while_loop(Cond, Body, [initial_i, initial_outputs])

-outputs = tf.reduce_sum(outputs.pack())
+outputs = tf.reduce_sum(outputs.stack())
 r = tf.gradients([outputs], [inputs])[0]
 grad_wr_inputs = ops.convert_to_tensor(r)
 o, grad = sess.run([outputs, grad_wr_inputs],
@@ -218,7 +218,7 @@ class SwitchTestCase(TensorFlowTestCase):

 _, outputs = tf.while_loop(Cond, Body, [initial_i, initial_outputs])

-outputs = tf.reduce_sum(outputs.pack())
+outputs = tf.reduce_sum(outputs.stack())
 r = tf.gradients([outputs], [inputs])[0]
 grad_wr_inputs = ops.convert_to_tensor(r)
 o, grad = sess.run([outputs, grad_wr_inputs],
@@ -106,7 +106,7 @@ def foldl(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
 elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
 dynamic_size=False,
 infer_shape=True)
-elems_ta = elems_ta.unpack(elems)
+elems_ta = elems_ta.unstack(elems)

 if initializer is None:
 a = elems_ta.read(0)
@@ -186,7 +186,7 @@ def foldr(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
 elems_ta = tensor_array_ops.TensorArray(dtype=elems.dtype, size=n,
 dynamic_size=False,
 infer_shape=True)
-elems_ta = elems_ta.unpack(elems)
+elems_ta = elems_ta.unstack(elems)

 if initializer is None:
 i = n - 1
@@ -354,7 +354,7 @@ def map_fn(fn, elems, dtype=None, parallel_iterations=10, back_prop=True,
 for elem in elems_flat]
 # Unpack elements
 elems_ta = [
-elem_ta.unpack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
+elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]

 i = constant_op.constant(0)

@@ -390,7 +390,7 @@ def map_fn(fn, elems, dtype=None, parallel_iterations=10, back_prop=True,
 parallel_iterations=parallel_iterations,
 back_prop=back_prop,
 swap_memory=swap_memory)
-results_flat = [r.pack() for r in r_a]
+results_flat = [r.stack() for r in r_a]

 n_static = elems_flat[0].get_shape().with_rank_at_least(1)[0]
 for elem in elems_flat[1:]:
@@ -536,7 +536,7 @@ def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
 for elem in elems_flat]
 # Unpack elements
 elems_ta = [
-elem_ta.unpack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]
+elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]

 if initializer is None:
 a_flat = [elem.read(0) for elem in elems_ta]
@@ -586,7 +586,7 @@ def scan(fn, elems, initializer=None, parallel_iterations=10, back_prop=True,
 parallel_iterations=parallel_iterations,
 back_prop=back_prop, swap_memory=swap_memory)

-results_flat = [r.pack() for r in r_a]
+results_flat = [r.stack() for r in r_a]

 n_static = elems_flat[0].get_shape().with_rank_at_least(1)[0]
 for elem in elems_flat[1:]:
@@ -191,7 +191,7 @@ def rnn(cell, inputs, initial_state=None, dtype=None,
 # convert int to TensorShape if necessary
 size = _state_size_with_prefix(output_size, prefix=[batch_size])
 output = array_ops.zeros(
-array_ops.pack(size), _infer_state_dtype(dtype, state))
+array_ops.stack(size), _infer_state_dtype(dtype, state))
 shape = _state_size_with_prefix(
 output_size, prefix=[fixed_batch_size.value])
 output.set_shape(tensor_shape.TensorShape(shape))
@@ -471,7 +471,7 @@ def _reverse_seq(input_seq, lengths):
 input_.set_shape(input_shape)

 # Join into (time, batch_size, depth)
-s_joined = array_ops.pack(sequence)
+s_joined = array_ops.stack(sequence)

 # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
 if lengths is not None:
@@ -480,7 +480,7 @@ def _reverse_seq(input_seq, lengths):
 # Reverse along dimension 0
 s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
 # Split again into list
-result = array_ops.unpack(s_reversed)
+result = array_ops.unstack(s_reversed)
 for r, flat_result in zip(result, flat_results):
 r.set_shape(input_shape)
 flat_result.append(r)
@@ -835,7 +835,7 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,

 def _assert_has_shape(x, shape):
 x_shape = array_ops.shape(x)
-packed_shape = array_ops.pack(shape)
+packed_shape = array_ops.stack(shape)
 return control_flow_ops.Assert(
 math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
 ["Expected shape for Tensor %s is " % x.name,
@@ -947,7 +947,7 @@ def _dynamic_rnn_loop(cell,
 def _create_zero_arrays(size):
 size = _state_size_with_prefix(size, prefix=[batch_size])
 return array_ops.zeros(
-array_ops.pack(size), _infer_state_dtype(dtype, state))
+array_ops.stack(size), _infer_state_dtype(dtype, state))

 flat_zero_output = tuple(_create_zero_arrays(output)
 for output in flat_output_size)
@@ -974,7 +974,7 @@ def _dynamic_rnn_loop(cell,
 input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
 for i in range(len(flat_input)))

-input_ta = tuple(ta.unpack(input_)
+input_ta = tuple(ta.unstack(input_)
 for ta, input_ in zip(input_ta, flat_input))

 def _time_step(time, output_ta_t, state):
@@ -1027,7 +1027,7 @@ def _dynamic_rnn_loop(cell,
 swap_memory=swap_memory)

 # Unpack final output if not using output tuples.
-final_outputs = tuple(ta.pack() for ta in output_final_ta)
+final_outputs = tuple(ta.stack() for ta in output_final_ta)

 # Restore some shape information
 for output, output_size in zip(final_outputs, flat_output_size):
@@ -1092,7 +1092,7 @@ def raw_rnn(cell, loop_fn,
 dtype=tf.float32)
 sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
 inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
-inputs_ta = inputs_ta.unpack(inputs)
+inputs_ta = inputs_ta.unstack(inputs)

 cell = tf.contrib.rnn.LSTMCell(num_units)

@@ -1113,7 +1113,7 @@ def raw_rnn(cell, loop_fn,
 emit_output, next_loop_state)

 outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
-outputs = outputs_ta.pack()
+outputs = outputs_ta.stack()
 ```

 Args: