From 842cd17c1075de0ffca4244e43d8428d7f341420 Mon Sep 17 00:00:00 2001
From: Ruizhi
Date: Wed, 1 Aug 2018 16:55:33 +0800
Subject: [PATCH] Fix shapes in comments of nmt_with_attention.ipynb

The decoder's output shape is currently commented as
`(batch_size * max_length, vocab)`, which is misleading. The correct shape
is `(batch_size * 1, vocab)`, since the input x to the GRU layer has
shape == `(batch_size, 1, embedding_dim + hidden_size)`: the decoder is
fed one target token per step, so the time dimension is 1, not max_length.
---
 .../examples/nmt_with_attention/nmt_with_attention.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb b/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
index 1ab1b71bd05..0408ef01caa 100644
--- a/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
+++ b/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb
@@ -552,10 +552,10 @@
 "        # passing the concatenated vector to the GRU\n",
 "        output, state = self.gru(x)\n",
 "        \n",
-"        # output shape == (batch_size * max_length, hidden_size)\n",
+"        # output shape == (batch_size * 1, hidden_size)\n",
 "        output = tf.reshape(output, (-1, output.shape[2]))\n",
 "        \n",
-"        # output shape == (batch_size * max_length, vocab)\n",
+"        # output shape == (batch_size * 1, vocab)\n",
 "        x = self.fc(output)\n",
 "        \n",
 "        return x, state, attention_weights\n",
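
Note: the following standalone sketch (not part of the patch) checks the shape
arithmetic claimed above. The layer setup and dimension values (batch_size=4,
embedding_dim=8, hidden_size=16, vocab=100) are illustrative assumptions, not
taken from the notebook.

    import tensorflow as tf

    batch_size, embedding_dim, hidden_size, vocab = 4, 8, 16, 100

    gru = tf.keras.layers.GRU(hidden_size, return_sequences=True, return_state=True)
    fc = tf.keras.layers.Dense(vocab)

    # The decoder is called with one target token per step, so the GRU input
    # has a time dimension of 1: (batch_size, 1, embedding_dim + hidden_size).
    x = tf.random.normal((batch_size, 1, embedding_dim + hidden_size))

    output, state = gru(x)
    print(output.shape)  # (4, 1, 16)  == (batch_size, 1, hidden_size)

    output = tf.reshape(output, (-1, output.shape[2]))
    print(output.shape)  # (4, 16)     == (batch_size * 1, hidden_size)

    logits = fc(output)
    print(logits.shape)  # (4, 100)    == (batch_size * 1, vocab)

Since the time dimension is 1, `batch_size * 1` is simply `batch_size`; the
comment keeps the `* 1` form to mirror the reshape that collapses the time axis.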