From e7066fb9c15412d3f9aca43ccf991c62833f8291 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ond=C5=99ej=20Filip?= <ondri.f@gmail.com>
Date: Mon, 31 Oct 2016 18:14:03 +0100
Subject: [PATCH] Fix too long sentence (#4962)

* Fix too long sentence

* Make previous commit more idiomatic

* Fix missing logging import
---
 tensorflow/models/rnn/translate/translate.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/tensorflow/models/rnn/translate/translate.py b/tensorflow/models/rnn/translate/translate.py
index 083417ba5ae..cf870c71f6e 100644
--- a/tensorflow/models/rnn/translate/translate.py
+++ b/tensorflow/models/rnn/translate/translate.py
@@ -36,6 +36,7 @@ import os
 import random
 import sys
 import time
+import logging
 
 import numpy as np
 from six.moves import xrange  # pylint: disable=redefined-builtin
@@ -238,8 +239,14 @@ def decode():
       # Get token-ids for the input sentence.
       token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)
       # Which bucket does it belong to?
-      bucket_id = min([b for b in xrange(len(_buckets))
-                       if _buckets[b][0] > len(token_ids)])
+      bucket_id = len(_buckets) - 1
+      for i, bucket in enumerate(_buckets):
+        if bucket[0] >= len(token_ids):
+          bucket_id = i
+          break
+      else:
+        logging.warning("Sentence truncated: %s", sentence)
+
       # Get a 1-element batch to feed the sentence to the model.
       encoder_inputs, decoder_inputs, target_weights = model.get_batch(
           {bucket_id: [(token_ids, [])]}, bucket_id)