From bf62fcec003636338386f5246103b90a9580181c Mon Sep 17 00:00:00 2001
From: Anna R
Date: Fri, 9 Aug 2019 19:01:23 -0700
Subject: [PATCH] Automated rollback of commit
 23e33f871b2bf2879b40ebf3b883e104f30f389b.

Revert #31450.

PiperOrigin-RevId: 262675086
---
 tensorflow/python/keras/layers/core.py | 27 +++------------------------
 tensorflow/python/layers/core_test.py  | 14 --------------
 2 files changed, 3 insertions(+), 38 deletions(-)

diff --git a/tensorflow/python/keras/layers/core.py b/tensorflow/python/keras/layers/core.py
index c72b2d6aa8b..df78cffa4a2 100644
--- a/tensorflow/python/keras/layers/core.py
+++ b/tensorflow/python/keras/layers/core.py
@@ -26,7 +26,6 @@ import warnings
 import numpy as np
 
 from tensorflow.python.eager import context
-from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_shape
@@ -581,29 +580,9 @@ class Flatten(Layer):
       permutation.append(1)
       inputs = array_ops.transpose(inputs, perm=permutation)
 
-    input_shape = inputs.shape
-    if input_shape[1:].is_fully_defined():
-      flattened_dim = tensor_shape.dimension_value(
-          np.prod(input_shape[1:], dtype=int))
-      # Temporary fix for integer overflow issue.
-      if flattened_dim > np.iinfo(np.int32).max:
-        shape_dtype = dtypes.int64
-      else:
-        shape_dtype = dtypes.int32
-      outputs = array_ops.reshape(
-          inputs, constant_op.constant((-1, flattened_dim), dtype=shape_dtype))
-    else:
-      batch_size = tensor_shape.dimension_value(inputs.shape[0])
-      if batch_size:
-        # Temporary fix for integer overflow issue.
-        if batch_size > np.iinfo(np.int32).max:
-          shape_dtype = dtypes.int64
-        else:
-          shape_dtype = dtypes.int32
-        outputs = array_ops.reshape(
-            inputs, constant_op.constant((batch_size, -1), dtype=shape_dtype))
-      else:
-        outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
+    outputs = array_ops.reshape(
+        inputs, (tensor_shape.dimension_value(inputs.shape[0]) or
+                 array_ops.shape(inputs)[0], -1))
     if not context.executing_eagerly():
       outputs.set_shape(self.compute_output_shape(inputs.shape))
     return outputs
diff --git a/tensorflow/python/layers/core_test.py b/tensorflow/python/layers/core_test.py
index cc0d70f3e23..b40a2682381 100644
--- a/tensorflow/python/layers/core_test.py
+++ b/tensorflow/python/layers/core_test.py
@@ -556,20 +556,6 @@ class FlattenTest(test.TestCase):
       self.assertEqual(list(np_output.shape), [5, 6])
       self.assertEqual(y.get_shape().as_list(), [5, None])
 
-  @test_util.run_deprecated_v1
-  def testFlattenLargeDim(self):
-    x = array_ops.placeholder(shape=(None, 21316, 21316, 80), dtype='float32')
-    y = core_layers.Flatten()(x)
-    self.assertEqual(y.shape.as_list(), [None, 21316 * 21316 * 80])
-
-  @test_util.run_deprecated_v1
-  def testFlattenLargeBatchDim(self):
-    batch_size = np.iinfo(np.int32).max + 10
-    x = array_ops.placeholder(
-        shape=(batch_size, None, None, 1), dtype='float32')
-    y = core_layers.Flatten()(x)
-    self.assertEqual(y.shape.as_list(), [batch_size, None])
-
 
 if __name__ == '__main__':
   test.main()
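
Note (not part of the patch): the rolled-back change built the reshape target
as an explicit int64 constant whenever the flattened dimension or batch size
exceeded np.iinfo(np.int32).max, to dodge an integer-overflow issue; this
revert restores the simpler form that takes the batch dimension statically
when known and from the runtime shape otherwise, letting reshape infer the
flattened axis via -1. The deleted testFlattenLargeDim/testFlattenLargeBatchDim
cases were the regression tests for that overflow guard, so they go with it.
Below is a minimal sketch of the restored behaviour; it is an illustration
only, written against the public TensorFlow 2.x API (tf.reshape/tf.shape
standing in for the internal array_ops calls in the patch), not the patched
code itself.

import numpy as np
import tensorflow as tf


def flatten_like_restored(inputs):
  # Prefer the statically known batch size; fall back to the runtime
  # shape when the batch dimension is None (e.g. under tf.function).
  batch = inputs.shape[0]
  if batch is None:
    batch = tf.shape(inputs)[0]
  # Collapse every non-batch axis into one; -1 lets reshape infer it,
  # which is what makes the explicit int32/int64 choice unnecessary here.
  return tf.reshape(inputs, (batch, -1))


x = tf.constant(np.arange(24.0).reshape(2, 3, 4))
print(flatten_like_restored(x).shape)  # (2, 12)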