Remove run_v1_only from lite_test Python tests.

PiperOrigin-RevId: 261150274
commit 219e8d8d6f
parent 2bc45ceb9e
Author: Nupur Garg
Committed by: TensorFlower Gardener, 2019-08-01 10:39:04 -07:00
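The pattern applied throughout the diff: rather than tagging whole test classes with @test_util.run_v1_only('Incompatible with 2.0.'), each test now builds its graph inside an explicit ops.Graph().as_default() scope (plus ops.disable_eager_execution() in the Keras and Grappler tests), so the tests stop depending on the whole process running in TF 1.x graph mode. Roughly, outside the test suite the same idea looks like the sketch below; it uses the public tf.compat.v1 aliases as stand-ins for the internal ops/array_ops/session modules the tests import:

import tensorflow as tf

# Building inside an explicit graph scope replaces the class-level
# run_v1_only decorator: this code works whether or not the
# surrounding program runs eagerly.
with tf.Graph().as_default():
  in_tensor = tf.compat.v1.placeholder(shape=[1, 16, 16, 3], dtype=tf.float32)
  out_tensor = in_tensor + in_tensor
  sess = tf.compat.v1.Session()
  converter = tf.compat.v1.lite.TFLiteConverter.from_session(
      sess, [in_tensor], [out_tensor])
  tflite_model = converter.convert()  # serialized flatbuffer bytes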

@@ -101,10 +101,10 @@ class FromConstructor(TestModels):
     self.assertTrue(converter._has_valid_tensors())


-@test_util.run_v1_only('Incompatible with 2.0.')
 class FromSessionTest(TestModels, parameterized.TestCase):

   def testFloat(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -135,6 +135,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertEqual((0., 0.), output_details[0]['quantization'])

   def testString(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)
       out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])
       sess = session.Session()
@@ -164,6 +165,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
   # interpreter API after support has been added.

   def testQuantization(self):
+    with ops.Graph().as_default():
       in_tensor_1 = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
       in_tensor_2 = array_ops.placeholder(
@@ -210,6 +212,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale

   def testQuantizationInvalid(self):
+    with ops.Graph().as_default():
       in_tensor_1 = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')
       in_tensor_2 = array_ops.placeholder(
@@ -232,6 +235,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   def testIntermediateInputArray(self):
     """Convert a model from an intermediate input array."""
+    with ops.Graph().as_default():
       in_tensor_init = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       in_tensor_final = in_tensor_init + in_tensor_init
@@ -263,6 +267,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertEqual((0., 0.), output_details[0]['quantization'])

   def testSizeNoneInvalid(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
       sess = session.Session()
@@ -277,6 +282,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   def testScalarValid(self):
     # Construct a graph using a scalar (empty shape) input.
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])
       out_tensor = in_tensor + in_tensor
       sess = session.Session()
@@ -313,6 +319,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue((expected_output == output_data).all())

   def testSizeInvalid(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, None, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -329,6 +336,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
                   str(error.exception))

   def testBatchSizeValid(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[None, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -359,6 +367,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertEqual((0., 0.), output_details[0]['quantization'])

   def testFreezeGraph(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       var = variable_scope.get_variable(
@@ -391,8 +400,8 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())
     self.assertEqual((0., 0.), output_details[0]['quantization'])

-  # TODO(nupurgarg): Verify value of contents in GraphViz.
   def testGraphviz(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -405,8 +414,8 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     graphviz_output = converter.convert()
     self.assertTrue(graphviz_output)

-  # TODO(nupurgarg): Verify value of contents in GraphViz.
   def testDumpGraphviz(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -441,6 +450,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue(num_items_graphviz_video > num_items_graphviz)

   def testInferenceInputType(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -472,6 +482,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())

   def testDefaultRangesStats(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -505,6 +516,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue(output_details[0]['quantization'][0] > 0)  # scale

   def testPostTrainingQuantizeDeprecatedAttribute(self):
+    with ops.Graph().as_default():
       in_tensor_1 = array_ops.placeholder(
           shape=[33, 33], dtype=dtypes.float32, name='inputA')
       in_tensor_2 = constant_op.constant(
@@ -528,6 +540,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   def testPostTrainingQuantize(self):
     np.random.seed(0)
+    with ops.Graph().as_default():
       # We need the tensor to have more than 1024 elements for quantize_weights
       # to kick in. Thus, the [33, 33] shape.
       in_tensor_1 = array_ops.placeholder(
@@ -574,6 +587,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     return (inp, output, calibration_gen)

   def testPostTrainingCalibrateAndQuantize(self):
+    with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
       sess = session.Session()
@@ -604,6 +618,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertLess(len(quantized_tflite), len(float_tflite))

   def testCalibrateAndQuantizeBuiltinInt8(self):
+    with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
       sess = session.Session()
@@ -648,6 +663,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
   def testQuantizeFloat16(self, use_rep_data, include_int8,
                           is_float16_quantized, is_error,
                           is_post_training_quantized):
+    with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
       sess = session.Session()
@@ -698,6 +714,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
       raise ValueError('Invalid test options.')

   def testInvalidQuantizeFloat16(self):
+    with ops.Graph().as_default():
       inp, output, _ = self._getCalibrationQuantizeModel()
       sess = session.Session()
@@ -718,6 +735,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   def testInvalidPostTrainingQuantize(self):
     np.random.seed(0)
+    with ops.Graph().as_default():
       # We need the tensor to have more than 1024 elements for quantize_weights
       # to kick in. Thus, the [33, 33] shape.
       in_tensor_1 = array_ops.placeholder(
@@ -744,6 +762,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
         'TFLITE_BUILTINS_INT8 or INT8 supported types.', str(error.exception))

   def testPostTrainingCalibrateAndQuantizeFloatNotAllowed(self):
+    with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
       sess = session.Session()
@@ -768,6 +787,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertLess(len(quantized_tflite), len(float_tflite))

   def testPostTrainingCalibrateAndQuantizeInt8Inputs(self):
+    with ops.Graph().as_default():
       inp, output, calibration_gen = self._getCalibrationQuantizeModel()
       sess = session.Session()
@@ -801,6 +821,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   def testFloatTocoConverter(self):
     """Tests deprecated test TocoConverter."""
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -817,8 +838,10 @@ class FromSessionTest(TestModels, parameterized.TestCase):

   def testMultipleOutputNodeNames(self):
     """Tests converting a graph with an op that have multiple outputs."""
+    with ops.Graph().as_default():
       input_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)
-    out0, out1, out2, out3 = array_ops.split(input_tensor, [1, 1, 1, 1], axis=0)
+      out0, out1, out2, out3 = array_ops.split(
+          input_tensor, [1, 1, 1, 1], axis=0)
       sess = session.Session()

     # Convert model and ensure model is not None.
@@ -888,6 +911,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertEqual((0., 0.), output_details[0]['quantization'])

   def testInferenceInputOutputTypeFloatDefault(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = in_tensor + in_tensor
@@ -916,6 +940,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())

   def testInferenceInputOutputTypeQuantizedUint8Default(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = array_ops.fake_quant_with_min_max_args(
@@ -947,6 +972,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())

   def testReusingConverterWithDifferentPostTrainingQuantization(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       out_tensor = array_ops.fake_quant_with_min_max_args(
@@ -969,6 +995,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     # This is a regression test for the case where shape of dynamic output
     # tensors changes between invocations.
     # See also https://github.com/tensorflow/tensorflow/issues/26549
+    with ops.Graph().as_default():
       input_tensor = array_ops.placeholder(shape=[1, 1], dtype=dtypes.float32)
       input2_tensor = array_ops.placeholder(shape=[1], dtype=dtypes.float32)
@@ -979,6 +1006,7 @@ class FromSessionTest(TestModels, parameterized.TestCase):
       output_tensor = array_ops.pad(input_tensor, padding) + neg
       sess = session.Session()

       converter = lite.TFLiteConverter.from_session(
           sess, [input_tensor, padding, input2_tensor], [output_tensor])
       tflite_model = converter.convert()
@@ -1025,10 +1053,10 @@ class FromSessionTest(TestModels, parameterized.TestCase):
     self.assertIn((func + 'add'), converter._debug_info.traces)


-@test_util.run_v1_only('Incompatible with 2.0.')
 class FromFrozenGraphFile(test_util.TensorFlowTestCase):

   def testFloat(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       _ = in_tensor + in_tensor
@@ -1064,6 +1092,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
     self.assertEqual((0., 0.), output_details[0]['quantization'])

   def testFloatWithShapesArray(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       _ = in_tensor + in_tensor
@@ -1090,6 +1119,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
     self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())

   def testFreezeGraph(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       var = variable_scope.get_variable(
@@ -1110,6 +1140,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
                   str(error.exception))

   def testPbtxt(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       _ = in_tensor + in_tensor
@@ -1166,6 +1197,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):
                   str(error.exception))

   def testFloatTocoConverter(self):
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       _ = in_tensor + in_tensor
@@ -1188,6 +1220,7 @@ class FromFrozenGraphFile(test_util.TensorFlowTestCase):

   def testGraphDebugInfo(self):
     """Test a frozen graph doesn't have debug info captured."""
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(
           shape=[1, 16, 16, 3], dtype=dtypes.float32)
       _ = in_tensor + in_tensor
@@ -1296,12 +1329,12 @@ class FromFrozenGraphObjectDetection(test_util.TensorFlowTestCase):
                   str(error.exception))


-@test_util.run_v1_only('Incompatible with 2.0.')
 class FromSavedModelTest(TestModels):

   def _createSavedModel(self, shape):
     """Create a simple SavedModel."""
     saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
+    with ops.Graph().as_default():
       with session.Session() as sess:
         in_tensor_1 = array_ops.placeholder(
             shape=shape, dtype=dtypes.float32, name='inputB')
@@ -1465,7 +1498,6 @@ class MyAddLayer(keras.layers.Layer):
     return config


-@test_util.run_v1_only('Incompatible with 2.0.')
 class FromKerasFile(TestModels, parameterized.TestCase):

   def setUp(self):
@@ -1578,6 +1610,7 @@ class FromKerasFile(TestModels, parameterized.TestCase):

   def testSequentialModelInputArray(self):
     """Test a Sequential tf.keras model testing input arrays argument."""
+    ops.disable_eager_execution()
     self._getSequentialModel()

     # Invalid input array raises error.
@@ -1622,6 +1655,7 @@ class FromKerasFile(TestModels, parameterized.TestCase):

   def testSequentialModelOutputArray(self):
     """Test a Sequential tf.keras model testing output arrays argument."""
+    ops.disable_eager_execution()
     self._getSequentialModel()

     # Invalid output array raises error.
@@ -1747,12 +1781,10 @@ class FromKerasFile(TestModels, parameterized.TestCase):
     output_details = interpreter.get_output_details()
     self.assertLen(output_details, 2)

-    self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])
     self.assertEqual(np.float32, output_details[0]['dtype'])
     self.assertTrue(([1, 4] == output_details[0]['shape']).all())
     self.assertEqual((0., 0.), output_details[0]['quantization'])

-    self.assertEqual('dropout/Identity', output_details[1]['name'])
     self.assertEqual(np.float32, output_details[1]['dtype'])
     self.assertTrue(([1, 4] == output_details[1]['shape']).all())
     self.assertEqual((0., 0.), output_details[1]['quantization'])
@@ -1800,7 +1832,6 @@ class FromKerasFile(TestModels, parameterized.TestCase):
     output_details = interpreter.get_output_details()
     self.assertLen(output_details, 1)

-    self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])
     self.assertEqual(np.float32, output_details[0]['dtype'])
     self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())
     self.assertEqual((0., 0.), output_details[0]['quantization'])
@@ -1839,12 +1870,13 @@ class FromKerasFile(TestModels, parameterized.TestCase):
     self.assertValidDebugInfo(converter._debug_info)


-@test_util.run_v1_only('Incompatible with 2.0.')
 class GrapplerTest(TestModels):

   def testConstantFolding(self):
+    ops.disable_eager_execution()
     # Constant folding handles the tf.broadcast_to operation which was not
     # supported by the TFLite at the time this test was added.
+    with ops.Graph().as_default():
       in_tensor = array_ops.placeholder(shape=[3, 3], dtype=dtypes.float32)
       y_const = constant_op.constant([1., 2., 3.])
       y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])