Re-enable signal kernel tests on py38

PiperOrigin-RevId: 312166420
Change-Id: Ie18cf2e29d8a05d57675ce3e75b06509205a4e61
This commit is contained in:
Jared Duke 2020-05-18 15:29:57 -07:00 committed by TensorFlower Gardener
parent 4001e3dad3
commit f5c5747f13
3 changed files with 9 additions and 13 deletions

View File

@@ -149,7 +149,6 @@ cuda_py_tests(
     python_version = "PY3",
     shard_count = 4,
     tags = [
-        "no_oss_py38",  #TODO(b/151631881)
         "no_windows_gpu",
     ],
     deps = [

View File

@@ -50,7 +50,7 @@ def grappler_optimize(graph, fetches=None, config_proto=None):
   return tf_optimizer.OptimizeGraph(config_proto, metagraph)


-def tflite_convert(fn, input_templates, use_mlir=False):
+def tflite_convert(fn, input_templates):
  """Converts the provided fn to tf.lite model.

  Args:
@@ -59,7 +59,6 @@ def tflite_convert(fn, input_templates, use_mlir=False):
     input_templates: A list of Tensors, ndarrays or TensorSpecs describing the
       inputs that fn expects. The actual values of the Tensors or ndarrays are
       unused.
-    use_mlir: Experimental. Whether to use the tf.lite MLIR converter.

   Returns:
     The serialized tf.lite model.
@@ -67,7 +66,6 @@ def tflite_convert(fn, input_templates, use_mlir=False):
   fn = def_function.function(fn)
   concrete_func = fn.get_concrete_function(*input_templates)
   converter = lite.TFLiteConverterV2([concrete_func])
-  converter.experimental_new_converter = use_mlir
   return converter.convert()

View File

@@ -156,15 +156,14 @@ class WindowOpsTest(test.TestCase, parameterized.TestCase):
     self.assertLen(rewritten_graph.node, 1)

   @parameterized.parameters(
-      # Due to control flow, only MLIR is supported.
       # Only float32 is supported.
-      (window_ops.hann_window, 10, False, dtypes.float32, True),
-      (window_ops.hann_window, 10, True, dtypes.float32, True),
-      (window_ops.hamming_window, 10, False, dtypes.float32, True),
-      (window_ops.hamming_window, 10, True, dtypes.float32, True),
-      (window_ops.vorbis_window, 12, None, dtypes.float32, True))
-  def test_tflite_convert(self, window_fn, window_length, periodic, dtype,
-                          use_mlir):
+      (window_ops.hann_window, 10, False, dtypes.float32),
+      (window_ops.hann_window, 10, True, dtypes.float32),
+      (window_ops.hamming_window, 10, False, dtypes.float32),
+      (window_ops.hamming_window, 10, True, dtypes.float32),
+      (window_ops.vorbis_window, 12, None, dtypes.float32))
+  def test_tflite_convert(self, window_fn, window_length, periodic, dtype):
     def fn(window_length):
       try:
         return window_fn(window_length, periodic=periodic, dtype=dtype)
@@ -172,7 +171,7 @@ class WindowOpsTest(test.TestCase, parameterized.TestCase):
         return window_fn(window_length, dtype=dtype)

     tflite_model = test_util.tflite_convert(
-        fn, [tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)], use_mlir)
+        fn, [tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)])
     window_length = np.array(window_length).astype(np.int32)
     actual_output, = test_util.evaluate_tflite_model(
         tflite_model, [window_length])