diff --git a/tensorflow/python/kernel_tests/signal/BUILD b/tensorflow/python/kernel_tests/signal/BUILD
index adb12a5e850..bd893184570 100644
--- a/tensorflow/python/kernel_tests/signal/BUILD
+++ b/tensorflow/python/kernel_tests/signal/BUILD
@@ -149,7 +149,6 @@ cuda_py_tests(
     python_version = "PY3",
     shard_count = 4,
     tags = [
-        "no_oss_py38",  #TODO(b/151631881)
         "no_windows_gpu",
     ],
     deps = [
diff --git a/tensorflow/python/kernel_tests/signal/test_util.py b/tensorflow/python/kernel_tests/signal/test_util.py
index 1e95fe4b28f..e8d477a843b 100644
--- a/tensorflow/python/kernel_tests/signal/test_util.py
+++ b/tensorflow/python/kernel_tests/signal/test_util.py
@@ -50,7 +50,7 @@ def grappler_optimize(graph, fetches=None, config_proto=None):
   return tf_optimizer.OptimizeGraph(config_proto, metagraph)
 
 
-def tflite_convert(fn, input_templates, use_mlir=False):
+def tflite_convert(fn, input_templates):
   """Converts the provided fn to tf.lite model.
 
   Args:
@@ -59,7 +59,6 @@ def tflite_convert(fn, input_templates, use_mlir=False):
     input_templates: A list of Tensors, ndarrays or TensorSpecs describing the
       inputs that fn expects. The actual values of the Tensors or ndarrays are
       unused.
-    use_mlir: Experimental. Whether to use the tf.lite MLIR converter.
 
   Returns:
     The serialized tf.lite model.
@@ -67,7 +66,6 @@ def tflite_convert(fn, input_templates, use_mlir=False):
   fn = def_function.function(fn)
   concrete_func = fn.get_concrete_function(*input_templates)
   converter = lite.TFLiteConverterV2([concrete_func])
-  converter.experimental_new_converter = use_mlir
   return converter.convert()
 
 
diff --git a/tensorflow/python/kernel_tests/signal/window_ops_test.py b/tensorflow/python/kernel_tests/signal/window_ops_test.py
index 9f5fe6f64c7..9432e70c7f2 100644
--- a/tensorflow/python/kernel_tests/signal/window_ops_test.py
+++ b/tensorflow/python/kernel_tests/signal/window_ops_test.py
@@ -156,15 +156,14 @@ class WindowOpsTest(test.TestCase, parameterized.TestCase):
     self.assertLen(rewritten_graph.node, 1)
 
   @parameterized.parameters(
-      # Due to control flow, only MLIR is supported.
       # Only float32 is supported.
-      (window_ops.hann_window, 10, False, dtypes.float32, True),
-      (window_ops.hann_window, 10, True, dtypes.float32, True),
-      (window_ops.hamming_window, 10, False, dtypes.float32, True),
-      (window_ops.hamming_window, 10, True, dtypes.float32, True),
-      (window_ops.vorbis_window, 12, None, dtypes.float32, True))
-  def test_tflite_convert(self, window_fn, window_length, periodic, dtype,
-                          use_mlir):
+      (window_ops.hann_window, 10, False, dtypes.float32),
+      (window_ops.hann_window, 10, True, dtypes.float32),
+      (window_ops.hamming_window, 10, False, dtypes.float32),
+      (window_ops.hamming_window, 10, True, dtypes.float32),
+      (window_ops.vorbis_window, 12, None, dtypes.float32))
+  def test_tflite_convert(self, window_fn, window_length, periodic, dtype):
+
     def fn(window_length):
       try:
         return window_fn(window_length, periodic=periodic, dtype=dtype)
@@ -172,7 +171,7 @@ class WindowOpsTest(test.TestCase, parameterized.TestCase):
         return window_fn(window_length, dtype=dtype)
 
     tflite_model = test_util.tflite_convert(
-        fn, [tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)], use_mlir)
+        fn, [tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)])
     window_length = np.array(window_length).astype(np.int32)
     actual_output, = test_util.evaluate_tflite_model(
         tflite_model, [window_length])
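
For context, below is a minimal sketch of how the simplified `test_util.tflite_convert` helper would be called after this change, outside the parameterized test. It assumes the same internal import paths used by `window_ops_test.py` (`window_ops`, `tensor_spec`, `dtypes`, `test_util`); the standalone usage itself is illustrative, not part of the diff.

```python
import numpy as np

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.kernel_tests.signal import test_util
from tensorflow.python.ops.signal import window_ops


def fn(window_length):
  # Build a periodic Hann window whose length is a runtime scalar input.
  return window_ops.hann_window(
      window_length, periodic=True, dtype=dtypes.float32)


# Simplified signature: no use_mlir argument, since the helper no longer
# toggles converter.experimental_new_converter.
tflite_model = test_util.tflite_convert(
    fn, [tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)])

# Run the serialized model on a concrete window length.
actual_output, = test_util.evaluate_tflite_model(
    tflite_model, [np.array(10).astype(np.int32)])
```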