Re-enable signal kernel tests on py38
PiperOrigin-RevId: 312166420
Change-Id: Ie18cf2e29d8a05d57675ce3e75b06509205a4e61
parent 4001e3dad3
commit f5c5747f13
@@ -149,7 +149,6 @@ cuda_py_tests(
     python_version = "PY3",
     shard_count = 4,
     tags = [
-        "no_oss_py38",  #TODO(b/151631881)
         "no_windows_gpu",
     ],
     deps = [
@@ -50,7 +50,7 @@ def grappler_optimize(graph, fetches=None, config_proto=None):
   return tf_optimizer.OptimizeGraph(config_proto, metagraph)
 
 
-def tflite_convert(fn, input_templates, use_mlir=False):
+def tflite_convert(fn, input_templates):
   """Converts the provided fn to tf.lite model.
 
   Args:
@@ -59,7 +59,6 @@ def tflite_convert(fn, input_templates, use_mlir=False):
     input_templates: A list of Tensors, ndarrays or TensorSpecs describing the
       inputs that fn expects. The actual values of the Tensors or ndarrays are
       unused.
-    use_mlir: Experimental. Whether to use the tf.lite MLIR converter.
 
   Returns:
     The serialized tf.lite model.
@@ -67,7 +66,6 @@ def tflite_convert(fn, input_templates, use_mlir=False):
   fn = def_function.function(fn)
   concrete_func = fn.get_concrete_function(*input_templates)
   converter = lite.TFLiteConverterV2([concrete_func])
-  converter.experimental_new_converter = use_mlir
   return converter.convert()
 
 
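Note: with use_mlir gone, tflite_convert simply relies on the converter's default behavior. A minimal sketch of how the simplified helper is exercised, written against the public TF 2.x API (tf.lite.TFLiteConverter.from_concrete_functions stands in for the internal lite.TFLiteConverterV2, and tf.signal.hann_window is only an illustrative input, not part of this change):

import tensorflow as tf

def make_window(length):
  # Any traceable function works here; hann_window is just an example.
  return tf.signal.hann_window(length, dtype=tf.float32)

# Mirrors the helper: trace a concrete function, convert, return flatbuffer bytes.
concrete_fn = tf.function(make_window).get_concrete_function(
    tf.TensorSpec(shape=[], dtype=tf.int32))
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_fn])
tflite_model = converter.convert()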
@@ -156,15 +156,14 @@ class WindowOpsTest(test.TestCase, parameterized.TestCase):
     self.assertLen(rewritten_graph.node, 1)
 
   @parameterized.parameters(
-      # Due to control flow, only MLIR is supported.
-      (window_ops.hann_window, 10, False, dtypes.float32, True),
-      (window_ops.hann_window, 10, True, dtypes.float32, True),
-      (window_ops.hamming_window, 10, False, dtypes.float32, True),
-      (window_ops.hamming_window, 10, True, dtypes.float32, True),
-      (window_ops.vorbis_window, 12, None, dtypes.float32, True))
-  def test_tflite_convert(self, window_fn, window_length, periodic, dtype,
-                          use_mlir):
+      # Only float32 is supported.
+      (window_ops.hann_window, 10, False, dtypes.float32),
+      (window_ops.hann_window, 10, True, dtypes.float32),
+      (window_ops.hamming_window, 10, False, dtypes.float32),
+      (window_ops.hamming_window, 10, True, dtypes.float32),
+      (window_ops.vorbis_window, 12, None, dtypes.float32))
+  def test_tflite_convert(self, window_fn, window_length, periodic, dtype):
 
     def fn(window_length):
       try:
         return window_fn(window_length, periodic=periodic, dtype=dtype)
@@ -172,7 +171,7 @@ class WindowOpsTest(test.TestCase, parameterized.TestCase):
         return window_fn(window_length, dtype=dtype)
 
     tflite_model = test_util.tflite_convert(
-        fn, [tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)], use_mlir)
+        fn, [tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)])
     window_length = np.array(window_length).astype(np.int32)
     actual_output, = test_util.evaluate_tflite_model(
         tflite_model, [window_length])
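For reference, test_util.evaluate_tflite_model is untouched by this diff and its body is not shown here; the sketch below is an assumption about how such a helper typically drives the serialized model, using the standard tf.lite.Interpreter API (run_tflite_model and the scalar window length are hypothetical, for illustration only):

import numpy as np
import tensorflow as tf

def run_tflite_model(tflite_model, inputs):
  # Hypothetical stand-in for evaluate_tflite_model: feed inputs, return outputs.
  interpreter = tf.lite.Interpreter(model_content=tflite_model)
  interpreter.allocate_tensors()
  for detail, value in zip(interpreter.get_input_details(), inputs):
    interpreter.set_tensor(detail['index'], value)
  interpreter.invoke()
  return [interpreter.get_tensor(d['index'])
          for d in interpreter.get_output_details()]

# 'tflite_model' is the flatbuffer produced by the conversion sketch above;
# the scalar int32 input matches the TensorSpec used in the test.
outputs = run_tflite_model(tflite_model, [np.array(10, dtype=np.int32)])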