diff --git a/tensorflow/python/kernel_tests/batch_matmul_op_test.py b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
index b68eaa123c5..dab4116ab9d 100644
--- a/tensorflow/python/kernel_tests/batch_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
@@ -262,7 +262,8 @@ class BatchMatMulBenchmark(test.Benchmark):
 
 
 if __name__ == "__main__":
-  dtypes_to_test = [np.float16, np.float32, np.float64, np.int32, np.complex64, np.complex128]
+  dtypes_to_test = [np.float16, np.float32, np.float64, np.int32,
+                    np.complex64, np.complex128]
   for dtype_ in dtypes_to_test:
     for adjoint_a_ in False, True:
       for adjoint_b_ in False, True:
diff --git a/tensorflow/python/kernel_tests/eig_op_test.py b/tensorflow/python/kernel_tests/eig_op_test.py
index 74607c66dc2..4cfbcd21b49 100644
--- a/tensorflow/python/kernel_tests/eig_op_test.py
+++ b/tensorflow/python/kernel_tests/eig_op_test.py
@@ -183,7 +183,8 @@ def _GetEigTest(dtype_, shape_, compute_v_):
 
 
 if __name__ == "__main__":
-  dtypes_to_test = [dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64, dtypes_lib.complex128]
+  dtypes_to_test = [dtypes_lib.float32, dtypes_lib.float64,
+                    dtypes_lib.complex64, dtypes_lib.complex128]
   for compute_v in True, False:
     for dtype in dtypes_to_test:
       for size in 1, 2, 5, 10:
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
index 409ab20985d..ad419ced5d1 100644
--- a/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_adjoint_test.py
@@ -199,7 +199,8 @@ class LinearOperatorAdjointTest(
 
   def test_solve_adjoint_complex_operator(self):
     if test.is_built_with_rocm():
-      self.skipTest("ROCm does not support BLAS solve operations for complex types")
+      self.skipTest("ROCm does not support BLAS solve operations"
+                    " for complex types")
     matrix1 = self.evaluate(linear_operator_test_util.random_tril_matrix(
         [4, 4], dtype=dtypes.complex128, force_well_conditioned=True) +
         1j * linear_operator_test_util.random_tril_matrix(
diff --git a/tensorflow/python/kernel_tests/matmul_op_test.py b/tensorflow/python/kernel_tests/matmul_op_test.py
index dedfa58c3ed..cf562e094ed 100644
--- a/tensorflow/python/kernel_tests/matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/matmul_op_test.py
@@ -226,7 +226,8 @@ class MatMulInfixOperatorTest(test_lib.TestCase):
 if __name__ == "__main__":
   sizes = [1, 3, 5]
   trans_options = [[False, False], [True, False], [False, True]]
-  dtypes_to_test = [np.int32, np.int64, np.float16, np.float32, np.float64, np.complex64, np.complex128]
+  dtypes_to_test = [np.int32, np.int64, np.float16, np.float32, np.float64,
+                    np.complex64, np.complex128]
   # TF2 does not support placeholders under eager so we skip it
   for use_static_shape in set([True, tf2.enabled()]):
     for dtype in dtypes_to_test:
diff --git a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py
index ef64f7cf61b..73609a3c1cf 100644
--- a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py
+++ b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py
@@ -240,7 +240,8 @@ def _GetSelfAdjointEigGradTest(dtype_, shape_, compute_v_):
 
 
 if __name__ == "__main__":
-  dtypes_to_test = [dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64, dtypes_lib.complex128]
+  dtypes_to_test = [dtypes_lib.float32, dtypes_lib.float64,
+                    dtypes_lib.complex64, dtypes_lib.complex128]
   for compute_v in True, False:
     for dtype in dtypes_to_test:
       for size in 1, 2, 5, 10:
diff --git a/tensorflow/python/kernel_tests/tensordot_op_test.py b/tensorflow/python/kernel_tests/tensordot_op_test.py
index 3663a91281b..b63e3df6919 100644
--- a/tensorflow/python/kernel_tests/tensordot_op_test.py
+++ b/tensorflow/python/kernel_tests/tensordot_op_test.py
@@ -221,7 +221,8 @@ def _get_tensordot_tests(dtype_, rank_a_, rank_b_, num_dims_, dynamic_shape_):
 
 
 if __name__ == "__main__":
-  dtypes_to_test = [np.float16, np.float32, np.float64, np.complex64, np.complex128]
+  dtypes_to_test = [np.float16, np.float32, np.float64,
+                    np.complex64, np.complex128]
   for dtype in dtypes_to_test:
     for rank_a in 1, 2, 4, 5:
       for rank_b in 1, 2, 4, 5: