diff --git a/tensorflow/core/kernels/concat_lib_cpu.cc b/tensorflow/core/kernels/concat_lib_cpu.cc
index d66511a495b..199bb2a02b5 100644
--- a/tensorflow/core/kernels/concat_lib_cpu.cc
+++ b/tensorflow/core/kernels/concat_lib_cpu.cc
@@ -78,10 +78,10 @@ REGISTER(uint64)
 #if defined(IS_MOBILE_PLATFORM) && !defined(SUPPORT_SELECTIVE_REGISTRATION) && \
     !defined(__ANDROID_TYPES_FULL__)
-// Primarily used for SavedModel support on mobile. Registering it here only
-// if __ANDROID_TYPES_FULL__ is not defined (which already registers string)
-// to avoid duplicate registration.
-REGISTER(tstring);
+ // Primarily used for SavedModel support on mobile. Registering it here only
+ // if __ANDROID_TYPES_FULL__ is not defined (which already registers string)
+ // to avoid duplicate registration.
+ REGISTER(string);
 #endif  // defined(IS_MOBILE_PLATFORM) &&
         // !defined(SUPPORT_SELECTIVE_REGISTRATION) &&
         // !defined(__ANDROID_TYPES_FULL__)
diff --git a/tensorflow/core/kernels/control_flow_ops.cc b/tensorflow/core/kernels/control_flow_ops.cc
index b084af9fd4d..723814c5b58 100644
--- a/tensorflow/core/kernels/control_flow_ops.cc
+++ b/tensorflow/core/kernels/control_flow_ops.cc
@@ -145,8 +145,8 @@ REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_REF_KERNEL(int32);
 REGISTER_GPU_HOST_KERNEL(bool);
 REGISTER_GPU_HOST_REF_KERNEL(bool);
-REGISTER_GPU_HOST_KERNEL(tstring);
-REGISTER_GPU_HOST_REF_KERNEL(tstring);
+REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_REF_KERNEL(string);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -183,7 +183,7 @@ TF_CALL_REAL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_SWITCH);
                           SwitchOp)
 REGISTER_SYCL_HOST_KERNEL(bool);
-REGISTER_SYCL_HOST_KERNEL(tstring);
+REGISTER_SYCL_HOST_KERNEL(string);
 REGISTER_SYCL_HOST_KERNEL(int32);
 #define REGISTER_SYCL_HOST_REF_KERNEL(type) \
@@ -198,7 +198,7 @@ REGISTER_SYCL_HOST_KERNEL(int32);
 REGISTER_SYCL_HOST_REF_KERNEL(int32);
 REGISTER_SYCL_HOST_REF_KERNEL(bool);
-REGISTER_SYCL_HOST_REF_KERNEL(tstring);
+REGISTER_SYCL_HOST_REF_KERNEL(string);
 #undef REGISTER_SYCL_HOST_KERNEL
 #undef REGISTER_SYCL_HOST_REF_KERNEL
@@ -350,7 +350,7 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
                           MergeOp)
 REGISTER_GPU_HOST_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(tstring);
+REGISTER_GPU_HOST_KERNEL(string);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -373,7 +373,7 @@ REGISTER_GPU_HOST_KERNEL(ResourceHandle);
                           MergeOp)
 REGISTER_SYCL_HOST_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(tstring);
+REGISTER_SYCL_HOST_KERNEL(string);
 REGISTER_SYCL_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_SYCL_HOST_KERNEL
@@ -439,8 +439,8 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_REF_KERNEL);
 REGISTER_SYCL_HOST_KERNEL(int32);
 REGISTER_SYCL_HOST_REF_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(tstring);
-REGISTER_SYCL_HOST_REF_KERNEL(tstring);
+REGISTER_SYCL_HOST_KERNEL(string);
+REGISTER_SYCL_HOST_REF_KERNEL(string);
 REGISTER_SYCL_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_SYCL_HOST_KERNEL
@@ -468,8 +468,8 @@ REGISTER_SYCL_HOST_KERNEL(ResourceHandle);
 REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_REF_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(tstring);
-REGISTER_GPU_HOST_REF_KERNEL(tstring);
+REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_REF_KERNEL(string);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -529,7 +529,7 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
                           ExitOp)
 REGISTER_SYCL_HOST_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(tstring);
+REGISTER_SYCL_HOST_KERNEL(string);
 #undef REGISTER_SYCL_HOST_KERNEL
 #endif  // TENSORFLOW_USE_SYCL
@@ -551,7 +551,7 @@ REGISTER_SYCL_HOST_KERNEL(tstring);
                           ExitOp)
 REGISTER_GPU_HOST_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(tstring);
+REGISTER_GPU_HOST_KERNEL(string);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -601,7 +601,7 @@ TF_CALL_variant(REGISTER_GPU_KERNEL);
                           NextIterationOp)
 REGISTER_GPU_HOST_KERNEL(int32);
-REGISTER_GPU_HOST_KERNEL(tstring);
+REGISTER_GPU_HOST_KERNEL(string);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
@@ -634,7 +634,7 @@ TF_CALL_NUMBER_TYPES_NO_INT32(REGISTER_SYCL_KERNEL);
                           NextIterationOp)
 REGISTER_SYCL_HOST_KERNEL(int32);
-REGISTER_SYCL_HOST_KERNEL(tstring);
+REGISTER_SYCL_HOST_KERNEL(string);
 #undef REGISTER_SYCL_HOST_KERNEL
 #endif  // TENSORFLOW_USE_SYCL
diff --git a/tensorflow/core/kernels/control_flow_ops_test.cc b/tensorflow/core/kernels/control_flow_ops_test.cc
index 4037f1c3855..a2f7bd40692 100644
--- a/tensorflow/core/kernels/control_flow_ops_test.cc
+++ b/tensorflow/core/kernels/control_flow_ops_test.cc
@@ -71,12 +71,12 @@ TEST_F(SwitchOpTest, Int32Success_2_3_s0) {
 TEST_F(SwitchOpTest, StringSuccess_s1) {
   Initialize(DT_STRING);
-  AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
+  AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
   AddInputFromArray<bool>(TensorShape({}), {true});
   TF_ASSERT_OK(RunOpKernel());
   Tensor expected(allocator(), DT_STRING, TensorShape({6}));
-  test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
-  test::ExpectTensorEqual<tstring>(expected, *GetOutput(1));
+  test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
+  test::ExpectTensorEqual<string>(expected, *GetOutput(1));
   EXPECT_EQ(nullptr, GetOutput(0));
 }
diff --git a/tensorflow/core/kernels/cwise_op_add_2.cc b/tensorflow/core/kernels/cwise_op_add_2.cc
index c218d35498e..1fa453ddb09 100644
--- a/tensorflow/core/kernels/cwise_op_add_2.cc
+++ b/tensorflow/core/kernels/cwise_op_add_2.cc
@@ -23,7 +23,7 @@ namespace tensorflow {
 #if !defined(__ANDROID_TYPES_SLIM__)
 REGISTER6(BinaryOp, CPU, "Add", functor::add, int8, int16, complex64, uint8,
-          complex128, tstring);
+          complex128, string);
 // Notice: String is excluded to allow marking AddV2 is_commutative and
 // is_aggregate.
 REGISTER5(BinaryOp, CPU, "AddV2", functor::add, int8, int16, complex64, uint8,
diff --git a/tensorflow/core/kernels/cwise_op_equal_to_2.cc b/tensorflow/core/kernels/cwise_op_equal_to_2.cc
index 8bf53d89b41..77810338697 100644
--- a/tensorflow/core/kernels/cwise_op_equal_to_2.cc
+++ b/tensorflow/core/kernels/cwise_op_equal_to_2.cc
@@ -23,7 +23,7 @@ namespace tensorflow {
 #if !defined(__ANDROID_TYPES_SLIM__)
 REGISTER6(BinaryOp, CPU, "Equal", functor::equal_to, int32, int64, complex64,
-          complex128, tstring, bool);
+          complex128, string, bool);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 REGISTER6(BinaryOp, GPU, "Equal", functor::equal_to, int8, int16, int64,
           complex64, complex128, bool);
diff --git a/tensorflow/core/kernels/cwise_op_not_equal_to_2.cc b/tensorflow/core/kernels/cwise_op_not_equal_to_2.cc
index 9b23960936b..0ecc70c4f2b 100644
--- a/tensorflow/core/kernels/cwise_op_not_equal_to_2.cc
+++ b/tensorflow/core/kernels/cwise_op_not_equal_to_2.cc
@@ -23,7 +23,7 @@ namespace tensorflow {
 #if !defined(__ANDROID_TYPES_SLIM__)
 REGISTER6(BinaryOp, CPU, "NotEqual", functor::not_equal_to, int32, int64,
-          complex64, complex128, tstring, bool);
+          complex64, complex128, string, bool);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 REGISTER6(BinaryOp, GPU, "NotEqual", functor::not_equal_to, int8, int16, int64,
           complex64, complex128, bool);
diff --git a/tensorflow/core/kernels/dense_update_functor.cc b/tensorflow/core/kernels/dense_update_functor.cc
index 22181ce6cff..4d7eafd4f72 100644
--- a/tensorflow/core/kernels/dense_update_functor.cc
+++ b/tensorflow/core/kernels/dense_update_functor.cc
@@ -32,8 +32,8 @@ namespace functor {
 template <>
 struct DenseUpdate<CPUDevice, string, ASSIGN> {
-  void operator()(const CPUDevice& d, typename TTypes<tstring>::Flat params,
-                  typename TTypes<tstring>::ConstFlat update) {
+  void operator()(const CPUDevice& d, typename TTypes<string>::Flat params,
+                  typename TTypes<string>::ConstFlat update) {
     if (params.dimension(0) == 1) {
       params.data()->resize(update.data()->size());
       auto work = [&params, &update](int64 start, int64 end) {
@@ -57,9 +57,9 @@ struct DenseUpdate<CPUDevice, string, ASSIGN> {
       // first element of the tensor seems as good a guess as any of the sizes
       // of the strings contained within...
       estimated_string_size =
-          std::max(update.data()[0].size(), sizeof(tstring));
+          std::max(update.data()[0].size(), sizeof(string));
     } else {
-      estimated_string_size = sizeof(tstring);
+      estimated_string_size = sizeof(string);
     }
     d.parallelFor(
         params.dimension(0),
diff --git a/tensorflow/core/kernels/deserialize_sparse_string_op.cc b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
index cea891e6b88..398df428994 100644
--- a/tensorflow/core/kernels/deserialize_sparse_string_op.cc
+++ b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
@@ -283,7 +283,7 @@ class DeserializeSparseOp : public OpKernel {
 REGISTER_KERNEL_BUILDER(Name("DeserializeSparse")
                             .Device(DEVICE_CPU)
-                            .TypeConstraint<tstring>("Tserialized"),
+                            .TypeConstraint<string>("Tserialized"),
                         DeserializeSparseOp)
 REGISTER_KERNEL_BUILDER(Name("DeserializeManySparse").Device(DEVICE_CPU),
diff --git a/tensorflow/core/kernels/fill_functor.cc b/tensorflow/core/kernels/fill_functor.cc
index 10dd3df1915..2435c3eed52 100644
--- a/tensorflow/core/kernels/fill_functor.cc
+++ b/tensorflow/core/kernels/fill_functor.cc
@@ -32,9 +32,9 @@ void SetZeroFunctor<Eigen::ThreadPoolDevice, T>::operator()(
   out.device(d) = out.constant(T(0));
 }
-void SetZeroFunctor<Eigen::ThreadPoolDevice, tstring>::operator()(
-    const Eigen::ThreadPoolDevice& d, typename TTypes<tstring>::Flat out) {
-  out.device(d) = out.constant(tstring());
+void SetZeroFunctor<Eigen::ThreadPoolDevice, string>::operator()(
+    const Eigen::ThreadPoolDevice& d, typename TTypes<string>::Flat out) {
+  out.device(d) = out.constant(string());
 }
 // Explicit instantiations.
diff --git a/tensorflow/core/kernels/fill_functor.h b/tensorflow/core/kernels/fill_functor.h
index a9a47c6ecd3..46bffa51734 100644
--- a/tensorflow/core/kernels/fill_functor.h
+++ b/tensorflow/core/kernels/fill_functor.h
@@ -54,9 +54,9 @@ struct SetZeroFunctor<Eigen::SyclDevice, T> {
 #endif  // TENSORFLOW_USE_SYCL
 template <>
-struct SetZeroFunctor<Eigen::ThreadPoolDevice, tstring> {
+struct SetZeroFunctor<Eigen::ThreadPoolDevice, string> {
   void operator()(const Eigen::ThreadPoolDevice& d,
-                  typename TTypes<tstring>::Flat out);
+                  typename TTypes<string>::Flat out);
 };
 template <typename Device, typename T>
@@ -81,9 +81,9 @@ struct SetOneFunctor<Eigen::SyclDevice, T> {
 #endif  // TENSORFLOW_USE_SYCL
 template <>
-struct SetOneFunctor<Eigen::ThreadPoolDevice, tstring> {
+struct SetOneFunctor<Eigen::ThreadPoolDevice, string> {
   void operator()(const Eigen::ThreadPoolDevice& d,
-                  typename TTypes<tstring>::Flat out);
+                  typename TTypes<string>::Flat out);
 };
 }  // namespace functor
diff --git a/tensorflow/core/kernels/function_ops.cc b/tensorflow/core/kernels/function_ops.cc
index 8e2b20d6057..087ff2ee847 100644
--- a/tensorflow/core/kernels/function_ops.cc
+++ b/tensorflow/core/kernels/function_ops.cc
@@ -120,7 +120,7 @@ REGISTER_KERNEL_BUILDER(Name(kArgOp)
 REGISTER_KERNEL_BUILDER(Name(kArgOp)
                             .Device(DEVICE_GPU)
                             .HostMemory("output")
-                            .TypeConstraint<tstring>("T"),
+                            .TypeConstraint<string>("T"),
                         ArgOp);
 REGISTER_KERNEL_BUILDER(
@@ -148,7 +148,7 @@ REGISTER_KERNEL_BUILDER(Name(kRetOp)
 REGISTER_KERNEL_BUILDER(Name(kRetOp)
                             .Device(DEVICE_GPU)
-                            .TypeConstraint<tstring>("T")
+                            .TypeConstraint<string>("T")
                             .HostMemory("input"),
                         RetvalOp);
 #undef REGISTER
diff --git a/tensorflow/core/kernels/identity_n_op_test.cc b/tensorflow/core/kernels/identity_n_op_test.cc
index 9eada689d2c..6a133c4d03a 100644
--- a/tensorflow/core/kernels/identity_n_op_test.cc
+++ b/tensorflow/core/kernels/identity_n_op_test.cc
@@ -64,12 +64,12 @@ TEST_F(IdentityNOpTest, Int32Success_2_3) {
 TEST_F(IdentityNOpTest, StringInt32Success) {
   TF_ASSERT_OK(Init(DT_STRING, DT_INT32));
-  AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
+  AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
   AddInputFromArray<int32>(TensorShape({8}), {1, 3, 5, 7, 9, 11, 13, 15});
   TF_ASSERT_OK(RunOpKernel());
   Tensor expected0(allocator(), DT_STRING, TensorShape({6}));
-  test::FillValues<tstring>(&expected0, {"A", "b", "C", "d", "E", "f"});
-  test::ExpectTensorEqual<tstring>(expected0, *GetOutput(0));
+  test::FillValues<string>(&expected0, {"A", "b", "C", "d", "E", "f"});
+  test::ExpectTensorEqual<string>(expected0, *GetOutput(0));
   Tensor expected1(allocator(), DT_INT32, TensorShape({8}));
   test::FillValues<int32>(&expected1, {1, 3, 5, 7, 9, 11, 13, 15});
   test::ExpectTensorEqual<int32>(expected1, *GetOutput(1));
diff --git a/tensorflow/core/kernels/identity_op.cc b/tensorflow/core/kernels/identity_op.cc
index daa8a1ddb25..9349bb69bcd 100644
--- a/tensorflow/core/kernels/identity_op.cc
+++ b/tensorflow/core/kernels/identity_op.cc
@@ -158,7 +158,7 @@ REGISTER_GPU_KERNEL(Variant);
 REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_KERNEL(bool);
-REGISTER_GPU_HOST_KERNEL(tstring);
+REGISTER_GPU_HOST_KERNEL(string);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
diff --git a/tensorflow/core/kernels/identity_op_test.cc b/tensorflow/core/kernels/identity_op_test.cc
index b22848f816b..9975cd35376 100644
--- a/tensorflow/core/kernels/identity_op_test.cc
+++ b/tensorflow/core/kernels/identity_op_test.cc
@@ -56,11 +56,11 @@ TEST_F(IdentityOpTest, Int32Success_2_3) {
 TEST_F(IdentityOpTest, StringSuccess) {
   TF_ASSERT_OK(Init(DT_STRING));
-  AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
+  AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
   TF_ASSERT_OK(RunOpKernel());
   Tensor expected(allocator(), DT_STRING, TensorShape({6}));
-  test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
-  test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
+  test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
+  test::ExpectTensorEqual<string>(expected, *GetOutput(0));
 }
 TEST_F(IdentityOpTest, RefInputError) { TF_ASSERT_OK(Init(DT_INT32_REF)); }
diff --git a/tensorflow/core/kernels/inplace_ops.cc b/tensorflow/core/kernels/inplace_ops.cc
index fc23f70f39b..a6f026150ea 100644
--- a/tensorflow/core/kernels/inplace_ops.cc
+++ b/tensorflow/core/kernels/inplace_ops.cc
@@ -51,7 +51,7 @@ Status DoParallelConcat(const CPUDevice& d, const Tensor& value, int32 loc,
   case DataTypeToEnum<type>::value: \
     return DoParallelConcatUpdate<CPUDevice, type>(d, value, loc, output);
     TF_CALL_POD_TYPES(CASE);
-    TF_CALL_tstring(CASE);
+    TF_CALL_string(CASE);
     TF_CALL_variant(CASE);
 #undef CASE
     default:
@@ -416,7 +416,7 @@ Status DoCopy(const CPUDevice& device, const Tensor& x, Tensor* y) {
     TF_CALL_NUMBER_TYPES(CASE);
     TF_CALL_bool(CASE);
-    TF_CALL_tstring(CASE);
+    TF_CALL_string(CASE);
 #undef CASE
     default:
       return errors::InvalidArgument("Unsupported data type: ",
@@ -477,7 +477,7 @@ REGISTER_KERNEL_BUILDER(Name("DeepCopy").Device(DEVICE_CPU), CopyOp<CPUDevice>);
 REGISTER_EMPTY(float, CPU)
 REGISTER_EMPTY(double, CPU)
 REGISTER_EMPTY(Eigen::half, CPU)
-REGISTER_EMPTY(tstring, CPU)
+REGISTER_EMPTY(string, CPU)
 REGISTER_EMPTY(int32, CPU)
 REGISTER_EMPTY(int64, CPU)
 REGISTER_EMPTY(bool, CPU)
diff --git a/tensorflow/core/kernels/listdiff_op.cc b/tensorflow/core/kernels/listdiff_op.cc
index b1f7f453096..d28a2729d4c 100644
--- a/tensorflow/core/kernels/listdiff_op.cc
+++ b/tensorflow/core/kernels/listdiff_op.cc
@@ -104,7 +104,7 @@ class ListDiffOp : public OpKernel {
                           ListDiffOp<type, int64>)
 TF_CALL_REAL_NUMBER_TYPES(REGISTER_LISTDIFF);
-REGISTER_LISTDIFF(tstring);
+REGISTER_LISTDIFF(string);
 #undef REGISTER_LISTDIFF
 }  // namespace tensorflow
diff --git a/tensorflow/core/kernels/mirror_pad_op.cc b/tensorflow/core/kernels/mirror_pad_op.cc
index 20211c88c8b..6f5b8a3536f 100644
--- a/tensorflow/core/kernels/mirror_pad_op.cc
+++ b/tensorflow/core/kernels/mirror_pad_op.cc
@@ -173,7 +173,7 @@ namespace functor {
   DECLARE_CPU_SPEC(T, int64, 5);
 TF_CALL_POD_TYPES(DECLARE_CPU_SPECS);
-TF_CALL_tstring(DECLARE_CPU_SPECS);
+TF_CALL_string(DECLARE_CPU_SPECS);
 #undef DECLARE_CPU_SPEC
 #undef DECLARE_CPU_SPECS
@@ -195,7 +195,7 @@ TF_CALL_tstring(DECLARE_CPU_SPECS);
 // Note that we do register for bool type, but not in the gradient op.
 TF_CALL_POD_TYPES(REGISTER_KERNEL);
-TF_CALL_tstring(REGISTER_KERNEL);
+TF_CALL_string(REGISTER_KERNEL);
 #undef REGISTER_KERNEL
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
diff --git a/tensorflow/core/kernels/mirror_pad_op_cpu_impl.h b/tensorflow/core/kernels/mirror_pad_op_cpu_impl.h
index 45e6676e5a6..98e3be082d7 100644
--- a/tensorflow/core/kernels/mirror_pad_op_cpu_impl.h
+++ b/tensorflow/core/kernels/mirror_pad_op_cpu_impl.h
@@ -29,7 +29,7 @@ using CpuDevice = Eigen::ThreadPoolDevice;
   template struct functor::MirrorPad<CpuDevice, T, int32, CPU_PROVIDED_IXDIM>; \
   template struct functor::MirrorPad<CpuDevice, T, int64, CPU_PROVIDED_IXDIM>;
 TF_CALL_POD_TYPES(DEFINE_CPU_SPECS);
-TF_CALL_tstring(DEFINE_CPU_SPECS);
+TF_CALL_string(DEFINE_CPU_SPECS);
 #undef DEFINE_CPU_SPECS
 #define DEFINE_CPU_SPECS(T) \
diff --git a/tensorflow/core/kernels/pack_op.cc b/tensorflow/core/kernels/pack_op.cc
index 94315f75c38..5e57365e3d3 100644
--- a/tensorflow/core/kernels/pack_op.cc
+++ b/tensorflow/core/kernels/pack_op.cc
@@ -142,7 +142,7 @@ TF_CALL_QUANTIZED_TYPES(REGISTER_PACK);
 #if defined(IS_MOBILE_PLATFORM) && !defined(SUPPORT_SELECTIVE_REGISTRATION)
 // Primarily used for SavedModel support on mobile.
-REGISTER_PACK(tstring);
+REGISTER_PACK(string);
 #endif  // defined(IS_MOBILE_PLATFORM) &&
         // !defined(SUPPORT_SELECTIVE_REGISTRATION)
diff --git a/tensorflow/core/kernels/pad_op.cc b/tensorflow/core/kernels/pad_op.cc
index dd1fa86b0dd..a55b4afb9c8 100644
--- a/tensorflow/core/kernels/pad_op.cc
+++ b/tensorflow/core/kernels/pad_op.cc
@@ -291,7 +291,7 @@ class PadOp : public OpKernel {
                           PadOp<CPUDevice, type, int64>);
 TF_CALL_POD_TYPES(REGISTER_KERNEL);
-TF_CALL_tstring(REGISTER_KERNEL);
+TF_CALL_string(REGISTER_KERNEL);
 #undef REGISTER_KERNEL
 #if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
diff --git a/tensorflow/core/kernels/ragged_gather_op.cc b/tensorflow/core/kernels/ragged_gather_op.cc
index 623b848a656..730694e85ce 100644
--- a/tensorflow/core/kernels/ragged_gather_op.cc
+++ b/tensorflow/core/kernels/ragged_gather_op.cc
@@ -292,7 +292,7 @@ class RaggedGatherOp : public RaggedGatherOpBase<INDEX_TYPE, SPLITS_TYPE> {
   REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int32, value_type, int64) \
   REGISTER_CPU_KERNEL_WITH_INDEX_TYPE(int64, value_type, int64)
 TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL);
-TF_CALL_tstring(REGISTER_CPU_KERNEL);
+TF_CALL_string(REGISTER_CPU_KERNEL);
 TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL);
 TF_CALL_quint16(REGISTER_CPU_KERNEL);
 TF_CALL_qint16(REGISTER_CPU_KERNEL);
diff --git a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc
index 470b3a219d2..122718c1610 100644
--- a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc
+++ b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc
@@ -303,7 +303,7 @@ class RaggedTensorFromVariantOp : public OpKernel {
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64)
 TF_CALL_POD_TYPES(REGISTER_KERNELS);
-TF_CALL_tstring(REGISTER_KERNELS);
+TF_CALL_string(REGISTER_KERNELS);
 TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
 TF_CALL_quint16(REGISTER_KERNELS);
 TF_CALL_qint16(REGISTER_KERNELS);
diff --git a/tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc b/tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc
index 0be3609f942..f5397dad509 100644
--- a/tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc
+++ b/tensorflow/core/kernels/ragged_tensor_from_variant_op_test.cc
@@ -601,7 +601,7 @@ TEST_F(RaggedTensorFromVariantKernelTest, RaggedValuesTypeMismatch) {
       {component_split_1_1}, TensorShape({1}), component_values_1);
   int input_ragged_rank = 1;
   int output_ragged_rank = 2;
-  BuildDecodeRaggedTensorGraph<tstring, int64>(
+  BuildDecodeRaggedTensorGraph<string, int64>(
       input_ragged_rank, output_ragged_rank, TensorShape({1}),
       {variant_component_1});
   EXPECT_TRUE(absl::StartsWith(RunOpKernel().error_message(),
diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc
index c9f09796239..6923fd45f11 100644
--- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc
+++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc
@@ -210,7 +210,7 @@ class RaggedTensorToVariantOp : public OpKernel {
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \
   REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64)
 TF_CALL_POD_TYPES(REGISTER_KERNELS);
-TF_CALL_tstring(REGISTER_KERNELS);
+TF_CALL_string(REGISTER_KERNELS);
 TF_CALL_QUANTIZED_TYPES(REGISTER_KERNELS);
 TF_CALL_quint16(REGISTER_KERNELS);
 TF_CALL_qint16(REGISTER_KERNELS);
diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc
index b06f18cb94b..21d4b2ad2b5 100644
--- a/tensorflow/core/kernels/resource_variable_ops.cc
+++ b/tensorflow/core/kernels/resource_variable_ops.cc
@@ -950,7 +950,7 @@ class ResourceScatterUpdateOp : public OpKernel {
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ARITHMETIC_CPU);
 TF_CALL_REAL_NUMBER_TYPES(REGISTER_SCATTER_MINMAX_CPU);
-REGISTER_SCATTER_KERNEL(tstring, CPU, "ResourceScatterUpdate",
+REGISTER_SCATTER_KERNEL(string, CPU, "ResourceScatterUpdate",
                         scatter_op::UpdateOp::ASSIGN);
 REGISTER_SCATTER_KERNEL(bool, CPU, "ResourceScatterUpdate",
                         scatter_op::UpdateOp::ASSIGN);
diff --git a/tensorflow/core/kernels/reverse_op.cc b/tensorflow/core/kernels/reverse_op.cc
index 98bf8bf8e91..c60ab60849f 100644
--- a/tensorflow/core/kernels/reverse_op.cc
+++ b/tensorflow/core/kernels/reverse_op.cc
@@ -314,7 +314,7 @@ class ReverseV2Op : public OpKernel {
                               .HostMemory("axis"), \
                           ReverseV2Op<CPUDevice, T, int64>)
 TF_CALL_POD_TYPES(REGISTER_KERNELS);
-TF_CALL_tstring(REGISTER_KERNELS);
+TF_CALL_string(REGISTER_KERNELS);
 #undef REGISTER_KERNELS
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
diff --git a/tensorflow/core/kernels/scatter_nd_op.cc b/tensorflow/core/kernels/scatter_nd_op.cc
index aa62d488f73..abf7cfde135 100644
--- a/tensorflow/core/kernels/scatter_nd_op.cc
+++ b/tensorflow/core/kernels/scatter_nd_op.cc
@@ -378,7 +378,7 @@ class ScatterNdUpdateOp : public OpKernel {
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_ADD_SUB_CPU);
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_UPDATE_CPU);
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_CPU);
-TF_CALL_tstring(REGISTER_SCATTER_ND_CPU);
+TF_CALL_string(REGISTER_SCATTER_ND_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_ADD_SUB_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_UPDATE_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_CPU);
@@ -428,7 +428,7 @@ TF_CALL_bool(REGISTER_SCATTER_ND_CPU);
 // Register TensorScatterUpdate/Add/Sub for all number types.
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_TENSOR_CPU);
 // Register only TensorScatterUpdate for string/bool types as well.
-TF_CALL_tstring(REGISTER_SCATTER_ND_TENSOR_UPDATE_CPU);
+TF_CALL_string(REGISTER_SCATTER_ND_TENSOR_UPDATE_CPU);
 TF_CALL_bool(REGISTER_SCATTER_ND_TENSOR_UPDATE_CPU);
 #undef REGISTER_SCATTER_ND_TENSOR_CPU
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h b/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
index 811679dac79..01e4656eab8 100644
--- a/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
@@ -160,7 +160,7 @@ struct ScatterNdFunctor<CPUDevice, T, Index, OP, IXDIM> {
   REGISTER_SCATTER_ND_INDEX(type, scatter_nd_op::UpdateOp::SUB);
 TF_CALL_ALL_TYPES(REGISTER_SCATTER_ND_UPDATE);
-REGISTER_SCATTER_ND_INDEX(tstring, scatter_nd_op::UpdateOp::ADD);
+REGISTER_SCATTER_ND_INDEX(string, scatter_nd_op::UpdateOp::ADD);
 TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_MATH);
 TF_CALL_bool(REGISTER_SCATTER_ND_MATH);
 #undef REGISTER_SCATTER_ND_MATH
diff --git a/tensorflow/core/kernels/scatter_nd_op_test.cc b/tensorflow/core/kernels/scatter_nd_op_test.cc
index 1461831a1fb..d3f6ee6dc44 100644
--- a/tensorflow/core/kernels/scatter_nd_op_test.cc
+++ b/tensorflow/core/kernels/scatter_nd_op_test.cc
@@ -51,15 +51,15 @@ class ScatterNdUpdateOpTest : public OpsTestBase {
 // TODO(simister): Re-enable this once binary size is under control.
 // TEST_F(ScatterNdUpdateOpTest, Simple_StringType) {
 //   MakeOp(DT_STRING_REF, DT_INT32);
-//   AddInputFromArray<tstring>(TensorShape({1}), {"Brain"});
+//   AddInputFromArray<string>(TensorShape({1}), {"Brain"});
//   AddInputFromArray<int32>(TensorShape({1}), {0});
-//   AddInputFromArray<tstring>(TensorShape({1}), {"TensorFlow"});
+//   AddInputFromArray<string>(TensorShape({1}), {"TensorFlow"});
 //   TF_ASSERT_OK(RunOpKernel());
 //   // Check the new state of the input
 //   Tensor params_tensor = *mutable_input(0).tensor;
 //   Tensor expected(allocator(), DT_STRING, TensorShape({1}));
-//   test::FillValues<tstring>(&expected, {"TensorFlow"});
-//   test::ExpectTensorEqual<tstring>(expected, params_tensor);
+//   test::FillValues<string>(&expected, {"TensorFlow"});
+//   test::ExpectTensorEqual<string>(expected, params_tensor);
 // }
 // TEST_F(ScatterNdUpdateOpTest, Simple_BoolType) {
diff --git a/tensorflow/core/kernels/scatter_op_test.cc b/tensorflow/core/kernels/scatter_op_test.cc
index c9a34f85765..ae6548e9ef2 100644
--- a/tensorflow/core/kernels/scatter_op_test.cc
+++ b/tensorflow/core/kernels/scatter_op_test.cc
@@ -50,15 +50,15 @@ class ScatterUpdateOpTest : public OpsTestBase {
 TEST_F(ScatterUpdateOpTest, Simple_StringType) {
   MakeOp(DT_STRING_REF, DT_INT32);
-  AddInputFromArray<tstring>(TensorShape({1}), {"Brain"});
+  AddInputFromArray<string>(TensorShape({1}), {"Brain"});
   AddInputFromArray<int32>(TensorShape({1}), {0});
-  AddInputFromArray<tstring>(TensorShape({1}), {"TensorFlow"});
+  AddInputFromArray<string>(TensorShape({1}), {"TensorFlow"});
   TF_ASSERT_OK(RunOpKernel());
   // Check the new state of the input
   Tensor params_tensor = *mutable_input(0).tensor;
   Tensor expected(allocator(), DT_STRING, TensorShape({1}));
-  test::FillValues<tstring>(&expected, {"TensorFlow"});
-  test::ExpectTensorEqual<tstring>(expected, params_tensor);
+  test::FillValues<string>(&expected, {"TensorFlow"});
+  test::ExpectTensorEqual<string>(expected, params_tensor);
 }
 TEST_F(ScatterUpdateOpTest, Simple_BoolType) {
diff --git a/tensorflow/core/kernels/serialize_sparse_op.cc b/tensorflow/core/kernels/serialize_sparse_op.cc
index 5d48c8d685e..577e327809d 100644
--- a/tensorflow/core/kernels/serialize_sparse_op.cc
+++ b/tensorflow/core/kernels/serialize_sparse_op.cc
@@ -93,7 +93,7 @@ class SerializeSparseOp : public OpKernel {
 // performs O(1) shallow copies (and hence is much cheaper than
 // dispatching to another thread would be).
 template <>
-bool SerializeSparseOp<tstring>::IsExpensive() {
+bool SerializeSparseOp<string>::IsExpensive() {
   return true;
 }
 template <>
@@ -102,14 +102,14 @@ bool SerializeSparseOp<Variant>::IsExpensive() {
 }
 template <>
-Status SerializeSparseOp<tstring>::Initialize(Tensor* result) {
+Status SerializeSparseOp<string>::Initialize(Tensor* result) {
   *result = Tensor(DT_STRING, TensorShape({3}));
   return Status::OK();
 }
 template <>
-Status SerializeSparseOp<tstring>::Serialize(const Tensor& input,
-                                             tstring* result) {
+Status SerializeSparseOp<string>::Serialize(const Tensor& input,
+                                            string* result) {
   TensorProto proto;
   input.AsProtoTensorContent(&proto);
   *result = proto.SerializeAsString();
@@ -118,8 +118,8 @@ Status SerializeSparseOp<tstring>::Serialize(const Tensor& input,
 REGISTER_KERNEL_BUILDER(Name("SerializeSparse")
                             .Device(DEVICE_CPU)
-                            .TypeConstraint<tstring>("out_type"),
-                        SerializeSparseOp<tstring>);
+                            .TypeConstraint<string>("out_type"),
+                        SerializeSparseOp<string>);
 template <>
 Status SerializeSparseOp<Variant>::Initialize(Tensor* result) {
@@ -261,27 +261,27 @@ class SerializeManySparseOp : public SerializeManySparseOpBase<U> {
 };
 template <>
-Status SerializeManySparseOpBase<tstring>::Initialize(const int64 n,
-                                                      Tensor* result) {
+Status SerializeManySparseOpBase<string>::Initialize(const int64 n,
+                                                     Tensor* result) {
   *result = Tensor(DT_STRING, TensorShape({n, 3}));
   return Status::OK();
 }
 template <>
-Status SerializeManySparseOpBase<tstring>::Serialize(const Tensor& input,
-                                                     tstring* result) {
+Status SerializeManySparseOpBase<string>::Serialize(const Tensor& input,
+                                                    string* result) {
   TensorProto proto;
   input.AsProtoTensorContent(&proto);
   *result = proto.SerializeAsString();
   return Status::OK();
 }
-#define REGISTER_KERNELS(type)                            \
-  REGISTER_KERNEL_BUILDER(Name("SerializeManySparse")     \
-                              .Device(DEVICE_CPU)         \
-                              .TypeConstraint<type>("T")  \
-                              .TypeConstraint<tstring>("out_type"), \
-                          SerializeManySparseOp<type, tstring>)
+#define REGISTER_KERNELS(type)                            \
+  REGISTER_KERNEL_BUILDER(Name("SerializeManySparse")     \
+                              .Device(DEVICE_CPU)         \
+                              .TypeConstraint<type>("T")  \
+                              .TypeConstraint<string>("out_type"), \
+                          SerializeManySparseOp<type, string>)
 TF_CALL_ALL_TYPES(REGISTER_KERNELS);
 #undef REGISTER_KERNELS
diff --git a/tensorflow/core/kernels/set_kernels.cc b/tensorflow/core/kernels/set_kernels.cc
index 4532396455f..59516b2329b 100644
--- a/tensorflow/core/kernels/set_kernels.cc
+++ b/tensorflow/core/kernels/set_kernels.cc
@@ -291,7 +291,7 @@ _SET_SIZE_REGISTER_KERNEL_BUILDER(int32);
 _SET_SIZE_REGISTER_KERNEL_BUILDER(int64);
 _SET_SIZE_REGISTER_KERNEL_BUILDER(uint8);
 _SET_SIZE_REGISTER_KERNEL_BUILDER(uint16);
-_SET_SIZE_REGISTER_KERNEL_BUILDER(tstring);
+_SET_SIZE_REGISTER_KERNEL_BUILDER(string);
 #undef _SET_SIZE_REGISTER_KERNEL_BUILDER
 enum InputTypes {
@@ -716,7 +716,7 @@ _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int32);
 _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int64);
 _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint8);
 _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint16);
-_DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(tstring);
+_DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(string);
 #undef _DENSE_TO_DENSE_SET_OPERATION_REGISTER_KERNEL_BUILDER
 template <typename T>
@@ -737,7 +737,7 @@ _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int32);
 _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int64);
 _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint8);
 _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint16);
-_DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(tstring);
+_DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(string);
 #undef _DENSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER
 template <typename T>
@@ -758,7 +758,7 @@ _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int32);
 _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(int64);
 _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint8);
 _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(uint16);
-_SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(tstring);
+_SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER(string);
 #undef _SPARSE_TO_SPARSE_SET_OPERATION_REGISTER_KERNEL_BUILDER
 }  // namespace tensorflow
diff --git a/tensorflow/core/kernels/shape_ops.cc b/tensorflow/core/kernels/shape_ops.cc
index cf065f738d6..86ccde9fb8c 100644
--- a/tensorflow/core/kernels/shape_ops.cc
+++ b/tensorflow/core/kernels/shape_ops.cc
@@ -546,7 +546,7 @@ REGISTER_GPU_KERNEL(Variant);
 REGISTER_GPU_HOST_KERNEL(int32);
 REGISTER_GPU_HOST_KERNEL(bool);
-REGISTER_GPU_HOST_KERNEL(tstring);
+REGISTER_GPU_HOST_KERNEL(string);
 REGISTER_GPU_HOST_KERNEL(ResourceHandle);
 #undef REGISTER_GPU_HOST_KERNEL
diff --git a/tensorflow/core/kernels/tile_functor_cpu.cc b/tensorflow/core/kernels/tile_functor_cpu.cc
index 2a5fb3f62d6..5a8af3468fa 100644
--- a/tensorflow/core/kernels/tile_functor_cpu.cc
+++ b/tensorflow/core/kernels/tile_functor_cpu.cc
@@ -81,7 +81,7 @@ TF_CALL_int64(DEFINE_TYPE);
 TF_CALL_half(DEFINE_TYPE);
 TF_CALL_complex64(DEFINE_TYPE);
 TF_CALL_complex128(DEFINE_TYPE);
-TF_CALL_tstring(DEFINE_TYPE);
+TF_CALL_string(DEFINE_TYPE);
 #undef DEFINE_TYPE
diff --git a/tensorflow/core/kernels/tile_ops.cc b/tensorflow/core/kernels/tile_ops.cc
index e1080acb700..cee334ec707 100644
--- a/tensorflow/core/kernels/tile_ops.cc
+++ b/tensorflow/core/kernels/tile_ops.cc
@@ -142,7 +142,7 @@ TF_CALL_int64(DECLARE_TYPE);
 TF_CALL_half(DECLARE_TYPE);
 TF_CALL_complex64(DECLARE_TYPE);
 TF_CALL_complex128(DECLARE_TYPE);
-TF_CALL_tstring(DECLARE_TYPE);
+TF_CALL_string(DECLARE_TYPE);
 #undef DECLARE_TYPE
 #define DECLARE_DIM(T, NDIM) \
@@ -241,7 +241,7 @@ class TileOp : public OpKernel {
     TF_CALL_int16(HANDLE_TYPE_NAME);
     TF_CALL_int64(HANDLE_TYPE_NAME);
     TF_CALL_half(HANDLE_TYPE_NAME);
-    TF_CALL_tstring(HANDLE_TYPE_NAME);  // when DEVICE=CPUDevice.
+    TF_CALL_string(HANDLE_TYPE_NAME);  // when DEVICE=CPUDevice.
     TF_CALL_complex64(HANDLE_TYPE_NAME);
     TF_CALL_complex128(HANDLE_TYPE_NAME);
@@ -322,7 +322,7 @@ TF_CALL_int64(HANDLE_TYPE_NAME_CPU);
 TF_CALL_half(HANDLE_TYPE_NAME_CPU);
 TF_CALL_complex64(HANDLE_TYPE_NAME_CPU);
 TF_CALL_complex128(HANDLE_TYPE_NAME_CPU);
-TF_CALL_tstring(HANDLE_TYPE_NAME_CPU);
+TF_CALL_string(HANDLE_TYPE_NAME_CPU);
 #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 TF_CALL_bool(HANDLE_TYPE_NAME_GPU);