diff --git a/tensorflow/cc/framework/cc_op_gen.cc b/tensorflow/cc/framework/cc_op_gen.cc
index d4796eac8fa..13e666ddaad 100644
--- a/tensorflow/cc/framework/cc_op_gen.cc
+++ b/tensorflow/cc/framework/cc_op_gen.cc
@@ -329,7 +329,7 @@ std::pair<const char*, bool> AttrTypeName(StringPiece attr_type) {
       new std::unordered_map<StringPiece, std::pair<const char*, bool>,
                              StringPieceHasher>{
           {"string", {"StringPiece", false}},
-          {"list(string)", {"gtl::ArraySlice<string>", true}},
+          {"list(string)", {"gtl::ArraySlice<::tensorflow::tstring>", true}},
           {"int", {"int64", false}},
           {"list(int)", {"gtl::ArraySlice<int>", true}},
           {"float", {"float", false}},
diff --git a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc
index 21ff913b84c..7a9b9f65fd8 100644
--- a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc
+++ b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc
@@ -127,7 +127,7 @@ TEST_F(TRTEngineResourceOpsTest, Basic) {
                    .Finalize(node_def()));
   TF_ASSERT_OK(InitOp());
   AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
-  AddInputFromArray<string>(TensorShape({}), {filename});
+  AddInputFromArray<tstring>(TensorShape({}), {filename});
   TF_ASSERT_OK(RunOpKernel());
   EXPECT_TRUE(rm->Lookup(container, resource_name, &resource).ok());
   EXPECT_EQ(0, resource->cache_.size());
@@ -168,7 +168,7 @@ TEST_F(TRTEngineResourceOpsTest, Basic) {
   TF_ASSERT_OK(env->NewRandomAccessFile(filename, &file));
   auto reader = absl::make_unique<io::RecordReader>(file.get());
   uint64 offset = 0;
-  string record;
+  tstring record;
   TF_ASSERT_OK(reader->ReadRecord(&offset, &record));
   TRTEngineInstance engine_instance;
   engine_instance.ParseFromString(record);
diff --git a/tensorflow/core/kernels/concat_lib_cpu.cc b/tensorflow/core/kernels/concat_lib_cpu.cc
index 82f83fd7492..da73d3d2c56 100644
--- a/tensorflow/core/kernels/concat_lib_cpu.cc
+++ b/tensorflow/core/kernels/concat_lib_cpu.cc
@@ -57,10 +57,9 @@ int64 EstimateBytesPerElement(
 // reshapes all the inputs to matrices), by sampling the lengths of the actual
 // strings in the various tensors.
 template <>
-int64 EstimateBytesPerElement<string>(
+int64 EstimateBytesPerElement<tstring>(
     const std::vector<
-        std::unique_ptr<typename TTypes<string>::ConstMatrix>>&
-        inputs) {
+        std::unique_ptr<typename TTypes<tstring>::ConstMatrix>>& inputs) {
   // randomly sample a few input strings to get a sense of the average size
   // of each element
   int num_samples = 0;
diff --git a/tensorflow/core/kernels/concat_op_test.cc b/tensorflow/core/kernels/concat_op_test.cc
index 6348d1824d9..5dffe76130d 100644
--- a/tensorflow/core/kernels/concat_op_test.cc
+++ b/tensorflow/core/kernels/concat_op_test.cc
@@ -42,12 +42,12 @@ void FillTensorWithRandomValues(Tensor* t, int string_length, int64* bytes) {
 }
 
 template <>
-void FillTensorWithRandomValues<string>(Tensor* t, int string_length,
-                                        int64* bytes) {
-  auto ts = t->flat<string>();
+void FillTensorWithRandomValues<tstring>(Tensor* t, int string_length,
+                                         int64* bytes) {
+  auto ts = t->flat<tstring>();
   *bytes = 0;
   for (int i = 0; i < ts.size(); i++) {
-    ts(i) = string(string_length, 'x');
+    ts(i) = tstring(string_length, 'x');
     *bytes += sizeof(ts(i)) + ts(i).size();
   }
 }
@@ -99,7 +99,7 @@ BENCHMARK(BM_ConcatDim0Float)->Arg(1000)->Arg(100000)->Arg(1000000);
 BENCHMARK(BM_ConcatDim1Float)->Arg(1000)->Arg(100000)->Arg(1000000);
 
 static void BM_ConcatDim0String(int iters, int dim2, int string_length) {
-  ConcatHelper<string>(iters, 0, dim2, string_length);
+  ConcatHelper<tstring>(iters, 0, dim2, string_length);
 }
 
 BENCHMARK(BM_ConcatDim0String)
diff --git a/tensorflow/core/kernels/data/experimental/unique_dataset_op_test.cc b/tensorflow/core/kernels/data/experimental/unique_dataset_op_test.cc
index 7259f36242f..a402cbd5c3d 100644
--- a/tensorflow/core/kernels/data/experimental/unique_dataset_op_test.cc
+++ b/tensorflow/core/kernels/data/experimental/unique_dataset_op_test.cc
@@ -99,7 +99,7 @@ UniqueDatasetParams EmptyInputParams() {
 UniqueDatasetParams StringParams() {
   auto tensor_slice_dataset_params = TensorSliceDatasetParams(
       /*components=*/
-      {CreateTensor<string>(
+      {CreateTensor<tstring>(
           TensorShape{11, 1},
           {"one", "One", "two", "three", "five", "eight", "thirteen",
            "twenty-one", "eight", "eight", "thirty-four"})},
@@ -164,15 +164,15 @@ std::vector<GetNextTestCase<UniqueDatasetParams>> GetNextTestCases() {
       CreateTensors<int64>(TensorShape({1}), {})},
      {/*dataset_params=*/StringParams(),
       /*expected_outputs=*/
-      CreateTensors<string>(TensorShape({1}), {{"one"},
-                                               {"One"},
-                                               {"two"},
-                                               {"three"},
-                                               {"five"},
-                                               {"eight"},
-                                               {"thirteen"},
-                                               {"twenty-one"},
-                                               {"thirty-four"}})}};
+      CreateTensors<tstring>(TensorShape({1}), {{"one"},
+                                                {"One"},
+                                                {"two"},
+                                                {"three"},
+                                                {"five"},
+                                                {"eight"},
+                                                {"thirteen"},
+                                                {"twenty-one"},
+                                                {"thirty-four"}})}};
 }
 
 ITERATOR_GET_NEXT_TEST_P(UniqueDatasetOpTest, UniqueDatasetParams,
diff --git a/tensorflow/core/kernels/data/text_line_dataset_op_test.cc b/tensorflow/core/kernels/data/text_line_dataset_op_test.cc
index 8acec95a9e9..f4c9589856d 100644
--- a/tensorflow/core/kernels/data/text_line_dataset_op_test.cc
+++ b/tensorflow/core/kernels/data/text_line_dataset_op_test.cc
@@ -66,7 +66,7 @@ class TextLineDatasetParams : public DatasetParams {
 class TextLineDatasetOpTest : public DatasetOpsTestBase {};
 
 Status CreateTestFiles(const std::vector<string>& filenames,
-                       const std::vector<string>& contents,
+                       const std::vector<tstring>& contents,
                        CompressionType compression_type) {
   if (filenames.size() != contents.size()) {
     return tensorflow::errors::InvalidArgument(
diff --git a/tensorflow/core/kernels/decode_proto_op.cc b/tensorflow/core/kernels/decode_proto_op.cc
index fea0cb047bc..7752b2f5eb0 100644
--- a/tensorflow/core/kernels/decode_proto_op.cc
+++ b/tensorflow/core/kernels/decode_proto_op.cc
@@ -810,7 +810,7 @@ class DecodeProtoOp : public OpKernel {
 
  private:
  // Copy a serialized message to binary, e.g. to handle text proto inputs.
-  void ReserializeMessage(OpKernelContext* ctx, const string& buf,
+  void ReserializeMessage(OpKernelContext* ctx, const tstring& buf,
                           tstring* binary_buf) {
     // Handle text protos by translating them to binary.
     std::unique_ptr<Message> message(message_prototype_->New());
@@ -831,7 +831,7 @@ class DecodeProtoOp : public OpKernel {
   }
 
   // Count the number of occurrences of each requested field in a message
   // batch.
-  void CountFields(OpKernelContext* ctx, int message_index, const string& buf,
+  void CountFields(OpKernelContext* ctx, int message_index, const tstring& buf,
                    Tensor* sizes_tensor, std::vector<int32>* max_sizes) {
     int field_count = fields_.size();
diff --git a/tensorflow/core/kernels/deserialize_sparse_string_op.cc b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
index cea891e6b88..8b8819b3cd0 100644
--- a/tensorflow/core/kernels/deserialize_sparse_string_op.cc
+++ b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
@@ -210,7 +210,7 @@ class DeserializeSparseOp : public OpKernel {
   }
 
  private:
-  Status Deserialize(const string& serialized, Tensor* result) {
+  Status Deserialize(const tstring& serialized, Tensor* result) {
     TensorProto proto;
     if (!ParseProtoUnlimited(&proto, serialized)) {
       return errors::InvalidArgument("Could not parse serialized proto");
@@ -224,8 +224,8 @@ class DeserializeSparseOp : public OpKernel {
   }
 
   Status GetAndValidateSparseTensor(
-      const string& serialized_indices, const string& serialized_values,
-      const string& serialized_shape, DataType values_dtype, int index,
+      const tstring& serialized_indices, const tstring& serialized_values,
+      const tstring& serialized_shape, DataType values_dtype, int index,
       Tensor* output_indices, Tensor* output_values, Tensor* output_shape) {
     // Deserialize and validate the indices.
     TF_RETURN_IF_ERROR(this->Deserialize(serialized_indices, output_indices));
diff --git a/tensorflow/core/kernels/example_parsing_ops.cc b/tensorflow/core/kernels/example_parsing_ops.cc
index 0098d9583d5..8595c333cd7 100644
--- a/tensorflow/core/kernels/example_parsing_ops.cc
+++ b/tensorflow/core/kernels/example_parsing_ops.cc
@@ -230,7 +230,7 @@ class ParseExampleOp : public OpKernel {
   Status ParseExampleScalar(const example::FastParseExampleConfig& config,
                             const Tensor* serialized, OpKernelContext* ctx,
                             example::Result* result) const {
-    const string& serialized_proto = serialized->scalar<string>()();
+    const tstring& serialized_proto = serialized->scalar<tstring>()();
     return FastParseSingleExample(config, serialized_proto, result);
   }
 
@@ -357,7 +357,7 @@ class ParseSingleExampleOp : public OpKernel {
       config.sparse.push_back({attrs_.sparse_keys[d], attrs_.sparse_types[d]});
     }
 
-    const string& serialized_proto = serialized->scalar<string>()();
+    const tstring& serialized_proto = serialized->scalar<tstring>()();
 
     OP_REQUIRES_OK(ctx,
                    FastParseSingleExample(config, serialized_proto, &result));
@@ -567,8 +567,8 @@ class ParseSequenceExampleOp : public OpKernel {
       const OpInputList& context_dense_defaults) const {
     example::FastParseExampleConfig config;
     for (int d = 0; d < attrs_.num_context_dense; ++d) {
-      const string& key = dense_keys ? dense_keys->flat<string>()(d)
-                                     : attrs_.context_dense_keys[d];
+      const tstring& key = dense_keys ? dense_keys->flat<tstring>()(d)
+                                      : attrs_.context_dense_keys[d];
       config.dense.push_back({key, attrs_.context_dense_types[d],
                               attrs_.context_dense_shapes[d],
                               context_dense_defaults[d],
@@ -576,8 +576,8 @@ class ParseSequenceExampleOp : public OpKernel {
                               0 /*attrs_.context_elements_per_stride[d] */});
     }
     for (int d = 0; d < attrs_.num_context_sparse; ++d) {
-      const string& key = sparse_keys ? sparse_keys->flat<string>()(d)
-                                      : attrs_.context_sparse_keys[d];
+      const tstring& key = sparse_keys ? sparse_keys->flat<tstring>()(d)
+                                       : attrs_.context_sparse_keys[d];
       config.sparse.push_back({key, attrs_.context_sparse_types[d]});
     }
     for (int d = 0; d < attrs_.num_context_ragged; ++d) {
@@ -594,8 +594,8 @@ class ParseSequenceExampleOp : public OpKernel {
       const Tensor* feature_list_dense_missing_assumed_empty) const {
     example::FastParseExampleConfig config;
     for (int d = 0; d < attrs_.num_feature_list_dense; ++d) {
-      const string& key = dense_keys ? dense_keys->flat<string>()(d)
-                                     : attrs_.feature_list_dense_keys[d];
+      const tstring& key = dense_keys ? dense_keys->flat<tstring>()(d)
+                                      : attrs_.feature_list_dense_keys[d];
       bool missing_assumed_empty =
           feature_list_dense_missing_assumed_empty
              ? feature_list_dense_missing_assumed_empty->flat<bool>()(d)
@@ -608,8 +608,8 @@ class ParseSequenceExampleOp : public OpKernel {
                               0 /*attrs_.feature_list_elements_per_stride[d] */});
     }
     for (int d = 0; d < attrs_.num_feature_list_sparse; ++d) {
-      const string& key = sparse_keys ? sparse_keys->flat<string>()(d)
-                                      : attrs_.feature_list_sparse_keys[d];
+      const tstring& key = sparse_keys ? sparse_keys->flat<tstring>()(d)
+                                       : attrs_.feature_list_sparse_keys[d];
       config.sparse.push_back({key, attrs_.feature_list_sparse_types[d]});
     }
     for (int d = 0; d < attrs_.num_feature_list_ragged; ++d) {
@@ -909,7 +909,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
         errors::InvalidArgument("Could not parse example input, value: '",
                                 serialized_t(), "'"));
 
-    const string& name = (has_debug_name) ? debug_name_t() : "";
+    const tstring& name = (has_debug_name) ? debug_name_t() : "";
debug_name_t() : ""; const Features& context = ex.context(); const auto& context_dict = context.feature(); @@ -925,7 +925,7 @@ class ParseSingleSequenceExampleOp : public OpKernel { } for (int d = 0; d < attrs_.num_context_dense; ++d) { - const string& key = context_dense_keys_t[d]; + const tstring& key = context_dense_keys_t[d]; const DataType& dtype = attrs_.context_dense_types[d]; const TensorShape& shape = attrs_.context_dense_shapes[d]; @@ -955,7 +955,7 @@ class ParseSingleSequenceExampleOp : public OpKernel { // Context Sparse ---------------------------------------------------------- for (int d = 0; d < attrs_.num_context_sparse; ++d) { - const string& key = context_sparse_keys_t[d]; + const tstring& key = context_sparse_keys_t[d]; const DataType& dtype = attrs_.context_sparse_types[d]; const auto& feature_found = context_dict.find(key); @@ -1014,7 +1014,7 @@ class ParseSingleSequenceExampleOp : public OpKernel { FeatureList empty_feature_list; // Placeholder for missing FLs for (int d = 0; d < attrs_.num_feature_list_dense; ++d) { - const string& key = feature_list_dense_keys_t[d]; + const tstring& key = feature_list_dense_keys_t[d]; const DataType& dtype = attrs_.feature_list_dense_types[d]; const TensorShape& shape = attrs_.feature_list_dense_shapes[d]; @@ -1061,7 +1061,7 @@ class ParseSingleSequenceExampleOp : public OpKernel { // Feature List Sparse ----------------------------------------------------- for (int d = 0; d < attrs_.num_feature_list_sparse; ++d) { - const string& key = feature_list_sparse_keys_t[d]; + const tstring& key = feature_list_sparse_keys_t[d]; const DataType& dtype = attrs_.feature_list_sparse_types[d]; const auto& feature_list_found = feature_list_dict.find(key); diff --git a/tensorflow/core/kernels/example_parsing_ops_test.cc b/tensorflow/core/kernels/example_parsing_ops_test.cc index e8c039f9b88..bbf36ef4aa6 100644 --- a/tensorflow/core/kernels/example_parsing_ops_test.cc +++ b/tensorflow/core/kernels/example_parsing_ops_test.cc @@ -223,7 +223,7 @@ static Graph* ParseExampleV2(int batch_size, int num_keys, int feature_size) { std::vector ragged_split_types; std::vector dense_shapes; Tensor keys_t(DT_STRING, {static_cast(num_keys)}); - auto keys_flat = keys_t.flat(); + auto keys_flat = keys_t.flat(); Options opt; for (int i = 0; i < num_keys; ++i) { keys_flat(i) = strings::Printf("feature_%d", i); diff --git a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc index ca9e1836c82..d729c43f25a 100644 --- a/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc +++ b/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc @@ -406,12 +406,12 @@ void copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) { } template <> -void copy_array(string* dst, const string* src, int64 size) { +void copy_array(tstring* dst, const tstring* src, int64 size) { slow_copy_array(dst, src, size); } template <> -void copy_array(string* dst, const string* src, int32 size) { +void copy_array(tstring* dst, const tstring* src, int32 size) { slow_copy_array(dst, src, size); } diff --git a/tensorflow/core/kernels/scatter_functor.h b/tensorflow/core/kernels/scatter_functor.h index 55bb8d25b5c..fd2724a73d8 100644 --- a/tensorflow/core/kernels/scatter_functor.h +++ b/tensorflow/core/kernels/scatter_functor.h @@ -362,7 +362,7 @@ struct ScatterFunctorBase { // indices and params sizes were validated in DoCompute(). 
     const Index N = static_cast<Index>(indices.size());
     const Index limit = static_cast<Index>(params.dimension(0));
-    if (!std::is_same<T, string>::value) {
+    if (!std::is_same<T, tstring>::value) {
       for (Index i = 0; i < N; i++) {
         // Grab the index and check its validity.  Do this carefully,
         // to avoid checking the value and grabbing it again from
diff --git a/tensorflow/core/kernels/strided_slice_op.h b/tensorflow/core/kernels/strided_slice_op.h
index 86d105391d8..dce93bf1878 100644
--- a/tensorflow/core/kernels/strided_slice_op.h
+++ b/tensorflow/core/kernels/strided_slice_op.h
@@ -68,10 +68,10 @@ struct InitOutput {
 };
 
 template <typename Device, int NDIMS>
-struct InitOutput<Device, string, NDIMS> {
+struct InitOutput<Device, tstring, NDIMS> {
   static void run(const Device& d,
-                  typename TTypes<string, NDIMS>::Tensor output) {
-    output.device(d) = output.constant(string());
+                  typename TTypes<tstring, NDIMS>::Tensor output) {
+    output.device(d) = output.constant(tstring());
   }
 };
diff --git a/tensorflow/core/kernels/tensor_cord_test.cc b/tensorflow/core/kernels/tensor_cord_test.cc
index bd8d796b110..8fca5b10be0 100644
--- a/tensorflow/core/kernels/tensor_cord_test.cc
+++ b/tensorflow/core/kernels/tensor_cord_test.cc
@@ -205,7 +205,7 @@ TEST(TensorCordTest, MoveConstructor) {
 void TensorCopyFromTensorBenchmark(benchmark::State& state, int num_elem,
                                    int string_size) {
   Tensor strings(DT_STRING, {num_elem});
-  auto t = strings.flat<string>();
+  auto t = strings.flat<tstring>();
   for (int i = 0; i < num_elem; ++i) {
     t(i).insert(0, string_size, 'a');
   }
@@ -217,7 +217,7 @@ void TensorCordFromTensorBenchmark(benchmark::State& state, int num_elem,
                                    int string_size) {
   Tensor strings(DT_STRING, {num_elem});
-  auto t = strings.flat<string>();
+  auto t = strings.flat<tstring>();
   for (int i = 0; i < num_elem; ++i) {
     t(i).insert(0, string_size, 'a');
   }
 
diff --git a/tensorflow/core/util/batch_util.cc b/tensorflow/core/util/batch_util.cc
index c3c72113abf..556359bf749 100644
--- a/tensorflow/core/util/batch_util.cc
+++ b/tensorflow/core/util/batch_util.cc
@@ -105,8 +105,8 @@ void HandleSliceToElement(Tensor* parent, Tensor* element, int64 index,
 }
 
 template <>
-void HandleSliceToElement<string>(Tensor* parent, Tensor* element, int64 index,
-                                  bool can_move) {
+void HandleSliceToElement<tstring>(Tensor* parent, Tensor* element,
+                                   int64 index, bool can_move) {
   auto parent_as_matrix = parent->flat_outer_dims<tstring>();
   auto element_flat = element->flat<tstring>();
   if (can_move) {
diff --git a/tensorflow/core/util/example_proto_fast_parsing.cc b/tensorflow/core/util/example_proto_fast_parsing.cc
index c627e23700b..0243c029be8 100644
--- a/tensorflow/core/util/example_proto_fast_parsing.cc
+++ b/tensorflow/core/util/example_proto_fast_parsing.cc
@@ -1078,7 +1078,7 @@ void CopySparseBufferToTensor(DataType dtype, size_t offset, SparseBuffer* src,
     }
     case DT_STRING: {
       std::move(src->bytes_list.begin(), src->bytes_list.end(),
-                dst->flat<string>().data() + offset);
+                dst->flat<tstring>().data() + offset);
       break;
     }
     default:
diff --git a/tensorflow/core/util/example_proto_helper.h b/tensorflow/core/util/example_proto_helper.h
index 81cedc2c834..5108ae0ca0e 100644
--- a/tensorflow/core/util/example_proto_helper.h
+++ b/tensorflow/core/util/example_proto_helper.h
@@ -226,9 +226,9 @@ struct ParseSingleExampleAttrs {
     return FinishInit();
   }
 
-  std::vector<string> sparse_keys;
+  std::vector<tstring> sparse_keys;
   std::vector<DataType> sparse_types;
-  std::vector<string> dense_keys;
+  std::vector<tstring> dense_keys;
   std::vector<DataType> dense_types;
   std::vector<PartialTensorShape> dense_shapes;
   std::vector<bool> variable_length;
@@ -302,10 +302,10 @@ struct ParseSequenceExampleAttrs {
   int64 num_feature_list_sparse;
   int64 num_feature_list_dense;
   int64 num_feature_list_ragged;
-  std::vector<string> context_sparse_keys;
-  std::vector<string> context_dense_keys;
-  std::vector<string> feature_list_sparse_keys;
-  std::vector<string> feature_list_dense_keys;
+  std::vector<tstring> context_sparse_keys;
+  std::vector<tstring> context_dense_keys;
+  std::vector<tstring> feature_list_sparse_keys;
+  std::vector<tstring> feature_list_dense_keys;
   std::vector<DataType> context_sparse_types;
   std::vector<DataType> context_dense_types;
   std::vector<TensorShape> context_dense_shapes;
diff --git a/tensorflow/core/util/proto/decode.h b/tensorflow/core/util/proto/decode.h
index d415f999ad1..4fb54d8ce8e 100644
--- a/tensorflow/core/util/proto/decode.h
+++ b/tensorflow/core/util/proto/decode.h
@@ -381,13 +381,15 @@ inline Status ReadGroupBytes(CodedInputStream* input, int field_number,
 #else   // USE_TSTRING
   StringOutputStream string_stream(data);
 #endif  // USE_TSTRING
-  CodedOutputStream out(&string_stream);
-  if (!WireFormatLite::SkipField(
-          input,
-          WireFormatLite::MakeTag(field_number,
-                                  WireFormatLite::WIRETYPE_START_GROUP),
-          &out)) {
-    return errors::DataLoss("Failed reading group");
+  {
+    CodedOutputStream out(&string_stream);
+    if (!WireFormatLite::SkipField(
+            input,
+            WireFormatLite::MakeTag(field_number,
+                                    WireFormatLite::WIRETYPE_START_GROUP),
+            &out)) {
+      return errors::DataLoss("Failed reading group");
+    }
   }
 #ifdef USE_TSTRING
   *data = buf;
diff --git a/tensorflow/python/lib/core/py_seq_tensor.cc b/tensorflow/python/lib/core/py_seq_tensor.cc
index 785c3b994ee..8770b362a4e 100644
--- a/tensorflow/python/lib/core/py_seq_tensor.cc
+++ b/tensorflow/python/lib/core/py_seq_tensor.cc
@@ -482,7 +482,7 @@ typedef Converter<Eigen::half> NumpyHalfConverter;
 // String support
 
 template <>
-struct ConverterTraits<string> {
+struct ConverterTraits<tstring> {
   static const tensorflow::DataType kTypeEnum = DT_STRING;
 
   static const char* ConvertScalar(PyObject* v, tstring* out) {
@@ -509,7 +509,7 @@ struct ConverterTraits<string> {
   }
 };
 
-typedef Converter<string> StringConverter;
+typedef Converter<tstring> StringConverter;
 
 // Converts Python object `c` that should hold a Python string into a
 // C++ string in *out.  Returns nullptr on success, or a message on error.
@@ -521,7 +521,7 @@ tstring PyRepr(PyObject* obj) {
   Safe_PyObjectPtr repr_obj = make_safe(PyObject_Repr(obj));
   if (repr_obj) {
     tstring repr_str;
-    if (ConverterTraits<string>::ConvertScalar(repr_obj.get(), &repr_str) ==
+    if (ConverterTraits<tstring>::ConvertScalar(repr_obj.get(), &repr_str) ==
         nullptr) {
       return repr_str;
     }