Updated the majority of string tensor accessors to use the tstring type.

This is a part of a larger migration effort for tensorflow::tstring.
See: https://github.com/tensorflow/community/pull/91
PiperOrigin-RevId: 262172788
Dero Gharibian 2019-08-07 11:03:26 -07:00 committed by TensorFlower Gardener
parent c6719f2091
commit 4b628e8a15
172 changed files with 532 additions and 525 deletions
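In practice the migration is a one-token change at each call site: element access into a DT_STRING tensor now names tensorflow::tstring instead of std::string as the template argument. A minimal sketch of the new pattern (illustrative only; the helper function name and tensor shape below are made up and are not taken from any file in this diff):

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"

// Hypothetical helper, not part of this commit: fills and reads back a
// DT_STRING tensor using the post-migration accessor type.
void FillStringTensor() {
  tensorflow::Tensor t(tensorflow::DT_STRING, tensorflow::TensorShape({2}));
  // Before this change: t.flat<string>()(0) = "hello";
  auto flat = t.flat<tensorflow::tstring>();
  flat(0) = "hello";
  flat(1) = "world";
  // tstring converts to and from std::string, so call sites that only read
  // or assign values keep working after the template swap.
  const tensorflow::tstring& first = t.flat<tensorflow::tstring>()(0);
  (void)first;
}

The same substitution applies to the scalar<T>(), vec<T>(), matrix<T>(), and unaligned_flat<T>() accessors that appear throughout the files below.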

View File

@@ -234,7 +234,7 @@ void TestEncodeDecode(int line, const std::vector<string>& data) {
   // Create C++ Tensor
   Tensor src(tensorflow::DT_STRING, TensorShape(dims));
   for (tensorflow::int64 i = 0; i < src.NumElements(); ++i) {
-    src.flat<string>()(i) = data[i];
+    src.flat<tstring>()(i) = data[i];
   }
   TF_Tensor* dst = TF_TensorFromTensor(src, status);
   ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
@@ -244,7 +244,7 @@ void TestEncodeDecode(int line, const std::vector<string>& data) {
   ASSERT_EQ(Status::OK(), TF_TensorToTensor(dst, &output)) << line;
   ASSERT_EQ(src.NumElements(), output.NumElements()) << line;
   for (tensorflow::int64 i = 0; i < src.NumElements(); ++i) {
-    ASSERT_EQ(data[i], output.flat<string>()(i)) << line;
+    ASSERT_EQ(data[i], output.flat<tstring>()(i)) << line;
   }
   TF_DeleteTensor(dst);
@@ -1386,7 +1386,7 @@ TEST(CAPI, SavedModel) {
     tensorflow::Example example;
     auto* feature_map = example.mutable_features()->mutable_feature();
    (*feature_map)["x"].mutable_float_list()->add_value(i);
-    input.flat<string>()(i) = example.SerializeAsString();
+    input.flat<tstring>()(i) = example.SerializeAsString();
   }
   const tensorflow::string input_op_name(

View File

@@ -354,7 +354,7 @@ TF_Tensor* TF_TensorFromTensor(const tensorflow::Tensor& src,
     // Compute bytes needed for encoding.
     size_t size = 0;
-    const auto& srcarray = src.flat<string>();
+    const auto& srcarray = src.flat<tstring>();
     for (int i = 0; i < srcarray.size(); ++i) {
       const string& s = srcarray(i);
       // uint64 starting_offset, TF_StringEncode-d string.
@@ -440,7 +440,7 @@ Status TF_TensorToTensor(const TF_Tensor* src, Tensor* dst) {
     const char* limit = input + src_size;
     *dst = Tensor(static_cast<tensorflow::DataType>(src->dtype), src->shape);
-    auto dstarray = dst->flat<string>();
+    auto dstarray = dst->flat<tstring>();
     for (tensorflow::int64 i = 0; i < num_elements; ++i) {
       tensorflow::uint64 offset =
           reinterpret_cast<const tensorflow::uint64*>(input)[i];

View File

@@ -193,7 +193,7 @@ string PrintTensor(const TensorProto& tensor_proto) {
       string ret;
       for (int64 i = 0; i < num_elts; ++i) {
         if (i > 0) strings::StrAppend(&ret, " ");
-        strings::StrAppend(&ret, absl::CEscape(t.flat<string>()(i)));
+        strings::StrAppend(&ret, absl::CEscape(t.flat<tstring>()(i)));
       }
       return ret;
     }

View File

@@ -97,7 +97,7 @@ Input::Initializer::Initializer(
     Tensor elem = e.tensor;
     if (first.tensor.dtype() == DT_STRING) {
       for (int i = 0; i < elem.NumElements(); ++i) {
-        t.flat<string>()(offset + i) = elem.flat<string>()(i);
+        t.flat<tstring>()(offset + i) = elem.flat<tstring>()(i);
       }
       offset += elem.NumElements();
     } else {

View File

@@ -75,7 +75,7 @@ Status LoadMetaGraphIntoSession(const MetaGraphDef& meta_graph_def,
 Tensor CreateStringTensor(const string& value) {
   Tensor tensor(DT_STRING, TensorShape({}));
-  tensor.scalar<string>()() = value;
+  tensor.scalar<tstring>()() = value;
   return tensor;
 }
@@ -219,7 +219,7 @@ Status RunRestore(const RunOptions& run_options, const string& export_dir,
   // Add variables to the graph.
   Tensor variables_path_tensor(DT_STRING, TensorShape({}));
-  variables_path_tensor.scalar<string>()() = variables_path;
+  variables_path_tensor.scalar<tstring>()() = variables_path;
   std::vector<std::pair<string, Tensor>> inputs = {
       {string(variable_filename_const_op_name), variables_path_tensor}};

View File

@@ -508,7 +508,7 @@ void XlaCompileOp::Compute(OpKernelContext* ctx) {
       client, executable, kernel, std::move(variables), constants_.size()));
   Tensor compilation_key(cpu_allocator, DT_STRING, TensorShape({}));
-  compilation_key.flat<string>()(0) = key;
+  compilation_key.flat<tstring>()(0) = key;
   Tensor compilation_successful(cpu_allocator, DT_BOOL, TensorShape({}));
   compilation_successful.flat<bool>()(0) = true;
@@ -523,7 +523,7 @@ XlaRunOp::XlaRunOp(OpKernelConstruction* ctx)
 void XlaRunOp::Compute(OpKernelContext* ctx) {
   VLOG(3) << "XlaRunOp " << def().name();
   Tensor key_tensor = ctx->input(ctx->num_inputs() - 1);
-  const XlaExecutableClosureStore::KeyT& key = key_tensor.flat<string>()(0);
+  const XlaExecutableClosureStore::KeyT& key = key_tensor.flat<tstring>()(0);
   XlaExecutableClosure closure =
       XlaExecutableClosureStore::Global()->Consume(key);

View File

@@ -40,7 +40,7 @@ class GetCalibrationDataOp : public OpKernel {
     // serialized string to that tensor, and later sess.run() will copy it back
     // to host. We need to optimize this.
-    const string& resource_name = context->input(0).scalar<string>()();
+    const string& resource_name = context->input(0).scalar<tstring>()();
     // Get the resource.
     TRTEngineCacheResource* resource = nullptr;
     OP_REQUIRES_OK(context, context->resource_manager()->Lookup(
@@ -59,7 +59,7 @@ class GetCalibrationDataOp : public OpKernel {
     OP_REQUIRES_OK(context,
                    context->allocate_output(0, TensorShape({}), &output));
-    output->scalar<string>()() = serialized_resource;
+    output->scalar<tstring>()() = serialized_resource;
   }
 };

View File

@@ -109,7 +109,7 @@ class InitializeTRTResource : public OpKernel {
                     resource->cache_.size(), " entries."));
     // Get the file name.
-    const string& filename = ctx->input(1).scalar<string>()();
+    const string& filename = ctx->input(1).scalar<tstring>()();
     OP_REQUIRES(ctx, !filename.empty(),
                 errors::InvalidArgument("filename cannot be empty."));
@@ -171,8 +171,8 @@ class SerializeTRTResource : public OpKernel {
   }
   void Compute(OpKernelContext* ctx) override {
-    const string& resource_name = ctx->input(0).scalar<string>()();
-    const string& filename = ctx->input(1).scalar<string>()();
+    const string& resource_name = ctx->input(0).scalar<tstring>()();
+    const string& filename = ctx->input(1).scalar<tstring>()();
     OP_REQUIRES(ctx, !filename.empty(),
                 errors::InvalidArgument("filename cannot be empty."));

View File

@@ -151,7 +151,7 @@ void XRTCompileOp::Compute(OpKernelContext* ctx) {
   xrt::XLAComputation computation_proto;
   OP_REQUIRES(
       ctx,
-      computation_proto.ParseFromString(computation_input.scalar<string>()()),
+      computation_proto.ParseFromString(computation_input.scalar<tstring>()()),
       errors::InvalidArgument(
           "Unable to parse computation input to XLAComputation"));
@@ -191,7 +191,7 @@ void XRTCompileOp::Compute(OpKernelContext* ctx) {
                                  .ComputeProgramShape()
                                  .ToProto();
   Tensor program_shape_output(DT_STRING, TensorShape({1}));
-  program_shape_output.vec<string>()(0) = program_shape.SerializeAsString();
+  program_shape_output.vec<tstring>()(0) = program_shape.SerializeAsString();
   ctx->set_output(1, program_shape_output);
 }

View File

@@ -260,7 +260,7 @@ Status XRTExecuteOp::DoWork(OpKernelContext* context) {
   TF_RET_CHECK(TensorShapeUtils::IsScalar(execution_config.shape()));
   xrt::XRTExecutionConfig config_proto;
   TF_RET_CHECK(
-      config_proto.ParseFromString(execution_config.scalar<string>()()));
+      config_proto.ParseFromString(execution_config.scalar<tstring>()()));
   int core_index_in_replica = config_proto.core_index_in_replica();
   TF_RET_CHECK(core_index_in_replica == 0);
@@ -343,12 +343,12 @@ Status XRTExecuteChainedOp::DoWork(OpKernelContext* context) {
   const Tensor& execution_plan = context->input(0);
   TF_RET_CHECK(TensorShapeUtils::IsScalar(execution_plan.shape()));
   xrt::XRTChainedExecutePlan plan;
-  TF_RET_CHECK(plan.ParseFromString(execution_plan.scalar<string>()()));
+  TF_RET_CHECK(plan.ParseFromString(execution_plan.scalar<tstring>()()));
   const Tensor& execution_config = context->input(1);
   TF_RET_CHECK(TensorShapeUtils::IsScalar(execution_config.shape()));
   xrt::XRTChainedExecuteConfig config;
-  TF_RET_CHECK(config.ParseFromString(execution_config.scalar<string>()()));
+  TF_RET_CHECK(config.ParseFromString(execution_config.scalar<tstring>()()));
   XRTCompilationCache* cache;
   TF_RETURN_IF_ERROR(rm->Lookup<XRTCompilationCache>(

View File

@@ -177,7 +177,7 @@ class XRTAllocateOp : public OpKernel {
     xrt::XLAAllocation allocation_proto;
     OP_REQUIRES(
         ctx,
-        allocation_proto.ParseFromString(allocation_info.scalar<string>()()),
+        allocation_proto.ParseFromString(allocation_info.scalar<tstring>()()),
         errors::InvalidArgument(
             "Unable to parse allocation input to XLAAllocation"));
@@ -419,7 +419,7 @@ class XRTMakeTupleOp : public OpKernel {
         errors::Internal("tuple description input should be a string scalar"));
     xrt::XLATupleNode tuple_proto;
     OP_REQUIRES(
-        ctx, tuple_proto.ParseFromString(tuple_info.scalar<string>()()),
+        ctx, tuple_proto.ParseFromString(tuple_info.scalar<tstring>()()),
         errors::InvalidArgument("Unable to parse tuple input to XLATupleNode"));
     OpInputList arg_list;
@@ -627,7 +627,7 @@ class XRTWriteLiteralOp : public OpKernel {
         errors::Internal("literal input should be a string scalar"));
     xla::LiteralProto literal_proto;
     OP_REQUIRES(ctx,
-                literal_proto.ParseFromString(literal_info.scalar<string>()()),
+                literal_proto.ParseFromString(literal_info.scalar<tstring>()()),
                 errors::InvalidArgument(
                     "Unable to parse allocation input to LiteralProto"));
     xla::Literal literal;

View File

@@ -127,7 +127,7 @@ xla::LiteralProto FloatMatrix(
 xla::Literal ReadOutputLiteral(const std::vector<Tensor>& outputs, size_t idx) {
   xla::LiteralProto response;
-  CHECK(response.ParseFromString(outputs[idx].scalar<string>()()));
+  CHECK(response.ParseFromString(outputs[idx].scalar<tstring>()()));
   return xla::Literal::CreateFromProto(response).ValueOrDie();
 }
@@ -316,7 +316,7 @@ TEST(RawApiTest, AllocFromTensor) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralToLiteralProto(literal, response));
 }
@@ -351,7 +351,7 @@ TEST(RawApiTest, AllocUninitialized) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto read_back_literal;
   EXPECT_TRUE(
-      read_back_literal.ParseFromString(outputs[0].scalar<string>()()));
+      read_back_literal.ParseFromString(outputs[0].scalar<tstring>()()));
   Tensor read_back_tensor;
   TF_ASSERT_OK(LiteralToHostTensor(
       xla::Literal::CreateFromProto(read_back_literal).ValueOrDie(), DT_FLOAT,
@@ -381,7 +381,7 @@ TEST(RawApiTest, AllocUninitialized) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralProtos(response, new_literal));
   }
 }
@@ -413,7 +413,7 @@ TEST(RawApiTest, AllocFromTensorTuple) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralToLiteralProto(literal, response));
 }
@@ -439,7 +439,7 @@ TEST(RawApiTest, AllocFromTensorTupleSingle) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralToLiteralProto(literal, response));
 }
@@ -465,7 +465,7 @@ TEST(RawApiTest, AllocFromTensorRelayout) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   // We have sent literal's data (in array layout) with a attribute layout
   // {0,1}, so the expected literal read from device needs to be changed
   // accordingly.
@@ -493,7 +493,7 @@ TEST(RawApiTest, AllocAndRewrite) {
   int64 allocation_handle = outputs[1].scalar<int64>()();
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralProtos(alloc.value(), response));
   xla::LiteralProto new_literal =
@@ -512,7 +512,7 @@ TEST(RawApiTest, AllocAndRewrite) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto new_response;
-  EXPECT_TRUE(new_response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(new_response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralProtos(new_literal, new_response));
   Tensor release_tensor(DT_INT64, TensorShape({1}));
@@ -652,7 +652,7 @@ TEST(RawApiTest, ReadAndWriteState) {
       session.Run(ClientSession::FeedType(), {read_back}, {release}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralProtos(alloc.value(), response));
 }
@@ -673,7 +673,7 @@ TEST(RawApiTest, ReadAndWriteStateAutoFree) {
   TF_EXPECT_OK(session.Run({read_back}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralProtos(alloc.value(), response));
 }
@@ -707,13 +707,13 @@ TEST(RawApiTest, SubBuffer) {
   auto base_elements = base_literal.DecomposeTuple();
   auto nested_0_elements = base_elements[0].Clone().DecomposeTuple();
   xla::LiteralProto response_0;
-  EXPECT_TRUE(response_0.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response_0.ParseFromString(outputs[0].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralToLiteralProto(base_elements[0], response_0));
   xla::LiteralProto response_1;
-  EXPECT_TRUE(response_1.ParseFromString(outputs[1].scalar<string>()()));
+  EXPECT_TRUE(response_1.ParseFromString(outputs[1].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralToLiteralProto(base_elements[1], response_1));
   xla::LiteralProto response_00;
-  EXPECT_TRUE(response_00.ParseFromString(outputs[2].scalar<string>()()));
+  EXPECT_TRUE(response_00.ParseFromString(outputs[2].scalar<tstring>()()));
   EXPECT_TRUE(CompareLiteralToLiteralProto(nested_0_elements[0], response_00));
 }
@@ -779,9 +779,9 @@ TEST(RawApiTest, MakeTuple) {
   std::vector<Tensor> outputs;
   TF_EXPECT_OK(session.Run({res_0, res_1}, &outputs));
   xla::LiteralProto response_0;
-  EXPECT_TRUE(response_0.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response_0.ParseFromString(outputs[0].scalar<tstring>()()));
   xla::LiteralProto response_1;
-  EXPECT_TRUE(response_1.ParseFromString(outputs[1].scalar<string>()()));
+  EXPECT_TRUE(response_1.ParseFromString(outputs[1].scalar<tstring>()()));
   auto expected_0 = MakeTuple0();
   EXPECT_TRUE(CompareLiteralProtos(response_0, expected_0));
@@ -853,7 +853,7 @@ TEST(RawApiTest, ExecuteChainedOpByOp) {
   TF_EXPECT_OK(session.Run({read_back}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto expected = xla::LiteralUtil::CreateR1<float>({-150.0f, -36.0f});
   EXPECT_TRUE(CompareLiteralToLiteralProto(expected, response));
@@ -973,7 +973,7 @@ TEST(RawApiTest, ExecuteChained) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto expected = xla::LiteralUtil::CreateR1<float>({-150.0f, -36.0f});
   EXPECT_TRUE(CompareLiteralToLiteralProto(expected, response));
@@ -1022,13 +1022,13 @@ TEST(RawApiTest, CompileAndExecute) {
   TF_EXPECT_OK(session.Run({read_back, c_handle.program_shape}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto expected = xla::LiteralUtil::CreateR1<float>({27.0f, 21.0f});
   EXPECT_TRUE(CompareLiteralToLiteralProto(expected, response));
   xla::ProgramShapeProto program_shape;
-  EXPECT_TRUE(program_shape.ParseFromString(outputs[1].vec<string>()(0)));
+  EXPECT_TRUE(program_shape.ParseFromString(outputs[1].vec<tstring>()(0)));
   EXPECT_EQ(program_shape.parameters_size(), 2);
 }
@@ -1077,13 +1077,13 @@ TEST(RawApiTest, CompileAndExecuteWithArgumentVector) {
   TF_EXPECT_OK(session.Run({read_back, c_handle.program_shape}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto expected = xla::LiteralUtil::CreateR1<float>({27.0f, 21.0f});
   EXPECT_TRUE(CompareLiteralToLiteralProto(expected, response));
   xla::ProgramShapeProto program_shape;
-  EXPECT_TRUE(program_shape.ParseFromString(outputs[1].vec<string>()(0)));
+  EXPECT_TRUE(program_shape.ParseFromString(outputs[1].vec<tstring>()(0)));
   EXPECT_EQ(program_shape.parameters_size(), 2);
 }
@@ -1128,7 +1128,8 @@ TEST(RawApiTest, CompileWithXlaReturnShapes) {
                            {release}, &outputs));
   xla::ProgramShapeProto program_shape_proto;
-  EXPECT_TRUE(program_shape_proto.ParseFromString(outputs[0].vec<string>()(0)));
+  EXPECT_TRUE(
+      program_shape_proto.ParseFromString(outputs[0].vec<tstring>()(0)));
   xla::ProgramShape program_shape(program_shape_proto);
   EXPECT_EQ(program_shape.parameters_size(), 1);
@@ -1196,7 +1197,7 @@ TEST(RawApiTest, DotGeneralWithLayoutTest) {
   TF_EXPECT_OK(session.Run({read_back}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto expected =
       xla::LiteralUtil::CreateR2WithLayout<float>({{18.0f}, {44.0f}}, layout);
@@ -1231,7 +1232,7 @@ TEST(RawApiTest, CompileAndExecuteZeroArg) {
   TF_EXPECT_OK(session.Run({read_back}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto expected = xla::LiteralUtil::CreateR0<float>(3.0f);
   EXPECT_TRUE(CompareLiteralToLiteralProto(expected, response));
@@ -1281,7 +1282,7 @@ TEST(RawApiTest, CompileAndExecuteReturnTuple) {
   TF_EXPECT_OK(session.Run({read_back}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto sum = xla::LiteralUtil::CreateR1<float>({9.0f, 7.0f});
   auto expected = xla::LiteralUtil::MakeTuple({&sum});
@@ -1343,7 +1344,7 @@ TEST(RawApiTest, CompileAndExecuteReturnExplodedTuple) {
     EXPECT_EQ(voutputs.size(), 1);
     xla::LiteralProto response;
-    EXPECT_TRUE(response.ParseFromString(voutputs[0].scalar<string>()()));
+    EXPECT_TRUE(response.ParseFromString(voutputs[0].scalar<tstring>()()));
    auto expected = xla::LiteralUtil::CreateR0<float>(kResults[i]);
    EXPECT_TRUE(CompareLiteralToLiteralProto(expected, response));
@@ -1514,13 +1515,13 @@ TEST(RawApiTest, CompileAndExecuteWithS64Argument) {
   TF_EXPECT_OK(session.Run({read_back, c_handle.program_shape}, &outputs));
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto expected = xla::LiteralUtil::CreateR0<int64>(15123899);
   EXPECT_TRUE(CompareLiteralToLiteralProto(expected, response));
   xla::ProgramShapeProto program_shape;
-  EXPECT_TRUE(program_shape.ParseFromString(outputs[1].vec<string>()(0)));
+  EXPECT_TRUE(program_shape.ParseFromString(outputs[1].vec<tstring>()(0)));
   EXPECT_EQ(program_shape.parameters_size(), 2);
   EXPECT_TRUE(xla::ShapeUtil::HasPrimitiveType(
       xla::Shape(program_shape.result()), xla::S64));
@@ -1580,7 +1581,7 @@ TEST(RawApiTest, TestDeviceMemoryCompaction) {
   // we have on record.
   for (size_t i = 1, j = 0; i < handles.size(); i += 2, ++j) {
     xla::LiteralProto response;
-    EXPECT_TRUE(response.ParseFromString(outputs[j].scalar<string>()()));
+    EXPECT_TRUE(response.ParseFromString(outputs[j].scalar<tstring>()()));
    EXPECT_TRUE(CompareLiteralProtos(allocs[i].value(), response));
   }
 }
@@ -1668,7 +1669,7 @@ TEST(RawApiTest, TestDeviceMemorySwap) {
   EXPECT_EQ(outputs.size(), 1);
   xla::LiteralProto response;
-  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<string>()()));
+  EXPECT_TRUE(response.ParseFromString(outputs[0].scalar<tstring>()()));
   auto literal = xla::Literal::CreateFromProto(response).ValueOrDie();
   EXPECT_EQ(literal, zero_literal);
 }

View File

@@ -214,8 +214,8 @@ class ToBigtableOp : public AsyncOpKernel {
     std::vector<string> columns;
     columns.reserve(column_families_tensor->NumElements());
     for (uint64 i = 0; i < column_families_tensor->NumElements(); ++i) {
-      column_families.push_back(column_families_tensor->flat<string>()(i));
-      columns.push_back(columns_tensor->flat<string>()(i));
+      column_families.push_back(column_families_tensor->flat<tstring>()(i));
+      columns.push_back(columns_tensor->flat<tstring>()(i));
     }
     DatasetBase* dataset;
@@ -317,7 +317,7 @@ class ToBigtableOp : public AsyncOpKernel {
           "Iterator produced a set of Tensors shorter than expected");
     }
     ::google::cloud::bigtable::SingleRowMutation mutation(
-        std::move(tensors[0].scalar<string>()()));
+        std::move(tensors[0].scalar<tstring>()()));
     std::chrono::milliseconds timestamp(timestamp_int);
     for (size_t i = 1; i < tensors.size(); ++i) {
       if (!TensorShapeUtils::IsScalar(tensors[i].shape())) {
@@ -326,11 +326,11 @@ class ToBigtableOp : public AsyncOpKernel {
       if (timestamp_int == -1) {
         mutation.emplace_back(::google::cloud::bigtable::SetCell(
             column_families[i - 1], columns[i - 1],
-            std::move(tensors[i].scalar<string>()())));
+            std::move(tensors[i].scalar<tstring>()())));
       } else {
         mutation.emplace_back(::google::cloud::bigtable::SetCell(
             column_families[i - 1], columns[i - 1], timestamp,
-            std::move(tensors[i].scalar<string>()())));
+            std::move(tensors[i].scalar<tstring>()())));
       }
     }
     bulk_mutation->emplace_back(std::move(mutation));

View File

@@ -156,13 +156,13 @@ class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
        ::google::cloud::StatusOr<
            std::pair<bool, ::google::cloud::bigtable::Row>>
            row = dataset()->table_->table().ReadRow(
-               input_tensors[0].scalar<string>()(), dataset()->filter_);
+               input_tensors[0].scalar<tstring>()(), dataset()->filter_);
        if (!row.ok()) {
          return GcpStatusToTfStatus(row.status());
        }
        if (!row->first) {
          return errors::DataLoss("Row key '",
-                                 input_tensors[0].scalar<string>()(),
+                                 input_tensors[0].scalar<tstring>()(),
                                  "' not found.");
        }
        TF_RETURN_IF_ERROR(ParseRow(ctx, row->second, out_tensors));
@@ -180,7 +180,7 @@ class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
                  std::vector<Tensor>* out_tensors) {
      out_tensors->reserve(dataset()->columns_.size() + 1);
      Tensor row_key_tensor(ctx->allocator({}), DT_STRING, {});
-     row_key_tensor.scalar<string>()() = string(row.row_key());
+     row_key_tensor.scalar<tstring>()() = tstring(row.row_key());
      out_tensors->emplace_back(std::move(row_key_tensor));
      if (row.cells().size() > 2 * dataset()->columns_.size()) {
@@ -198,7 +198,7 @@ class BigtableLookupDatasetOp : public UnaryDatasetOpKernel {
          if (cell_itr->family_name() == dataset()->column_families_[i] &&
              string(cell_itr->column_qualifier()) ==
                  dataset()->columns_[i]) {
-           col_tensor.scalar<string>()() = string(cell_itr->value());
+           col_tensor.scalar<tstring>()() = tstring(cell_itr->value());
            found_column = true;
          }
        }

View File

@@ -99,7 +99,7 @@ class BigtablePrefixKeyDatasetOp : public DatasetOpKernel {
                          const ::google::cloud::bigtable::Row& row,
                          std::vector<Tensor>* out_tensors) override {
      Tensor output_tensor(ctx->allocator({}), DT_STRING, {});
-     output_tensor.scalar<string>()() = string(row.row_key());
+     output_tensor.scalar<tstring>()() = tstring(row.row_key());
      out_tensors->emplace_back(std::move(output_tensor));
      return Status::OK();
    }

View File

@@ -105,7 +105,7 @@ class BigtableRangeKeyDatasetOp : public DatasetOpKernel {
                          const ::google::cloud::bigtable::Row& row,
                          std::vector<Tensor>* out_tensors) override {
      Tensor output_tensor(ctx->allocator({}), DT_STRING, {});
-     output_tensor.scalar<string>()() = string(row.row_key());
+     output_tensor.scalar<tstring>()() = string(row.row_key());
      out_tensors->emplace_back(std::move(output_tensor));
      return Status::OK();
    }

View File

@@ -177,11 +177,11 @@ class BigtableSampleKeyPairsDatasetOp : public DatasetOpKernel {
      *end_of_sequence = false;
      out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
                                TensorShape({}));
-     out_tensors->back().scalar<string>()() = keys_[index_];
+     out_tensors->back().scalar<tstring>()() = keys_[index_];
      out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
                                TensorShape({}));
-     out_tensors->back().scalar<string>()() = keys_[index_ + 1];
+     out_tensors->back().scalar<tstring>()() = keys_[index_ + 1];
      ++index_;
      return Status::OK();

View File

@@ -99,7 +99,7 @@ class BigtableSampleKeysDatasetOp : public DatasetOpKernel {
      if (index_ < row_keys_.size()) {
        out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
                                  TensorShape({}));
-       out_tensors->back().scalar<string>()() =
+       out_tensors->back().scalar<tstring>()() =
            string(row_keys_[index_].row_key);
        *end_of_sequence = false;
        index_++;

View File

@@ -177,7 +177,7 @@ class BigtableScanDatasetOp : public DatasetOpKernel {
                  std::vector<Tensor>* out_tensors) override {
      out_tensors->reserve(dataset()->columns_.size() + 1);
      Tensor row_key_tensor(ctx->allocator({}), DT_STRING, {});
-     row_key_tensor.scalar<string>()() = string(row.row_key());
+     row_key_tensor.scalar<tstring>()() = string(row.row_key());
      out_tensors->emplace_back(std::move(row_key_tensor));
      if (row.cells().size() > 2 * dataset()->columns_.size()) {

View File

@@ -46,7 +46,7 @@ class CreateTreeEnsembleVariableOp : public OpKernel {
     OP_REQUIRES_OK(context, context->input("tree_ensemble_config",
                                            &tree_ensemble_config_t));
     auto* result = new DecisionTreeEnsembleResource();
-    if (!result->InitFromSerialized(tree_ensemble_config_t->scalar<string>()(),
+    if (!result->InitFromSerialized(tree_ensemble_config_t->scalar<tstring>()(),
                                     stamp_token)) {
       result->Unref();
       OP_REQUIRES(
@@ -99,7 +99,7 @@ class TreeEnsembleSerializeOp : public OpKernel {
     Tensor* output_config_t = nullptr;
     OP_REQUIRES_OK(
         context, context->allocate_output(1, TensorShape(), &output_config_t));
-    output_config_t->scalar<string>()() =
+    output_config_t->scalar<tstring>()() =
         ensemble_resource->SerializeAsString();
   }
 };
@@ -130,7 +130,7 @@ class TreeEnsembleDeserializeOp : public OpKernel {
     OP_REQUIRES(
         context,
         ensemble_resource->InitFromSerialized(
-            tree_ensemble_config_t->scalar<string>()(), stamp_token),
+            tree_ensemble_config_t->scalar<tstring>()(), stamp_token),
         errors::InvalidArgument("Unable to parse tree ensemble config."));
   }
 };

View File

@@ -324,7 +324,7 @@ class QuantileAccumulatorAddSummariesOp : public OpKernel {
          context,
          ParseProtoUnlimited(
              summary_proto,
-             summary_list[resource_handle_idx].scalar<string>()()),
+             summary_list[resource_handle_idx].scalar<tstring>()()),
          errors::InvalidArgument("Unable to parse quantile summary."));
      std::vector<QuantileSummaryEntry> entries;
      entries.reserve(summary_proto->entries_size());
@@ -543,7 +543,7 @@ class QuantileAccumulatorDeserializeOp : public OpKernel {
     ::boosted_trees::QuantileStreamState state_proto;
     OP_REQUIRES(
         context,
-        ParseProtoUnlimited(&state_proto, stream_state_t->scalar<string>()()),
+        ParseProtoUnlimited(&state_proto, stream_state_t->scalar<tstring>()()),
         errors::InvalidArgument("Unabnle to parse quantile stream state."));
     std::vector<QuantileSummary> summaries;
     summaries.reserve(state_proto.summaries_size());

View File

@@ -213,8 +213,8 @@ class BuildDenseInequalitySplitsOp : public OpKernel {
     OP_REQUIRES_OK(context, context->allocate_output("split_infos",
                                                      TensorShape({size_output}),
                                                      &output_splits_t));
-    tensorflow::TTypes<string>::Vec output_splits =
-        output_splits_t->vec<string>();
+    tensorflow::TTypes<tstring>::Vec output_splits =
+        output_splits_t->vec<tstring>();
     if (num_elements == 0) {
       return;
@@ -529,8 +529,8 @@ class BuildSparseInequalitySplitsOp : public OpKernel {
     OP_REQUIRES_OK(context, context->allocate_output(
                                 "split_infos", TensorShape({num_elements}),
                                 &output_splits_t));
-    tensorflow::TTypes<string>::Vec output_splits =
-        output_splits_t->vec<string>();
+    tensorflow::TTypes<tstring>::Vec output_splits =
+        output_splits_t->vec<tstring>();
     SplitBuilderState state(context);
     // For each tree node that needs to be split.
     for (int root_idx = 0; root_idx < num_elements; ++root_idx) {
@@ -780,8 +780,8 @@ class BuildCategoricalEqualitySplitsOp : public OpKernel {
     OP_REQUIRES_OK(context, context->allocate_output("split_infos",
                                                      TensorShape({size_output}),
                                                      &output_splits_t));
-    tensorflow::TTypes<string>::Vec output_splits =
-        output_splits_t->vec<string>();
+    tensorflow::TTypes<tstring>::Vec output_splits =
+        output_splits_t->vec<tstring>();
     if (num_elements == 0) {
       return;
     }

View File

@@ -468,7 +468,7 @@ class GrowTreeEnsembleOp : public OpKernel {
     for (int64 handler_id = 0; handler_id < num_handlers_; ++handler_id) {
       const auto& partition_ids = partition_ids_list[handler_id].vec<int32>();
       const auto& gains = gains_list[handler_id].vec<float>();
-      const auto& splits = splits_list[handler_id].vec<string>();
+      const auto& splits = splits_list[handler_id].vec<tstring>();
       OP_REQUIRES(context, partition_ids.size() == gains.size(),
                   errors::InvalidArgument(
                       "Inconsistent partition Ids and gains tensors: ",
@@ -502,7 +502,7 @@ class GrowTreeEnsembleOp : public OpKernel {
     // Find best split per partition going through every feature candidate.
     for (int64 handler_id = 0; handler_id < num_handlers_; ++handler_id) {
       const auto& gains = gains_list[handler_id].vec<float>();
-      const auto& splits = splits_list[handler_id].vec<string>();
+      const auto& splits = splits_list[handler_id].vec<tstring>();
       OP_REQUIRES(context, gains.size() == 1,
                   errors::InvalidArgument(
                       "Gains size must be one for oblivious weak learner: ",

View File

@@ -153,7 +153,7 @@ class GenerateBigQueryReaderPartitionsOp : public OpKernel {
                    context->allocate_output(0, TensorShape({num_partitions_}),
                                             &output_tensor));
-    auto output = output_tensor->template flat<string>();
+    auto output = output_tensor->template flat<tstring>();
     for (int64 i = 0; i < num_partitions_; ++i) {
       BigQueryTablePartition partition;
       partition.set_start_index(i * partition_size);

View File

@@ -135,9 +135,10 @@ class DecodeAudioOpV2 : public OpKernel {
             "channel_count must be a rank-0 tensor but got shape ",
             channel_count_tensor.shape().DebugString()));
-    const tensorflow::StringPiece contents = contents_tensor.scalar<string>()();
+    const tensorflow::StringPiece contents =
+        contents_tensor.scalar<tstring>()();
     const string file_format =
-        absl::AsciiStrToLower(file_format_tensor.scalar<string>()());
+        absl::AsciiStrToLower(file_format_tensor.scalar<tstring>()());
     const int32 samples_per_second =
         samples_per_second_tensor.scalar<int32>()();
     const int32 channel_count = channel_count_tensor.scalar<int32>()();
@@ -243,7 +244,7 @@ class DecodeAudioOp : public OpKernel {
                 errors::InvalidArgument("contents must be scalar but got shape ",
                                         contents.shape().DebugString()));
-    const tensorflow::StringPiece file_contents = contents.scalar<string>()();
+    const tensorflow::StringPiece file_contents = contents.scalar<tstring>()();
     Decode(context, file_contents, file_format_, samples_per_second_,
            channel_count_, "");
   }

View File

@@ -45,7 +45,8 @@ class DecodeVideoOp : public OpKernel {
                 errors::InvalidArgument(
                     "contents must be a rank-0 tensor but got shape ",
                     contents_tensor.shape().DebugString()));
-    const tensorflow::StringPiece contents = contents_tensor.scalar<string>()();
+    const tensorflow::StringPiece contents =
+        contents_tensor.scalar<tstring>()();
     // Write the input data to a temp file.
     string extension;

View File

@@ -45,7 +45,7 @@ void Encode(OpKernelContext* context, const Tensor& contents,
   // Copy the encoded audio file to the output tensor.
   Tensor* output = nullptr;
   OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape(), &output));
-  output->scalar<string>()() = encoded_audio;
+  output->scalar<tstring>()() = encoded_audio;
 }
 }  // namespace
@@ -95,7 +95,7 @@ class EncodeAudioOpV2 : public OpKernel {
                     bits_per_second_tensor.shape().DebugString()));
     const string file_format =
-        absl::AsciiStrToLower(file_format_tensor.scalar<string>()());
+        absl::AsciiStrToLower(file_format_tensor.scalar<tstring>()());
     const int32 samples_per_second =
         samples_per_second_tensor.scalar<int32>()();
     const int32 bits_per_second = bits_per_second_tensor.scalar<int32>()();

View File

@@ -198,7 +198,7 @@ class SequenceFileDatasetOp : public DatasetOpKernel {
     std::vector<string> filenames;
     filenames.reserve(filenames_tensor->NumElements());
     for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
-      filenames.push_back(filenames_tensor->flat<string>()(i));
+      filenames.push_back(filenames_tensor->flat<tstring>()(i));
     }
     *output = new Dataset(ctx, filenames, output_types_);
@@ -264,11 +264,11 @@ class SequenceFileDatasetOp : public DatasetOpKernel {
         TF_RETURN_IF_ERROR(status);
         Tensor key_tensor(ctx->allocator({}), DT_STRING, {});
-        key_tensor.scalar<string>()() = key;
+        key_tensor.scalar<tstring>()() = key;
         out_tensors->emplace_back(std::move(key_tensor));
         Tensor value_tensor(ctx->allocator({}), DT_STRING, {});
-        value_tensor.scalar<string>()() = value;
+        value_tensor.scalar<tstring>()() = value;
         out_tensors->emplace_back(std::move(value_tensor));
         *end_of_sequence = false;

View File

@@ -73,7 +73,7 @@ Status BinaryObjectParser::Parse(uint8_t** ptr,
     }
     case STRING: {
       out_tensors->emplace_back(cpu_allocator(), DT_STRING, TensorShape({}));
-      out_tensors->back().scalar<string>()() = ParseString(ptr);
+      out_tensors->back().scalar<tstring>()() = ParseString(ptr);
       break;
     }
     case DATE: {
@@ -150,7 +150,7 @@ Status BinaryObjectParser::Parse(uint8_t** ptr,
       out_tensors->emplace_back(cpu_allocator(), DT_STRING,
                                 TensorShape({length}));
       for (int32_t i = 0; i < length; i++)
-        out_tensors->back().vec<string>()(i) = ParseString(ptr);
+        out_tensors->back().vec<tstring>()(i) = ParseString(ptr);
       break;
     }
     case DATE_ARR: {

View File

@@ -30,7 +30,7 @@ class ObtainNextOp : public OpKernel {
     const Tensor* list;
     OP_REQUIRES_OK(ctx, ctx->input("list", &list));
     int64 num_elements = list->NumElements();
-    auto list_flat = list->flat<string>();
+    auto list_flat = list->flat<tstring>();
     // Allocate output.
     Tensor* output_tensor = nullptr;
@@ -48,7 +48,7 @@ class ObtainNextOp : public OpKernel {
     *pos = (*pos + 1) % num_elements;
     // Assign value to output.
-    output_tensor->scalar<string>()() = list_flat(*pos);
+    output_tensor->scalar<tstring>()() = list_flat(*pos);
   }
 };

View File

@@ -33,7 +33,7 @@ class KafkaDatasetOp : public DatasetOpKernel {
     std::vector<string> topics;
     topics.reserve(topics_tensor->NumElements());
     for (int i = 0; i < topics_tensor->NumElements(); ++i) {
-      topics.push_back(topics_tensor->flat<string>()(i));
+      topics.push_back(topics_tensor->flat<tstring>()(i));
     }
     std::string servers = "";
@@ -128,7 +128,7 @@ class KafkaDatasetOp : public DatasetOpKernel {
         if (message->err() == RdKafka::ERR_NO_ERROR) {
           // Produce the line as output.
           Tensor line_tensor(cpu_allocator(), DT_STRING, {});
-          line_tensor.scalar<string>()() =
+          line_tensor.scalar<tstring>()() =
               std::string(static_cast<const char*>(message->payload()),
                           message->len());
           out_tensors->emplace_back(std::move(line_tensor));

View File

@@ -78,7 +78,7 @@ template <>
 int64 SparseTensorColumn<int64>::Feature(int64 batch, int64 n) const {
   const int64 start = feature_start_indices_[batch];
   if (DT_STRING == values_.dtype())
-    return Fingerprint64(values_.vec<string>().data()[start + n]);
+    return Fingerprint64(values_.vec<tstring>().data()[start + n]);
   return values_.vec<int64>().data()[start + n];
 }
@@ -87,7 +87,7 @@ template <>
 string SparseTensorColumn<string>::Feature(int64 batch, int64 n) const {
   const int64 start = feature_start_indices_[batch];
   if (DT_STRING == values_.dtype())
-    return values_.vec<string>().data()[start + n];
+    return values_.vec<tstring>().data()[start + n];
   return std::to_string(values_.vec<int64>().data()[start + n]);
 }
@@ -95,7 +95,7 @@ template <>
 StringPiece SparseTensorColumn<StringPiece>::Feature(int64 batch,
                                                      int64 n) const {
   const int64 start = feature_start_indices_[batch];
-  return values_.vec<string>().data()[start + n];
+  return values_.vec<tstring>().data()[start + n];
 }
 // A column that is backed by a dense tensor.
@@ -118,21 +118,21 @@ class DenseTensorColumn : public ColumnInterface<InternalType> {
 template <>
 int64 DenseTensorColumn<int64>::Feature(int64 batch, int64 n) const {
   if (DT_STRING == tensor_.dtype())
-    return Fingerprint64(tensor_.matrix<string>()(batch, n));
+    return Fingerprint64(tensor_.matrix<tstring>()(batch, n));
   return tensor_.matrix<int64>()(batch, n);
 }
 // Internal type is string or StringPiece when using StringCrosser.
 template <>
 string DenseTensorColumn<string>::Feature(int64 batch, int64 n) const {
-  if (DT_STRING == tensor_.dtype()) return tensor_.matrix<string>()(batch, n);
+  if (DT_STRING == tensor_.dtype()) return tensor_.matrix<tstring>()(batch, n);
   return std::to_string(tensor_.matrix<int64>()(batch, n));
 }
 template <>
 StringPiece DenseTensorColumn<StringPiece>::Feature(int64 batch,
                                                     int64 n) const {
-  return tensor_.matrix<string>()(batch, n);
+  return tensor_.matrix<tstring>()(batch, n);
 }
 // Updates Output tensors with sparse crosses.

View File

@@ -36,7 +36,7 @@ class DecodeLibsvmOp : public OpKernel {
   void Compute(OpKernelContext* ctx) override {
     const Tensor* input_tensor;
     OP_REQUIRES_OK(ctx, ctx->input("input", &input_tensor));
-    const auto& input_flat = input_tensor->flat<string>();
+    const auto& input_flat = input_tensor->flat<tstring>();
     Tensor* label_tensor;
     OP_REQUIRES_OK(

View File

@@ -72,7 +72,7 @@ Status GetMetaGraphDefFromExport(const StringPiece export_dir,
 // Creates a string tensor.
 Tensor CreateStringTensor(const string& value) {
   Tensor tensor(DT_STRING, TensorShape({}));
-  tensor.scalar<string>()() = value;
+  tensor.scalar<tstring>()() = value;
   return tensor;
 }

View File

@@ -52,7 +52,7 @@ class CreateTreeVariableOp : public OpKernel {
     auto* result = new DecisionTreeResource(param_proto_);
     if (!ParseProtoUnlimited(result->mutable_decision_tree(),
-                             tree_config_t->scalar<string>()())) {
+                             tree_config_t->scalar<tstring>()())) {
       result->Unref();
       OP_REQUIRES(context, false,
                   errors::InvalidArgument("Unable to parse tree config."));
@@ -85,7 +85,7 @@ class TreeSerializeOp : public OpKernel {
     Tensor* output_config_t = nullptr;
     OP_REQUIRES_OK(
         context, context->allocate_output(0, TensorShape(), &output_config_t));
-    output_config_t->scalar<string>()() =
+    output_config_t->scalar<tstring>()() =
         decision_tree_resource->decision_tree().SerializeAsString();
   }
 };
@@ -116,7 +116,7 @@ class TreeDeserializeOp : public OpKernel {
     decision_trees::Model* config =
        decision_tree_resource->mutable_decision_tree();
     OP_REQUIRES(context,
-                ParseProtoUnlimited(config, tree_config_t->scalar<string>()()),
+                ParseProtoUnlimited(config, tree_config_t->scalar<tstring>()()),
                 errors::InvalidArgument("Unable to parse tree config."));
     decision_tree_resource->MaybeInitialize();
   }
@@ -224,7 +224,7 @@ class TreePredictionsV4Op : public OpKernel {
                                    : 0);
     OP_REQUIRES_OK(context, context->allocate_output(1, output_paths_shape,
                                                      &output_tree_paths));
-    auto out_paths = output_tree_paths->unaligned_flat<string>();
+    auto out_paths = output_tree_paths->unaligned_flat<tstring>();
     // TODO(gilberth): If this slows down inference too much, consider having
     // a filter that only serializes paths for the predicted label that we're

View File

@ -38,7 +38,7 @@ float Convert(const string& in) {
void Evaluate(const Tensor& input_data, Tensor output_data, int32 start, void Evaluate(const Tensor& input_data, Tensor output_data, int32 start,
int32 end) { int32 end) {
auto out_data = output_data.unaligned_flat<float>(); auto out_data = output_data.unaligned_flat<float>();
const auto in_data = input_data.unaligned_flat<string>(); const auto in_data = input_data.unaligned_flat<tstring>();
for (int32 i = start; i < end; ++i) { for (int32 i = start; i < end; ++i) {
out_data(i) = Convert(in_data(i)); out_data(i) = Convert(in_data(i));

View File

@ -56,7 +56,7 @@ class CreateFertileStatsVariableOp : public OpKernel {
errors::InvalidArgument("Stats config must be a scalar.")); errors::InvalidArgument("Stats config must be a scalar."));
auto* result = new FertileStatsResource(param_proto_); auto* result = new FertileStatsResource(param_proto_);
FertileStats stats; FertileStats stats;
if (!ParseProtoUnlimited(&stats, stats_config_t->scalar<string>()())) { if (!ParseProtoUnlimited(&stats, stats_config_t->scalar<tstring>()())) {
result->Unref(); result->Unref();
OP_REQUIRES(context, false, OP_REQUIRES(context, false,
errors::InvalidArgument("Unable to parse stats config.")); errors::InvalidArgument("Unable to parse stats config."));
@ -98,7 +98,7 @@ class FertileStatsSerializeOp : public OpKernel {
FertileStats stats; FertileStats stats;
fertile_stats_resource->PackToProto(&stats); fertile_stats_resource->PackToProto(&stats);
output_config_t->scalar<string>()() = stats.SerializeAsString(); output_config_t->scalar<tstring>()() = stats.SerializeAsString();
} }
private: private:
@ -128,9 +128,10 @@ class FertileStatsDeserializeOp : public OpKernel {
// Deallocate all the previous objects on the resource. // Deallocate all the previous objects on the resource.
fertile_stats_resource->Reset(); fertile_stats_resource->Reset();
FertileStats stats; FertileStats stats;
OP_REQUIRES(context, OP_REQUIRES(
ParseProtoUnlimited(&stats, stats_config_t->scalar<string>()()), context,
errors::InvalidArgument("Unable to parse stats config.")); ParseProtoUnlimited(&stats, stats_config_t->scalar<tstring>()()),
errors::InvalidArgument("Unable to parse stats config."));
fertile_stats_resource->ExtractFromProto(stats); fertile_stats_resource->ExtractFromProto(stats);
fertile_stats_resource->MaybeInitialize(); fertile_stats_resource->MaybeInitialize();

View File

@ -103,7 +103,7 @@ float CandidateGraphRunner::SplitScore() {
void CandidateGraphRunner::GetSplit(decision_trees::BinaryNode* node) { void CandidateGraphRunner::GetSplit(decision_trees::BinaryNode* node) {
std::vector<Tensor> outputs; std::vector<Tensor> outputs;
RunOp(kNoOp, TensorNameValueList(), {kGetSplitName}, &outputs); RunOp(kNoOp, TensorNameValueList(), {kGetSplitName}, &outputs);
ParseProtoUnlimited(node, outputs[0].unaligned_flat<string>()(0)); ParseProtoUnlimited(node, outputs[0].unaligned_flat<tstring>()(0));
const auto& oblique = split_.inequality_left_child_test().oblique(); const auto& oblique = split_.inequality_left_child_test().oblique();
auto* new_split = auto* new_split =
node->mutable_inequality_left_child_test()->mutable_oblique(); node->mutable_inequality_left_child_test()->mutable_oblique();

View File

@ -1055,9 +1055,9 @@ class SessionMetadataReaderOp : public OpKernel {
OP_REQUIRES_OK(ctx, OP_REQUIRES_OK(ctx,
ctx->allocate_output("y", TensorShape({}), &out_tensor)); ctx->allocate_output("y", TensorShape({}), &out_tensor));
if (ctx->session_metadata() != nullptr) { if (ctx->session_metadata() != nullptr) {
out_tensor->scalar<string>()() = ctx->session_metadata()->DebugString(); out_tensor->scalar<tstring>()() = ctx->session_metadata()->DebugString();
} else { } else {
out_tensor->scalar<string>()() = ""; out_tensor->scalar<tstring>()() = "";
} }
} }
}; };
@ -1079,7 +1079,7 @@ TEST(DirectSessionTest, SessionMetadataAbsent) {
run_opts.set_inter_op_thread_pool(-1); run_opts.set_inter_op_thread_pool(-1);
auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr); auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr);
EXPECT_EQ("", outputs[0].scalar<string>()()); EXPECT_EQ("", outputs[0].scalar<tstring>()());
} }
TEST(DirectSessionTest, SessionMetadataPresent) { TEST(DirectSessionTest, SessionMetadataPresent) {
@ -1104,7 +1104,7 @@ TEST(DirectSessionTest, SessionMetadataPresent) {
SessionMetadata read_metadata; SessionMetadata read_metadata;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString( ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
outputs[0].scalar<string>()(), &read_metadata)); outputs[0].scalar<tstring>()(), &read_metadata));
EXPECT_EQ("name", read_metadata.name()); EXPECT_EQ("name", read_metadata.name());
EXPECT_EQ(1, read_metadata.version()); EXPECT_EQ(1, read_metadata.version());
} }
@ -1468,7 +1468,7 @@ TEST(DirectSessionTest, RunHandleTest) {
const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()(); const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()();
Tensor string_handle(DT_STRING, {}); Tensor string_handle(DT_STRING, {});
string_handle.flat<string>().setConstant(resource_handle.name()); string_handle.flat<tstring>().setConstant(resource_handle.name());
// Second run call: Use a handle. // Second run call: Use a handle.
std::vector<Tensor> outputs1; std::vector<Tensor> outputs1;
@ -1521,7 +1521,7 @@ TEST(DirectSessionTest, RunHandleTest_Callable) {
const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()(); const ResourceHandle& resource_handle = outputs[0].scalar<ResourceHandle>()();
Tensor string_handle(DT_STRING, {}); Tensor string_handle(DT_STRING, {});
string_handle.flat<string>().setConstant(resource_handle.name()); string_handle.flat<tstring>().setConstant(resource_handle.name());
// Second run call: Use a handle. // Second run call: Use a handle.
std::vector<Tensor> outputs1; std::vector<Tensor> outputs1;

View File

@ -33,7 +33,7 @@ class FindDeviceOpKernel : public OpKernel {
Tensor* device_tensor = nullptr; Tensor* device_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output("device_name", TensorShape{}, OP_REQUIRES_OK(ctx, ctx->allocate_output("device_name", TensorShape{},
&device_tensor)); &device_tensor));
device_tensor->scalar<string>()() = device_tensor->scalar<tstring>()() =
ctx->function_library()->device()->name(); ctx->function_library()->device()->name();
} }
}; };

View File

@ -33,7 +33,7 @@ class RendezvousUtilTest : public ::testing::Test {
// string -> Tensor<string> // string -> Tensor<string>
Tensor V(const string& content) { Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({})); Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<string>()() = content; tensor.scalar<tstring>()() = content;
return tensor; return tensor;
} }
@ -41,7 +41,7 @@ Tensor V(const string& content) {
string V(const Tensor& tensor) { string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING); CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape())); CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<string>()(); return tensor.scalar<tstring>()();
} }
string MakeStringKey(const string& name) { string MakeStringKey(const string& name) {

View File

@ -147,7 +147,7 @@ TEST_F(GrpcDebugTest, SendSingleDebugTensorViaGrpcTest) {
TEST_F(GrpcDebugTest, SendDebugTensorWithLargeStringAtIndex0ViaGrpcTest) { TEST_F(GrpcDebugTest, SendDebugTensorWithLargeStringAtIndex0ViaGrpcTest) {
Tensor tensor(DT_STRING, TensorShape({1, 1})); Tensor tensor(DT_STRING, TensorShape({1, 1}));
tensor.flat<string>()(0) = string(5000 * 1024, 'A'); tensor.flat<tstring>()(0) = string(5000 * 1024, 'A');
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0", const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo_tensor", 0, "DebugIdentity"); "foo_tensor", 0, "DebugIdentity");
const Status status = DebugIO::PublishDebugTensor( const Status status = DebugIO::PublishDebugTensor(
@ -162,8 +162,8 @@ TEST_F(GrpcDebugTest, SendDebugTensorWithLargeStringAtIndex0ViaGrpcTest) {
TEST_F(GrpcDebugTest, SendDebugTensorWithLargeStringAtIndex1ViaGrpcTest) { TEST_F(GrpcDebugTest, SendDebugTensorWithLargeStringAtIndex1ViaGrpcTest) {
Tensor tensor(DT_STRING, TensorShape({1, 2})); Tensor tensor(DT_STRING, TensorShape({1, 2}));
tensor.flat<string>()(0) = "A"; tensor.flat<tstring>()(0) = "A";
tensor.flat<string>()(1) = string(5000 * 1024, 'A'); tensor.flat<tstring>()(1) = string(5000 * 1024, 'A');
const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0", const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
"foo_tensor", 0, "DebugIdentity"); "foo_tensor", 0, "DebugIdentity");
const Status status = DebugIO::PublishDebugTensor( const Status status = DebugIO::PublishDebugTensor(

View File

@ -47,8 +47,8 @@ class DebugIOUtilsTest : public ::testing::Test {
tensor_a_->flat<float>()(3) = 0.0; tensor_a_->flat<float>()(3) = 0.0;
tensor_b_.reset(new Tensor(DT_STRING, TensorShape{2})); tensor_b_.reset(new Tensor(DT_STRING, TensorShape{2}));
tensor_b_->flat<string>()(0) = "corge"; tensor_b_->flat<tstring>()(0) = "corge";
tensor_b_->flat<string>()(1) = "garply"; tensor_b_->flat<tstring>()(1) = "garply";
} }
Env* env_; Env* env_;
@ -182,8 +182,8 @@ TEST_F(DebugIOUtilsTest, DumpStringTensorToFileSunnyDay) {
// Verify tensor shape and value. // Verify tensor shape and value.
ASSERT_EQ(tensor_b_->shape(), b_prime.shape()); ASSERT_EQ(tensor_b_->shape(), b_prime.shape());
for (int i = 0; i < b_prime.flat<string>().size(); ++i) { for (int i = 0; i < b_prime.flat<tstring>().size(); ++i) {
ASSERT_EQ(tensor_b_->flat<string>()(i), b_prime.flat<string>()(i)); ASSERT_EQ(tensor_b_->flat<tstring>()(i), b_prime.flat<tstring>()(i));
} }
// Tear down temporary file and directories. // Tear down temporary file and directories.

View File

@ -231,7 +231,7 @@ TEST_F(GrpcSessionDebugTest, MultiDevices_String) {
Graph graph(OpRegistry::Global()); Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_STRING, TensorShape({2, 2})); Tensor a_tensor(DT_STRING, TensorShape({2, 2}));
for (size_t i = 0; i < 4; ++i) { for (size_t i = 0; i < 4; ++i) {
a_tensor.flat<string>()(i) = "hello, world"; a_tensor.flat<tstring>()(i) = "hello, world";
} }
Node* a = test::graph::Constant(&graph, a_tensor); Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Identity(&graph, a); Node* b = test::graph::Identity(&graph, a);
@ -266,7 +266,7 @@ TEST_F(GrpcSessionDebugTest, MultiDevices_String) {
ASSERT_EQ(outputs[0].dtype(), DT_STRING); ASSERT_EQ(outputs[0].dtype(), DT_STRING);
ASSERT_EQ(outputs[0].NumElements(), 4); ASSERT_EQ(outputs[0].NumElements(), 4);
for (size_t i = 0; i < outputs[0].NumElements(); ++i) { for (size_t i = 0; i < outputs[0].NumElements(); ++i) {
EXPECT_EQ(outputs[0].flat<string>()(i), "hello, world"); EXPECT_EQ(outputs[0].flat<tstring>()(i), "hello, world");
} }
TF_CHECK_OK(session->Close()); TF_CHECK_OK(session->Close());
@ -278,7 +278,7 @@ TEST_F(GrpcSessionDebugTest, MultiDevices_String) {
ASSERT_EQ(1, dumped_tensors.size()); ASSERT_EQ(1, dumped_tensors.size());
ASSERT_EQ(TensorShape({2, 2}), dumped_tensors[0].shape()); ASSERT_EQ(TensorShape({2, 2}), dumped_tensors[0].shape());
for (size_t i = 0; i < 4; ++i) { for (size_t i = 0; i < 4; ++i) {
ASSERT_EQ("hello, world", dumped_tensors[0].flat<string>()(i)); ASSERT_EQ("hello, world", dumped_tensors[0].flat<tstring>()(i));
} }
DeleteDumpDir(); DeleteDumpDir();

View File

@ -187,8 +187,8 @@ void GrpcRPCFactory::CreateCall(const Tensor& request_t, const bool try_rpc,
void GrpcRPCFactory::StartCall(const Tensor& address_t, const Tensor& method_t, void GrpcRPCFactory::StartCall(const Tensor& address_t, const Tensor& method_t,
GrpcCall* call) { GrpcCall* call) {
auto address = address_t.flat<string>(); auto address = address_t.flat<tstring>();
auto method = method_t.flat<string>(); auto method = method_t.flat<tstring>();
// Stubs are maintained by the GrpcRPCFactory class and will be // Stubs are maintained by the GrpcRPCFactory class and will be
// deleted when the class is destroyed. // deleted when the class is destroyed.
::grpc::GenericStub* singleton_stub = nullptr; ::grpc::GenericStub* singleton_stub = nullptr;

View File

@ -501,7 +501,7 @@ TEST(GrpcSessionTest, MultiDevices_String) {
Graph graph(OpRegistry::Global()); Graph graph(OpRegistry::Global());
Tensor a_tensor(DT_STRING, TensorShape({2, 2})); Tensor a_tensor(DT_STRING, TensorShape({2, 2}));
for (int i = 0; i < 4; ++i) { for (int i = 0; i < 4; ++i) {
a_tensor.flat<string>()(i) = "hello, world"; a_tensor.flat<tstring>()(i) = "hello, world";
} }
Node* a = test::graph::Constant(&graph, a_tensor); Node* a = test::graph::Constant(&graph, a_tensor);
Node* b = test::graph::Identity(&graph, a); Node* b = test::graph::Identity(&graph, a);
@ -525,7 +525,7 @@ TEST(GrpcSessionTest, MultiDevices_String) {
ASSERT_EQ(outputs[0].dtype(), DT_STRING); ASSERT_EQ(outputs[0].dtype(), DT_STRING);
ASSERT_EQ(outputs[0].NumElements(), 4); ASSERT_EQ(outputs[0].NumElements(), 4);
for (int i = 0; i < outputs[0].NumElements(); ++i) { for (int i = 0; i < outputs[0].NumElements(); ++i) {
EXPECT_EQ(outputs[0].flat<string>()(i), "hello, world"); EXPECT_EQ(outputs[0].flat<tstring>()(i), "hello, world");
} }
TF_CHECK_OK(session->Close()); TF_CHECK_OK(session->Close());
} else { } else {

View File

@ -30,7 +30,7 @@ namespace tensorflow {
// string -> Tensor<string> // string -> Tensor<string>
Tensor V(const string& content) { Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({})); Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<string>()() = content; tensor.scalar<tstring>()() = content;
return tensor; return tensor;
} }
@ -38,7 +38,7 @@ Tensor V(const string& content) {
string V(const Tensor& tensor) { string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING); CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape())); CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<string>()(); return tensor.scalar<tstring>()();
} }
Rendezvous::ParsedKey MakeKey(const string& s) { Rendezvous::ParsedKey MakeKey(const string& s) {

View File

@ -114,13 +114,14 @@ Status ExtractExampleParserConfiguration(
for (int i = 0; i < num_sparse; ++i) { for (int i = 0; i < num_sparse; ++i) {
int input_idx = sparse_keys_start + i; int input_idx = sparse_keys_start + i;
(*var_len_features)[i].key = op_input_tensors[input_idx].scalar<string>()(); (*var_len_features)[i].key =
op_input_tensors[input_idx].scalar<tstring>()();
} }
for (int i = 0; i < num_dense; ++i) { for (int i = 0; i < num_dense; ++i) {
FixedLenFeature& config = (*fixed_len_features)[i]; FixedLenFeature& config = (*fixed_len_features)[i];
int dense_keys_offset = dense_keys_start + i; int dense_keys_offset = dense_keys_start + i;
config.key = op_input_tensors[dense_keys_offset].scalar<string>()(); config.key = op_input_tensors[dense_keys_offset].scalar<tstring>()();
int defaults_offset = dense_defaults_start + i; int defaults_offset = dense_defaults_start + i;
config.default_value = op_input_tensors[defaults_offset]; config.default_value = op_input_tensors[defaults_offset];

View File

@ -35,7 +35,7 @@ class TestKernel : public OpKernel {
Tensor* out_tensor = nullptr; Tensor* out_tensor = nullptr;
OP_REQUIRES_OK(context, context->allocate_output("ndef", TensorShape({}), OP_REQUIRES_OK(context, context->allocate_output("ndef", TensorShape({}),
&out_tensor)); &out_tensor));
out_tensor->scalar<string>()() = SummarizeNodeDef(def()); out_tensor->scalar<tstring>()() = SummarizeNodeDef(def());
} }
}; };
@ -87,7 +87,7 @@ class OpCompatibilityTest : public OpsTestBase {
TF_ASSERT_OK(RunOpKernel()); TF_ASSERT_OK(RunOpKernel());
} }
string Result() { return GetOutput(0)->scalar<string>()(); } string Result() { return GetOutput(0)->scalar<tstring>()(); }
void ExpectIncompatible(const OpDef& old_op_def, const OpDef& new_op_def, void ExpectIncompatible(const OpDef& old_op_def, const OpDef& new_op_def,
const string& error) { const string& error) {

View File

@ -214,7 +214,7 @@ string ReaderBase::GetNextWorkLocked(QueueInterface* queue,
context->SetStatus(errors::InvalidArgument( context->SetStatus(errors::InvalidArgument(
"Expected to dequeue a one-element string tensor")); "Expected to dequeue a one-element string tensor"));
} else { } else {
work = tuple[0].flat<string>()(0); work = tuple[0].flat<tstring>()(0);
} }
} }
n.Notify(); n.Notify();

View File

@ -86,7 +86,7 @@ class LocalRendezvousTest : public ::testing::Test {
// string -> Tensor<string> // string -> Tensor<string>
Tensor V(const string& content) { Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({})); Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<string>()() = content; tensor.scalar<tstring>()() = content;
return tensor; return tensor;
} }
@ -94,7 +94,7 @@ Tensor V(const string& content) {
string V(const Tensor& tensor) { string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING); CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape())); CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<string>()(); return tensor.scalar<tstring>()();
} }
Rendezvous::ParsedKey MakeKey(const string& name) { Rendezvous::ParsedKey MakeKey(const string& name) {

View File

@ -639,8 +639,8 @@ Status GetResourceFromContext(OpKernelContext* ctx, const string& input_name,
"Resource handle must have 2 elements, but had shape: ", "Resource handle must have 2 elements, but had shape: ",
tensor.shape().DebugString()); tensor.shape().DebugString());
} }
container = tensor.flat<string>()(0); container = tensor.flat<tstring>()(0);
shared_name = tensor.flat<string>()(1); shared_name = tensor.flat<tstring>()(1);
} }
return ctx->resource_manager()->Lookup(container, shared_name, resource); return ctx->resource_manager()->Lookup(container, shared_name, resource);
} }

View File

@ -96,7 +96,7 @@ class ResourceOpKernel : public OpKernel {
} }
if (!has_resource_type_) { if (!has_resource_type_) {
auto h = handle_.AccessTensor(context)->template flat<string>(); auto h = handle_.AccessTensor(context)->template flat<tstring>();
h(0) = cinfo_.container(); h(0) = cinfo_.container();
h(1) = cinfo_.name(); h(1) = cinfo_.name();
} }

View File

@ -480,7 +480,7 @@ TEST_F(TensorReshapeTest, ReshapeError) {
Tensor string_tensor{DT_STRING, {10}}; Tensor string_tensor{DT_STRING, {10}};
// Note that the error message compares # of elements, not # of bytes. // Note that the error message compares # of elements, not # of bytes.
EXPECT_DEATH((string_tensor.bit_casted_shaped<string, 1>({9})), "9 vs. 10"); EXPECT_DEATH((string_tensor.bit_casted_shaped<tstring, 1>({9})), "9 vs. 10");
} }
TEST_F(TensorReshapeTest, Flat) { TEST_F(TensorReshapeTest, Flat) {
@ -795,27 +795,27 @@ TEST(Tensor_Scalar, Basics) {
{ {
Tensor t(DT_STRING, TensorShape({})); Tensor t(DT_STRING, TensorShape({}));
EXPECT_EQ(1, t.NumElements()); EXPECT_EQ(1, t.NumElements());
auto Tt = t.scalar<string>(); auto Tt = t.scalar<tstring>();
EXPECT_EQ(1, Tt.size()); EXPECT_EQ(1, Tt.size());
EXPECT_EQ(0, Tt.rank()); EXPECT_EQ(0, Tt.rank());
t.scalar<string>()() = "foo"; t.scalar<tstring>()() = "foo";
EXPECT_EQ("foo", Tt()); EXPECT_EQ("foo", Tt());
} }
{ {
Tensor t(DT_STRING, TensorShape({1})); Tensor t(DT_STRING, TensorShape({1}));
EXPECT_EQ(1, t.NumElements()); EXPECT_EQ(1, t.NumElements());
auto Tt = t.vec<string>(); auto Tt = t.vec<tstring>();
EXPECT_EQ(1, Tt.size()); EXPECT_EQ(1, Tt.size());
t.flat<string>()(0) = "foo"; t.flat<tstring>()(0) = "foo";
EXPECT_EQ("foo", Tt(0)); EXPECT_EQ("foo", Tt(0));
} }
{ {
Tensor t(DT_STRING, TensorShape({1, 1, 1})); Tensor t(DT_STRING, TensorShape({1, 1, 1}));
EXPECT_EQ(1, t.NumElements()); EXPECT_EQ(1, t.NumElements());
auto Tt = t.scalar<string>(); auto Tt = t.scalar<tstring>();
EXPECT_EQ(1, Tt.size()); EXPECT_EQ(1, Tt.size());
EXPECT_EQ(0, Tt.rank()); EXPECT_EQ(0, Tt.rank());
t.flat<string>()(0) = "bar"; t.flat<tstring>()(0) = "bar";
EXPECT_EQ("bar", Tt()); EXPECT_EQ("bar", Tt());
} }
{ {
@ -860,7 +860,7 @@ TEST(Tensor_HostScalar, Basics) {
Tensor t("fooooooooooooooooooooooooooooooooooooo"); Tensor t("fooooooooooooooooooooooooooooooooooooo");
EXPECT_EQ(DT_STRING, t.dtype()); EXPECT_EQ(DT_STRING, t.dtype());
EXPECT_EQ(1, t.NumElements()); EXPECT_EQ(1, t.NumElements());
auto Tt = t.scalar<string>(); auto Tt = t.scalar<tstring>();
EXPECT_EQ(1, Tt.size()); EXPECT_EQ(1, Tt.size());
EXPECT_EQ(0, Tt.rank()); EXPECT_EQ(0, Tt.rank());
EXPECT_EQ("fooooooooooooooooooooooooooooooooooooo", Tt()); EXPECT_EQ("fooooooooooooooooooooooooooooooooooooo", Tt());
@ -980,7 +980,7 @@ TEST(Tensor_String, SimpleWithHelper) {
Tensor t2(DT_STRING, {2, 3}); Tensor t2(DT_STRING, {2, 3});
for (int i = 0; i < 2; ++i) { for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 3; ++j) { for (int j = 0; j < 3; ++j) {
t2.matrix<string>()(i, j) = strings::StrCat(i * 3 + j); t2.matrix<tstring>()(i, j) = strings::StrCat(i * 3 + j);
} }
} }
@ -1163,7 +1163,7 @@ TEST(Tensor, FailureToAllocate) {
// String // String
{ {
Tensor t(DT_STRING, TensorShape({1})); Tensor t(DT_STRING, TensorShape({1}));
t.vec<string>()(0) = "foo"; t.vec<tstring>()(0) = "foo";
TensorProto proto; TensorProto proto;
t.AsProtoField(&proto); t.AsProtoField(&proto);

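A minimal sketch of the tstring interop that the migrated tests above rely on: assignment from const char* and std::string, conversion to StringPiece, and comparison against a string literal. The function name is hypothetical and the sketch is illustrative only, not part of this change.

#include <string>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/stringpiece.h"

// Illustrative sketch only; TstringInteropSketch is a hypothetical helper.
bool TstringInteropSketch() {
  tensorflow::Tensor t(tensorflow::DT_STRING, tensorflow::TensorShape({2}));
  auto v = t.vec<tensorflow::tstring>();      // was t.vec<string>()
  v(0) = "foo";                               // assign from const char*
  v(1) = std::string("bar");                  // assign from std::string
  tensorflow::StringPiece piece = v(0);       // convert to StringPiece
  return v(0) == "foo" && piece.size() == 3;  // compare against a literal
}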
View File

@ -48,7 +48,7 @@ void DeepCopy(const Tensor& input, Tensor* output) {
input_data.size()); input_data.size());
} }
} else if (input.dtype() == DT_STRING) { } else if (input.dtype() == DT_STRING) {
output->unaligned_flat<string>() = input.unaligned_flat<string>(); output->unaligned_flat<tstring>() = input.unaligned_flat<tstring>();
} else { } else {
CHECK_EQ(DT_VARIANT, input.dtype()); CHECK_EQ(DT_VARIANT, input.dtype());
output->unaligned_flat<Variant>() = input.unaligned_flat<Variant>(); output->unaligned_flat<Variant>() = input.unaligned_flat<Variant>();
@ -103,7 +103,7 @@ Status Concat(const gtl::ArraySlice<Tensor>& tensors, Tensor* result) {
int64 offset = 0; int64 offset = 0;
for (const Tensor& tensor : tensors) { for (const Tensor& tensor : tensors) {
auto from_strings = tensor.flat<string>(); auto from_strings = tensor.flat<tstring>();
CHECK_LE(offset + tensor.NumElements(), result->NumElements()); CHECK_LE(offset + tensor.NumElements(), result->NumElements());
for (int i = 0; i < tensor.NumElements(); ++i) { for (int i = 0; i < tensor.NumElements(); ++i) {
to_strings[offset + i] = from_strings(i); to_strings[offset + i] = from_strings(i);
@ -155,7 +155,7 @@ Status Split(const Tensor& tensor, const gtl::ArraySlice<int64>& sizes,
if (tensor.dtype() != DT_STRING) { if (tensor.dtype() != DT_STRING) {
return errors::Internal("Unexpected data type"); return errors::Internal("Unexpected data type");
} }
auto from_strings = tensor.flat<string>(); auto from_strings = tensor.flat<tstring>();
int64 offset = 0; int64 offset = 0;
for (int64 size : sizes) { for (int64 size : sizes) {

View File

@ -111,12 +111,12 @@ TEST(TensorUtil, DeepCopy) {
// Test string deep copy // Test string deep copy
Tensor str1(DT_STRING, TensorShape({2})); Tensor str1(DT_STRING, TensorShape({2}));
str1.flat<string>()(0) = "foo1"; str1.flat<tstring>()(0) = "foo1";
str1.flat<string>()(1) = "foo2"; str1.flat<tstring>()(1) = "foo2";
Tensor str2 = tensor::DeepCopy(str1); Tensor str2 = tensor::DeepCopy(str1);
str2.flat<string>()(0) = "bar1"; str2.flat<tstring>()(0) = "bar1";
str2.flat<string>()(1) = "bar2"; str2.flat<tstring>()(1) = "bar2";
EXPECT_NE(str2.flat<string>()(0), str1.flat<string>()(0)); EXPECT_NE(str2.flat<tstring>()(0), str1.flat<tstring>()(0));
} }
TEST(TensorUtil, DeepCopySlice) { TEST(TensorUtil, DeepCopySlice) {
@ -151,7 +151,7 @@ TEST(TensorUtil, DeepCopySlice) {
TEST(TensorUtil, DeepCopySliceString) { TEST(TensorUtil, DeepCopySliceString) {
Tensor x(DT_STRING, TensorShape({10})); Tensor x(DT_STRING, TensorShape({10}));
x.flat<string>().setConstant("hello"); x.flat<tstring>().setConstant("hello");
// Slice 'x' -- y still refers to the same buffer. // Slice 'x' -- y still refers to the same buffer.
Tensor y = x.Slice(3, 7); Tensor y = x.Slice(3, 7);
@ -160,7 +160,7 @@ TEST(TensorUtil, DeepCopySliceString) {
Tensor z = tensor::DeepCopy(y); Tensor z = tensor::DeepCopy(y);
// Set x to be different. // Set x to be different.
x.flat<string>().setConstant("goodbye"); x.flat<tstring>().setConstant("goodbye");
EXPECT_EQ(TensorShape({10}), x.shape()); EXPECT_EQ(TensorShape({10}), x.shape());
EXPECT_EQ(TensorShape({4}), y.shape()); EXPECT_EQ(TensorShape({4}), y.shape());
@ -171,11 +171,11 @@ TEST(TensorUtil, DeepCopySliceString) {
// x and y should now all be 'goodbye', but z should be 'hello'. // x and y should now all be 'goodbye', but z should be 'hello'.
for (int i = 0; i < 10; ++i) { for (int i = 0; i < 10; ++i) {
EXPECT_EQ("goodbye", x.flat<string>()(i)); EXPECT_EQ("goodbye", x.flat<tstring>()(i));
} }
for (int i = 0; i < 4; ++i) { for (int i = 0; i < 4; ++i) {
EXPECT_EQ("goodbye", y.unaligned_flat<string>()(i)); EXPECT_EQ("goodbye", y.unaligned_flat<tstring>()(i));
EXPECT_EQ("hello", z.flat<string>()(i)); EXPECT_EQ("hello", z.flat<tstring>()(i));
} }
} }
@ -202,11 +202,12 @@ TEST(TensorUtil, DeepCopySliceVariant) {
// Each element of x and y should now be a DT_STRING Tensor containing "foo", // Each element of x and y should now be a DT_STRING Tensor containing "foo",
// but each element of z should be a DT_FLOAT tensor containing 42.0. // but each element of z should be a DT_FLOAT tensor containing 42.0.
for (int i = 0; i < 10; ++i) { for (int i = 0; i < 10; ++i) {
EXPECT_EQ("foo", x.flat<Variant>()(i).get<Tensor>()->scalar<string>()()); EXPECT_EQ("foo", x.flat<Variant>()(i).get<Tensor>()->scalar<tstring>()());
} }
for (int i = 0; i < 4; ++i) { for (int i = 0; i < 4; ++i) {
EXPECT_EQ("foo", EXPECT_EQ(
y.unaligned_flat<Variant>()(i).get<Tensor>()->scalar<string>()()); "foo",
y.unaligned_flat<Variant>()(i).get<Tensor>()->scalar<tstring>()());
EXPECT_EQ(42.0, z.flat<Variant>()(i).get<Tensor>()->scalar<float>()()); EXPECT_EQ(42.0, z.flat<Variant>()(i).get<Tensor>()->scalar<float>()());
} }
} }
@ -271,7 +272,7 @@ TEST(TensorUtil, Split) {
TEST(TensorUtil, ConcatSplitStrings) { TEST(TensorUtil, ConcatSplitStrings) {
Tensor x(DT_STRING, TensorShape({4, 3})); Tensor x(DT_STRING, TensorShape({4, 3}));
for (int i = 0; i < 4 * 3; ++i) { for (int i = 0; i < 4 * 3; ++i) {
x.flat<string>()(i) = strings::StrCat("foo_", i); x.flat<tstring>()(i) = strings::StrCat("foo_", i);
} }
std::vector<Tensor> split; std::vector<Tensor> split;
@ -280,15 +281,15 @@ TEST(TensorUtil, ConcatSplitStrings) {
TF_ASSERT_OK(tensor::Concat(split, &x_round_tripped)); TF_ASSERT_OK(tensor::Concat(split, &x_round_tripped));
ASSERT_EQ(x.shape(), x_round_tripped.shape()); ASSERT_EQ(x.shape(), x_round_tripped.shape());
for (int i = 0; i < 4 * 3; ++i) { for (int i = 0; i < 4 * 3; ++i) {
EXPECT_EQ(x.flat<string>()(i), x_round_tripped.flat<string>()(i)); EXPECT_EQ(x.flat<tstring>()(i), x_round_tripped.flat<tstring>()(i));
} }
// Ensure that no memory is being shared between 'x' and 'x_round_tripped'. // Ensure that no memory is being shared between 'x' and 'x_round_tripped'.
for (int i = 0; i < 4 * 3; ++i) { for (int i = 0; i < 4 * 3; ++i) {
x_round_tripped.flat<string>()(i) = strings::StrCat("bar_", i); x_round_tripped.flat<tstring>()(i) = strings::StrCat("bar_", i);
} }
for (int i = 0; i < 4 * 3; ++i) { for (int i = 0; i < 4 * 3; ++i) {
EXPECT_NE(x.flat<string>()(i), x_round_tripped.flat<string>()(i)); EXPECT_NE(x.flat<tstring>()(i), x_round_tripped.flat<tstring>()(i));
} }
} }

View File

@ -244,7 +244,7 @@ TEST(VariantOpCopyTest, CreateConstOnGPUFailsGracefully) {
// Create the input StoredTensorValue and serialize it. // Create the input StoredTensorValue and serialize it.
StoredTensorValue from; StoredTensorValue from;
from.stored = Tensor(DT_STRING, TensorShape({})); from.stored = Tensor(DT_STRING, TensorShape({}));
from.stored.scalar<string>()() = "hi"; from.stored.scalar<tstring>()() = "hi";
VariantTensorData data; VariantTensorData data;
data.set_type_name(from.TypeName()); data.set_type_name(from.TypeName());
from.Encode(&data); from.Encode(&data);
@ -292,7 +292,7 @@ TEST(VariantOpCopyTest, CreateCopyCPUToCPU) {
TEST(VariantOpCopyTest, CreateCopyCPUToCPUString) { TEST(VariantOpCopyTest, CreateCopyCPUToCPUString) {
Scope root = Scope::NewRootScope().WithDevice("/cpu:0"); Scope root = Scope::NewRootScope().WithDevice("/cpu:0");
Tensor t_str(DT_STRING, TensorShape({})); Tensor t_str(DT_STRING, TensorShape({}));
t_str.scalar<string>()() = "hi"; t_str.scalar<tstring>()() = "hi";
Output create_op = CreateTestVariant(root, t_str); Output create_op = CreateTestVariant(root, t_str);
Output identity = ops::Identity(root, create_op); Output identity = ops::Identity(root, create_op);
@ -309,7 +309,7 @@ TEST(VariantOpCopyTest, CreateCopyCPUToCPUString) {
EXPECT_EQ("StoredTensorValue", r1.TypeName()); EXPECT_EQ("StoredTensorValue", r1.TypeName());
const StoredTensorValue* v1 = r1.get<StoredTensorValue>(); const StoredTensorValue* v1 = r1.get<StoredTensorValue>();
EXPECT_NE(v1, nullptr); EXPECT_NE(v1, nullptr);
EXPECT_EQ("hi", v1->stored.scalar<string>()()); EXPECT_EQ("hi", v1->stored.scalar<tstring>()());
} }
} }
@ -356,7 +356,7 @@ TEST(VariantOpCopyTest, CreateCopyCPUToGPUStringFailsSafely) {
Scope root = Scope::NewRootScope().WithDevice("/cpu:0"); Scope root = Scope::NewRootScope().WithDevice("/cpu:0");
Scope with_gpu = root.WithDevice("/gpu:0"); Scope with_gpu = root.WithDevice("/gpu:0");
Tensor t_str(DT_STRING, TensorShape({})); Tensor t_str(DT_STRING, TensorShape({}));
t_str.scalar<string>()() = "hi"; t_str.scalar<tstring>()() = "hi";
Output create_op = CreateTestVariant(root, t_str); Output create_op = CreateTestVariant(root, t_str);
Output identity = ops::Identity(with_gpu, create_op); Output identity = ops::Identity(with_gpu, create_op);

View File

@ -172,8 +172,8 @@ StringPiece GetNodeNamePrefix(const Node* node) {
} }
void FillStringTensor(Tensor* dst, const Tensor& src) { void FillStringTensor(Tensor* dst, const Tensor& src) {
auto dst_flat = dst->flat<string>(); auto dst_flat = dst->flat<tstring>();
auto src_flat = src.flat<string>(); auto src_flat = src.flat<tstring>();
for (int i = 0; i < src.NumElements(); i++) { for (int i = 0; i < src.NumElements(); i++) {
dst_flat(i) = src_flat(i); dst_flat(i) = src_flat(i);
} }
@ -220,8 +220,8 @@ Status ConnectVariablesToSaveOp(Graph* graph, Node* save_op,
FillStringTensor(&new_shape_and_slices, shape_and_slices); FillStringTensor(&new_shape_and_slices, shape_and_slices);
for (int i = 0; i < var_size; i++) { for (int i = 0; i < var_size; i++) {
Node* var = added_variables[i]; Node* var = added_variables[i];
new_tensor_names.flat<string>()(tn_size + i) = var->name(); new_tensor_names.flat<tstring>()(tn_size + i) = var->name();
new_shape_and_slices.flat<string>()(tn_size + i) = ""; new_shape_and_slices.flat<tstring>()(tn_size + i) = "";
var_nodeouts.emplace_back(var); var_nodeouts.emplace_back(var);
} }
save_op_builder = save_op_builder.Input(var_nodeouts); save_op_builder = save_op_builder.Input(var_nodeouts);
@ -275,7 +275,7 @@ Status AddRestoreVariableSubgraphs(Graph* graph, Node* save_op,
// Construct the tensor_names input with the variable name. // Construct the tensor_names input with the variable name.
Node* tensor_names; Node* tensor_names;
Tensor tensor_names_val(DT_STRING, TensorShape({1})); Tensor tensor_names_val(DT_STRING, TensorShape({1}));
tensor_names_val.flat<string>()(0) = var->name(); tensor_names_val.flat<tstring>()(0) = var->name();
TF_RETURN_IF_ERROR(NodeBuilder(tensor_names_op_name, "Const") TF_RETURN_IF_ERROR(NodeBuilder(tensor_names_op_name, "Const")
.Attr("dtype", DT_STRING) .Attr("dtype", DT_STRING)
.Attr("value", tensor_names_val) .Attr("value", tensor_names_val)
@ -284,7 +284,7 @@ Status AddRestoreVariableSubgraphs(Graph* graph, Node* save_op,
// Construct the shape_and_slices input with empty string. // Construct the shape_and_slices input with empty string.
Node* shape_and_slices; Node* shape_and_slices;
Tensor shape_and_slices_val(DT_STRING, TensorShape({1})); Tensor shape_and_slices_val(DT_STRING, TensorShape({1}));
shape_and_slices_val.flat<string>()(0) = ""; shape_and_slices_val.flat<tstring>()(0) = "";
TF_RETURN_IF_ERROR(NodeBuilder(shape_and_slices_op_name, "Const") TF_RETURN_IF_ERROR(NodeBuilder(shape_and_slices_op_name, "Const")
.Attr("dtype", DT_STRING) .Attr("dtype", DT_STRING)
.Attr("value", shape_and_slices_val) .Attr("value", shape_and_slices_val)

View File

@ -130,7 +130,7 @@ static void ExtractExtraProperties(
if (tensor.NumElements() != 1) { if (tensor.NumElements() != 1) {
continue; continue;
} }
const string filename = tensor.scalar<string>()(); const string filename = tensor.scalar<tstring>()();
Env* env = Env::Default(); Env* env = Env::Default();
FileStatistics stat; FileStatistics stat;

View File

@ -98,7 +98,7 @@ TEST_F(GraphViewTest, OpPortIdToArgIdSparseSplit) {
TEST_F(GraphViewTest, ParseSingleExample) { TEST_F(GraphViewTest, ParseSingleExample) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope(); tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const<string>(s.WithOpName("a"), "", {}); Output a = ops::Const<tstring>(s.WithOpName("a"), "", {});
Output b = ops::Const<int64>(s.WithOpName("b"), 1, {1, 1}); Output b = ops::Const<int64>(s.WithOpName("b"), 1, {1, 1});
ops::ParseSingleExample c(s.WithOpName("c"), a, {b, b}, 2, {"w", "x"}, ops::ParseSingleExample c(s.WithOpName("c"), a, {b, b}, 2, {"w", "x"},
{"y", "z"}, {DT_INT64, DT_INT64}, {{1}, {1}}); {"y", "z"}, {DT_INT64, DT_INT64}, {{1}, {1}});

View File

@ -116,7 +116,7 @@ class AsStringOp : public OpKernel {
OP_REQUIRES_OK(context, OP_REQUIRES_OK(context,
context->allocate_output("output", input_tensor->shape(), context->allocate_output("output", input_tensor->shape(),
&output_tensor)); &output_tensor));
auto output_flat = output_tensor->flat<string>(); auto output_flat = output_tensor->flat<tstring>();
#define ENCODE_TYPE(type, T, enc_str) \ #define ENCODE_TYPE(type, T, enc_str) \
case (type): { \ case (type): { \

View File

@ -308,7 +308,7 @@ class Barrier : public ResourceBase {
int component_index, int i, int component_index, int i,
std::vector<Tuple>* ready_tuples, bool* new_elements) std::vector<Tuple>* ready_tuples, bool* new_elements)
EXCLUSIVE_LOCKS_REQUIRED(mu_) { EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto keys_vec = keys.flat<string>(); auto keys_vec = keys.flat<tstring>();
auto values_matrix = values.flat_outer_dims<T>(); auto values_matrix = values.flat_outer_dims<T>();
PersistentTuple* element_ptr; PersistentTuple* element_ptr;
@ -392,7 +392,7 @@ class Barrier : public ResourceBase {
&key, &allocated_key)); &key, &allocated_key));
ready_tuple.push_back(*element[0].AccessTensor(ctx)); // index ready_tuple.push_back(*element[0].AccessTensor(ctx)); // index
ready_tuple.push_back(*allocated_key); // key ready_tuple.push_back(*allocated_key); // key
ready_tuple[1].scalar<string>()() = keys_vec(i); // set the key ready_tuple[1].scalar<tstring>()() = keys_vec(i); // set the key
for (int j = 1; j < num_components() + 1; ++j) { for (int j = 1; j < num_components() + 1; ++j) {
ready_tuple.push_back(*element[j].AccessTensor(ctx)); ready_tuple.push_back(*element[j].AccessTensor(ctx));
} }

View File

@ -36,8 +36,8 @@ class EncodeBase64Op : public OpKernel {
OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(), OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
&output_tensor)); &output_tensor));
auto input = input_tensor.flat<string>(); auto input = input_tensor.flat<tstring>();
auto output = output_tensor->flat<string>(); auto output = output_tensor->flat<tstring>();
for (int64 i = 0; i < input.dimension(0); ++i) { for (int64 i = 0; i < input.dimension(0); ++i) {
OP_REQUIRES_OK(context, Base64Encode(input(i), pad_, &output(i))); OP_REQUIRES_OK(context, Base64Encode(input(i), pad_, &output(i)));
@ -61,8 +61,8 @@ class DecodeBase64Op : public OpKernel {
OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(), OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
&output_tensor)); &output_tensor));
auto input = input_tensor.flat<string>(); auto input = input_tensor.flat<tstring>();
auto output = output_tensor->flat<string>(); auto output = output_tensor->flat<tstring>();
for (int64 i = 0; i < input.dimension(0); ++i) { for (int64 i = 0; i < input.dimension(0); ++i) {
OP_REQUIRES_OK(context, Base64Decode(input(i), &output(i))); OP_REQUIRES_OK(context, Base64Decode(input(i), &output(i)));

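The kernels above share one allocate-then-write shape: allocate an output tensor of the input's shape, then loop over the flat tstring views of both. A minimal sketch under that assumption follows; CopyStrings is a hypothetical helper, not part of this change.

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"

// Illustrative sketch only; CopyStrings is a hypothetical helper showing the
// allocate-then-write pattern used by the string kernels above.
void CopyStrings(tensorflow::OpKernelContext* context) {
  const tensorflow::Tensor& input_tensor = context->input(0);
  tensorflow::Tensor* output_tensor = nullptr;
  OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                   &output_tensor));
  auto input = input_tensor.flat<tensorflow::tstring>();     // was flat<string>()
  auto output = output_tensor->flat<tensorflow::tstring>();  // was flat<string>()
  for (tensorflow::int64 i = 0; i < input.size(); ++i) {
    output(i) = input(i);  // tstring copy-assigns element-wise
  }
}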
View File

@ -324,7 +324,7 @@ class BoostedTreesExampleDebugOutputsOp : public OpKernel {
context, context->allocate_output("examples_debug_outputs_serialized", context, context->allocate_output("examples_debug_outputs_serialized",
{batch_size}, &output_debug_info_t)); {batch_size}, &output_debug_info_t));
// Will contain serialized protos, per example. // Will contain serialized protos, per example.
auto output_debug_info = output_debug_info_t->flat<string>(); auto output_debug_info = output_debug_info_t->flat<tstring>();
const int32 last_tree = resource->num_trees() - 1; const int32 last_tree = resource->num_trees() - 1;
// For each given example, traverse through all trees keeping track of the // For each given example, traverse through all trees keeping track of the

View File

@ -51,7 +51,7 @@ class BoostedTreesCreateEnsembleOp : public OpKernel {
std::unique_ptr<BoostedTreesEnsembleResource> result( std::unique_ptr<BoostedTreesEnsembleResource> result(
new BoostedTreesEnsembleResource()); new BoostedTreesEnsembleResource());
if (!result->InitFromSerialized( if (!result->InitFromSerialized(
tree_ensemble_serialized_t->scalar<string>()(), stamp_token)) { tree_ensemble_serialized_t->scalar<tstring>()(), stamp_token)) {
result->Unref(); result->Unref();
OP_REQUIRES( OP_REQUIRES(
context, false, context, false,
@ -152,7 +152,7 @@ class BoostedTreesSerializeEnsembleOp : public OpKernel {
Tensor* output_proto_t = nullptr; Tensor* output_proto_t = nullptr;
OP_REQUIRES_OK(context, OP_REQUIRES_OK(context,
context->allocate_output(1, TensorShape(), &output_proto_t)); context->allocate_output(1, TensorShape(), &output_proto_t));
output_proto_t->scalar<string>()() = output_proto_t->scalar<tstring>()() =
tree_ensemble_resource->SerializeAsString(); tree_ensemble_resource->SerializeAsString();
} }
}; };
@ -187,7 +187,7 @@ class BoostedTreesDeserializeEnsembleOp : public OpKernel {
OP_REQUIRES( OP_REQUIRES(
context, context,
tree_ensemble_resource->InitFromSerialized( tree_ensemble_resource->InitFromSerialized(
tree_ensemble_serialized_t->scalar<string>()(), stamp_token), tree_ensemble_serialized_t->scalar<tstring>()(), stamp_token),
errors::InvalidArgument("Unable to parse tree ensemble proto.")); errors::InvalidArgument("Unable to parse tree ensemble proto."));
} }
}; };

View File

@ -393,7 +393,7 @@ class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel {
OP_REQUIRES_OK( OP_REQUIRES_OK(
context, context->allocate_output("split_with_default_directions", context, context->allocate_output("split_with_default_directions",
{num_nodes}, &output_split_types_t)); {num_nodes}, &output_split_types_t));
auto output_split_types_vec = output_split_types_t->vec<string>(); auto output_split_types_vec = output_split_types_t->vec<tstring>();
// Sets output tensors from vectors. // Sets output tensors from vectors.
for (int i = 0; i < num_nodes; ++i) { for (int i = 0; i < num_nodes; ++i) {
@ -677,7 +677,7 @@ class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel {
OP_REQUIRES_OK( OP_REQUIRES_OK(
context, context->allocate_output("split_with_default_directions", context, context->allocate_output("split_with_default_directions",
{num_nodes}, &output_split_types_t)); {num_nodes}, &output_split_types_t));
auto output_split_types_vec = output_split_types_t->vec<string>(); auto output_split_types_vec = output_split_types_t->vec<tstring>();
// Sets output tensors from vectors. // Sets output tensors from vectors.
for (int i = 0; i < num_nodes; ++i) { for (int i = 0; i < num_nodes; ++i) {

View File

@ -113,7 +113,7 @@ class ConditionalAccumulatorBaseOp : public OpKernel {
// Verify that the shared accumulator is compatible // Verify that the shared accumulator is compatible
// with the requested arguments. // with the requested arguments.
TF_RETURN_IF_ERROR(accumulator->MatchesNodeDef(def())); TF_RETURN_IF_ERROR(accumulator->MatchesNodeDef(def()));
auto h = accumulator_handle_.AccessTensor(ctx)->template flat<string>(); auto h = accumulator_handle_.AccessTensor(ctx)->template flat<tstring>();
h(0) = cinfo_.container(); h(0) = cinfo_.container();
h(1) = cinfo_.name(); h(1) = cinfo_.name();
accumulator_handle_set_ = true; accumulator_handle_set_ = true;

View File

@ -85,7 +85,7 @@ class ResourceConditionalAccumulatorOp : public ConditionalAccumulatorBaseOp {
void SetHandleToOutput(OpKernelContext* ctx) void SetHandleToOutput(OpKernelContext* ctx)
SHARED_LOCKS_REQUIRED(mu_) override { SHARED_LOCKS_REQUIRED(mu_) override {
auto h = accumulator_handle_.AccessTensor(ctx)->template flat<string>(); auto h = accumulator_handle_.AccessTensor(ctx)->template flat<tstring>();
h(0) = cinfo_.container(); h(0) = cinfo_.container();
h(1) = cinfo_.name(); h(1) = cinfo_.name();
OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput( OP_REQUIRES_OK(ctx, MakeResourceHandleToOutput(

View File

@ -40,7 +40,7 @@ void DatasetToGraphOp::Compute(OpKernelContext* ctx) {
ctx, AsGraphDef(ctx, dataset, SerializationContext({}), &graph_def)); ctx, AsGraphDef(ctx, dataset, SerializationContext({}), &graph_def));
Tensor* result; Tensor* result;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &result)); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &result));
result->scalar<string>()() = graph_def.SerializeAsString(); result->scalar<tstring>()() = graph_def.SerializeAsString();
} }
void DatasetCardinalityOp::Compute(OpKernelContext* ctx) { void DatasetCardinalityOp::Compute(OpKernelContext* ctx) {

View File

@ -93,7 +93,7 @@ class CSVDatasetOp : public DatasetOpKernel {
std::vector<string> filenames; std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements()); filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) { for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<string>()(i)); filenames.push_back(filenames_tensor->flat<tstring>()(i));
} }
io::ZlibCompressionOptions zlib_compression_options = io::ZlibCompressionOptions zlib_compression_options =
@ -719,10 +719,10 @@ class CSVDatasetOp : public DatasetOpKernel {
} }
case DT_STRING: { case DT_STRING: {
if (field.empty() || field == dataset()->na_value_) { if (field.empty() || field == dataset()->na_value_) {
component.scalar<string>()() = component.scalar<tstring>()() =
dataset()->record_defaults_[output_idx].flat<string>()(0); dataset()->record_defaults_[output_idx].flat<tstring>()(0);
} else { } else {
component.scalar<string>()() = string(field); component.scalar<tstring>()() = string(field);
} }
break; break;
} }

View File

@ -38,7 +38,7 @@ class LMDBDatasetOp : public DatasetOpKernel {
std::vector<string> filenames; std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements()); filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) { for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<string>()(i)); filenames.push_back(filenames_tensor->flat<tstring>()(i));
} }
*output = new Dataset(ctx, filenames); *output = new Dataset(ctx, filenames);
@ -95,13 +95,13 @@ class LMDBDatasetOp : public DatasetOpKernel {
out_tensors->emplace_back(ctx->allocator({}), DT_STRING, out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
TensorShape({})); TensorShape({}));
Tensor& key_tensor = out_tensors->back(); Tensor& key_tensor = out_tensors->back();
key_tensor.scalar<string>()() = string( key_tensor.scalar<tstring>()() = string(
static_cast<const char*>(mdb_key_.mv_data), mdb_key_.mv_size); static_cast<const char*>(mdb_key_.mv_data), mdb_key_.mv_size);
out_tensors->emplace_back(ctx->allocator({}), DT_STRING, out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
TensorShape({})); TensorShape({}));
Tensor& value_tensor = out_tensors->back(); Tensor& value_tensor = out_tensors->back();
value_tensor.scalar<string>()() = value_tensor.scalar<tstring>()() =
string(static_cast<const char*>(mdb_value_.mv_data), string(static_cast<const char*>(mdb_value_.mv_data),
mdb_value_.mv_size); mdb_value_.mv_size);

View File

@ -42,7 +42,7 @@ class MatchingFilesDatasetOp : public DatasetOpKernel {
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override { void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
const Tensor* patterns_t; const Tensor* patterns_t;
OP_REQUIRES_OK(ctx, ctx->input("patterns", &patterns_t)); OP_REQUIRES_OK(ctx, ctx->input("patterns", &patterns_t));
const auto patterns = patterns_t->flat<string>(); const auto patterns = patterns_t->flat<tstring>();
size_t num_patterns = static_cast<size_t>(patterns.size()); size_t num_patterns = static_cast<size_t>(patterns.size());
std::vector<string> pattern_strs; std::vector<string> pattern_strs;
pattern_strs.reserve(num_patterns); pattern_strs.reserve(num_patterns);
@ -126,7 +126,7 @@ class MatchingFilesDatasetOp : public DatasetOpKernel {
current_path.first.end(), '/', '\\'); current_path.first.end(), '/', '\\');
} }
filepath_tensor.scalar<string>()() = filepath_tensor.scalar<tstring>()() =
std::move(current_path.first); std::move(current_path.first);
out_tensors->emplace_back(std::move(filepath_tensor)); out_tensors->emplace_back(std::move(filepath_tensor));
*end_of_sequence = false; *end_of_sequence = false;

View File

@ -42,7 +42,7 @@ class IteratorGetDeviceOp : public OpKernel {
// NOTE(mrry): Since the operation's input is a resource, we must be // NOTE(mrry): Since the operation's input is a resource, we must be
// colocated with it, and so we can simply return the current device's // colocated with it, and so we can simply return the current device's
// name without looking at the input. // name without looking at the input.
device_name_t->scalar<string>()() = ctx->device()->name(); device_name_t->scalar<tstring>()() = ctx->device()->name();
} }
}; };

View File

@ -267,7 +267,7 @@ class StatsAggregatorSummaryOp : public OpKernel {
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &summary_t)); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &summary_t));
Summary summary; Summary summary;
resource->stats_aggregator()->EncodeToProto(&summary); resource->stats_aggregator()->EncodeToProto(&summary);
summary_t->scalar<string>()() = summary.SerializeAsString(); summary_t->scalar<tstring>()() = summary.SerializeAsString();
} }
}; };

View File

@ -122,7 +122,7 @@ class ToTFRecordOp : public AsyncOpKernel {
if (!end_of_sequence) { if (!end_of_sequence) {
OP_REQUIRES_OK_ASYNC( OP_REQUIRES_OK_ASYNC(
ctx, writer->WriteRecord(components[0].scalar<string>()()), ctx, writer->WriteRecord(components[0].scalar<tstring>()()),
done); done);
} }
components.clear(); components.clear();

View File

@ -171,7 +171,7 @@ class UniqueDatasetOp : public UnaryDatasetOpKernel {
return Hash64(t.tensor_data().data(), t.tensor_data().size()); return Hash64(t.tensor_data().data(), t.tensor_data().size());
} else { } else {
DCHECK_EQ(DT_STRING, t.dtype()); DCHECK_EQ(DT_STRING, t.dtype());
auto flat_t = t.flat<string>(); auto flat_t = t.flat<tstring>();
uint64 hash = 0; uint64 hash = 0;
for (int64 i = 0; i < t.NumElements(); ++i) { for (int64 i = 0; i < t.NumElements(); ++i) {
hash = Hash64Combine(hash, Hash64(flat_t(i))); hash = Hash64Combine(hash, Hash64(flat_t(i)));

View File

@ -141,7 +141,7 @@ class FixedLengthRecordDatasetOp::Dataset : public DatasetBase {
// Produce the record as output. // Produce the record as output.
Tensor record_tensor(ctx->allocator({}), DT_STRING, {}); Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<string>()() = record; record_tensor.scalar<tstring>()() = record;
out_tensors->emplace_back(std::move(record_tensor)); out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false; *end_of_sequence = false;
return Status::OK(); return Status::OK();
@ -264,7 +264,7 @@ class FixedLengthRecordDatasetOp::Dataset : public DatasetBase {
// Produce the record as output. // Produce the record as output.
Tensor record_tensor(ctx->allocator({}), DT_STRING, {}); Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<string>()() = std::move(record); record_tensor.scalar<tstring>()() = std::move(record);
out_tensors->emplace_back(std::move(record_tensor)); out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false; *end_of_sequence = false;
return Status::OK(); return Status::OK();
@ -282,7 +282,7 @@ class FixedLengthRecordDatasetOp::Dataset : public DatasetBase {
lookahead_cache_.substr(dataset()->record_bytes_); lookahead_cache_.substr(dataset()->record_bytes_);
// Produce the record as output. // Produce the record as output.
Tensor record_tensor(ctx->allocator({}), DT_STRING, {}); Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<string>()() = std::move(record); record_tensor.scalar<tstring>()() = std::move(record);
out_tensors->emplace_back(std::move(record_tensor)); out_tensors->emplace_back(std::move(record_tensor));
*end_of_sequence = false; *end_of_sequence = false;
return Status::OK(); return Status::OK();
@ -459,7 +459,7 @@ void FixedLengthRecordDatasetOp::MakeDataset(OpKernelContext* ctx,
std::vector<string> filenames; std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements()); filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) { for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<string>()(i)); filenames.push_back(filenames_tensor->flat<tstring>()(i));
} }
int64 header_bytes = -1; int64 header_bytes = -1;

View File

@ -1002,7 +1002,7 @@ void IteratorToStringHandleOp::Compute(OpKernelContext* ctx) {
Tensor* string_handle_t; Tensor* string_handle_t;
OP_REQUIRES_OK(ctx, OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &string_handle_t)); ctx->allocate_output(0, TensorShape({}), &string_handle_t));
string_handle_t->scalar<string>()() = string_handle_t->scalar<tstring>()() =
resource_handle_t.scalar<ResourceHandle>()().SerializeAsString(); resource_handle_t.scalar<ResourceHandle>()().SerializeAsString();
} }
@ -1026,7 +1026,7 @@ void IteratorFromStringHandleOp::Compute(OpKernelContext* ctx) {
ResourceHandle resource_handle; ResourceHandle resource_handle;
OP_REQUIRES( OP_REQUIRES(
ctx, resource_handle.ParseFromString(string_handle_t.scalar<string>()()), ctx, resource_handle.ParseFromString(string_handle_t.scalar<tstring>()()),
errors::InvalidArgument( errors::InvalidArgument(
"Could not parse string_handle as a valid ResourceHandle")); "Could not parse string_handle as a valid ResourceHandle"));

View File

@ -644,7 +644,7 @@ class MultiDeviceIteratorToStringHandleOp : public OpKernel {
Tensor* string_handle_t; Tensor* string_handle_t;
OP_REQUIRES_OK(ctx, OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &string_handle_t)); ctx->allocate_output(0, TensorShape({}), &string_handle_t));
string_handle_t->scalar<string>()() = string_handle_t->scalar<tstring>()() =
resource_handle_t.scalar<ResourceHandle>()().SerializeAsString(); resource_handle_t.scalar<ResourceHandle>()().SerializeAsString();
} }
}; };
@ -675,7 +675,7 @@ class MultiDeviceIteratorFromStringHandleOp : public OpKernel {
ResourceHandle resource_handle; ResourceHandle resource_handle;
OP_REQUIRES( OP_REQUIRES(
ctx, ctx,
resource_handle.ParseFromString(string_handle_t.scalar<string>()()), resource_handle.ParseFromString(string_handle_t.scalar<tstring>()()),
errors::InvalidArgument( errors::InvalidArgument(
"Could not parse string_handle as a valid ResourceHandle")); "Could not parse string_handle as a valid ResourceHandle"));

View File

@ -108,7 +108,7 @@ class TextLineDatasetOp::Dataset : public DatasetBase {
line_contents.size()); line_contents.size());
out_tensors->emplace_back(ctx->allocator({}), DT_STRING, out_tensors->emplace_back(ctx->allocator({}), DT_STRING,
TensorShape({})); TensorShape({}));
out_tensors->back().scalar<string>()() = std::move(line_contents); out_tensors->back().scalar<tstring>()() = std::move(line_contents);
*end_of_sequence = false; *end_of_sequence = false;
return Status::OK(); return Status::OK();
} else if (!errors::IsOutOfRange(s)) { } else if (!errors::IsOutOfRange(s)) {
@ -266,7 +266,7 @@ void TextLineDatasetOp::MakeDataset(OpKernelContext* ctx,
std::vector<string> filenames; std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements()); filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) { for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
filenames.push_back(filenames_tensor->flat<string>()(i)); filenames.push_back(filenames_tensor->flat<tstring>()(i));
} }
*output = new Dataset(ctx, std::move(filenames), compression_type, *output = new Dataset(ctx, std::move(filenames), compression_type,

View File

@ -108,7 +108,7 @@ class TFRecordDatasetOp::Dataset : public DatasetBase {
reader_->ReadRecord(&out_tensors->back().scalar<string>()()); reader_->ReadRecord(&out_tensors->back().scalar<string>()());
if (s.ok()) { if (s.ok()) {
metrics::RecordTFDataBytesRead( metrics::RecordTFDataBytesRead(
kDatasetType, out_tensors->back().scalar<string>()().size()); kDatasetType, out_tensors->back().scalar<tstring>()().size());
*end_of_sequence = false; *end_of_sequence = false;
return Status::OK(); return Status::OK();
} }
@ -224,8 +224,8 @@ void TFRecordDatasetOp::MakeDataset(OpKernelContext* ctx,
std::vector<string> filenames; std::vector<string> filenames;
filenames.reserve(filenames_tensor->NumElements()); filenames.reserve(filenames_tensor->NumElements());
for (int i = 0; i < filenames_tensor->NumElements(); ++i) { for (int i = 0; i < filenames_tensor->NumElements(); ++i) {
VLOG(2) << "Reading file: " << filenames_tensor->flat<string>()(i); VLOG(2) << "Reading file: " << filenames_tensor->flat<tstring>()(i);
filenames.push_back(filenames_tensor->flat<string>()(i)); filenames.push_back(filenames_tensor->flat<tstring>()(i));
} }
string compression_type; string compression_type;

View File

@ -54,7 +54,7 @@ class DecodeBmpOp : public OpKernel {
contents.shape().DebugString())); contents.shape().DebugString()));
// Start decoding image to get shape details // Start decoding image to get shape details
const StringPiece input = contents.scalar<string>()(); const StringPiece input = contents.scalar<tstring>()();
OP_REQUIRES(context, (32 <= input.size()), OP_REQUIRES(context, (32 <= input.size()),
errors::InvalidArgument("Incomplete bmp content, requires at " errors::InvalidArgument("Incomplete bmp content, requires at "

View File

@ -84,13 +84,13 @@ class DecodeCompressedOp : public OpKernel {
void Compute(OpKernelContext* context) override { void Compute(OpKernelContext* context) override {
const Tensor* bytes_tensor; const Tensor* bytes_tensor;
OP_REQUIRES_OK(context, context->input("bytes", &bytes_tensor)); OP_REQUIRES_OK(context, context->input("bytes", &bytes_tensor));
const auto& bytes_flat = bytes_tensor->flat<string>(); const auto& bytes_flat = bytes_tensor->flat<tstring>();
Tensor* output_tensor = nullptr; Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(context, OP_REQUIRES_OK(context,
context->allocate_output("output", bytes_tensor->shape(), context->allocate_output("output", bytes_tensor->shape(),
&output_tensor)); &output_tensor));
auto output_flat = output_tensor->flat<string>(); auto output_flat = output_tensor->flat<tstring>();
if (compression_type_.empty()) { if (compression_type_.empty()) {
for (int64 i = 0; i < bytes_flat.size(); i++) { for (int64 i = 0; i < bytes_flat.size(); i++) {
output_flat(i) = bytes_flat(i); output_flat(i) = bytes_flat(i);
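DecodeCompressed reads every element through the input's flat<tstring>() view and writes through the matching output view. A short sketch of that element-wise pattern on two plain Tensors, with the OpKernelContext plumbing omitted; the helper name is illustrative:

#include "tensorflow/core/framework/tensor.h"

void CopyStringElements(const tensorflow::Tensor& bytes,
                        tensorflow::Tensor* output) {
  const auto bytes_flat = bytes.flat<tensorflow::tstring>();
  auto output_flat = output->flat<tensorflow::tstring>();
  // Both views index the same number of elements; assignment copies the bytes.
  for (tensorflow::int64 i = 0; i < bytes_flat.size(); ++i) {
    output_flat(i) = bytes_flat(i);
  }
}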

View File

@ -70,7 +70,7 @@ class DecodeCSVOp : public OpKernel {
" has ", record_defaults[i].NumElements())); " has ", record_defaults[i].NumElements()));
} }
auto records_t = records->flat<string>(); auto records_t = records->flat<tstring>();
int64 records_size = records_t.size(); int64 records_size = records_t.size();
OpOutputList output; OpOutputList output;
@ -181,10 +181,10 @@ class DecodeCSVOp : public OpKernel {
errors::InvalidArgument( errors::InvalidArgument(
"Field ", f, "Field ", f,
" is required but missing in record ", i, "!")); " is required but missing in record ", i, "!"));
output[f]->flat<string>()(i) = output[f]->flat<tstring>()(i) =
record_defaults[f].flat<string>()(0); record_defaults[f].flat<tstring>()(0);
} else { } else {
output[f]->flat<string>()(i) = fields[f]; output[f]->flat<tstring>()(i) = fields[f];
} }
break; break;
} }

View File

@ -154,7 +154,7 @@ class DecodeImageOp : public OpKernel {
contents.shape().DebugString())); contents.shape().DebugString()));
// Determine format // Determine format
const StringPiece input = contents.scalar<string>()(); const StringPiece input = contents.scalar<tstring>()();
const auto magic = ClassifyFileFormat(input); const auto magic = ClassifyFileFormat(input);
OP_REQUIRES( OP_REQUIRES(
context, context,

View File

@ -39,7 +39,7 @@ class DecodePaddedRawOp : public OpKernel {
void Compute(OpKernelContext* context) override { void Compute(OpKernelContext* context) override {
const auto& input = context->input(0); const auto& input = context->input(0);
auto flat_in = input.flat<string>(); auto flat_in = input.flat<tstring>();
int fixed_length; int fixed_length;
const auto& length_input = context->input(1); const auto& length_input = context->input(1);

View File

@ -748,14 +748,14 @@ class DecodeProtoOp : public OpKernel {
if (is_binary_ && !sanitize_) { if (is_binary_ && !sanitize_) {
// Fast path. // Fast path.
for (int mi = 0; mi < message_count; ++mi) { for (int mi = 0; mi < message_count; ++mi) {
const string* buf = &buf_tensor.flat<string>()(mi); const tstring* buf = &buf_tensor.flat<tstring>()(mi);
bufs.push_back(buf); bufs.push_back(buf);
} }
} else { } else {
// We will have to allocate a copy, either to convert from text to binary // We will have to allocate a copy, either to convert from text to binary
// or to sanitize a binary proto. // or to sanitize a binary proto.
for (int mi = 0; mi < message_count; ++mi) { for (int mi = 0; mi < message_count; ++mi) {
ReserializeMessage(ctx, buf_tensor.flat<string>()(mi), ReserializeMessage(ctx, buf_tensor.flat<tstring>()(mi),
&tmp_binary_bufs[mi]); &tmp_binary_bufs[mi]);
if (!ctx->status().ok()) { if (!ctx->status().ok()) {
return; return;
@ -895,8 +895,8 @@ class DecodeProtoOp : public OpKernel {
data = tensor->bit_casted_shaped<uint8, 1>(flatshape).data(); data = tensor->bit_casted_shaped<uint8, 1>(flatshape).data();
} else { } else {
// DataTypeSize() returns 0 for string types. // DataTypeSize() returns 0 for string types.
stride = last_dim_size * sizeof(string); stride = last_dim_size * sizeof(tstring);
data = reinterpret_cast<uint8*>(tensor->flat<string>().data()); data = reinterpret_cast<uint8*>(tensor->flat<tstring>().data());
} }
} }
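The second DecodeProto hunk is the subtle one: the kernel walks the tensor's backing buffer by hand, so the stride must be sizeof(tstring) rather than sizeof(string) once the element type changes. A hedged sketch of that stride computation; the helper name is illustrative:

#include "tensorflow/core/framework/tensor.h"

// DataTypeSize() reports 0 for DT_STRING, so kernels that address elements
// through raw pointers compute the stride from the element type themselves.
tensorflow::uint8* StringElementBase(tensorflow::Tensor* tensor, int* stride) {
  *stride = sizeof(tensorflow::tstring);
  return reinterpret_cast<tensorflow::uint8*>(
      tensor->flat<tensorflow::tstring>().data());
}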

View File

@ -41,7 +41,7 @@ class DecodeRawOp : public OpKernel {
void Compute(OpKernelContext* context) override { void Compute(OpKernelContext* context) override {
const auto& input = context->input(0); const auto& input = context->input(0);
int64 str_size = -1; int64 str_size = -1;
auto flat_in = input.flat<string>(); auto flat_in = input.flat<tstring>();
for (int64 i = 0; i < flat_in.size(); ++i) { for (int64 i = 0; i < flat_in.size(); ++i) {
const string& in_str = flat_in(i); const string& in_str = flat_in(i);
if (str_size == -1) { if (str_size == -1) {

View File

@ -40,7 +40,7 @@ class DecodeWavOp : public OpKernel {
OP_REQUIRES(context, TensorShapeUtils::IsScalar(contents.shape()), OP_REQUIRES(context, TensorShapeUtils::IsScalar(contents.shape()),
errors::InvalidArgument("contents must be scalar, got shape ", errors::InvalidArgument("contents must be scalar, got shape ",
contents.shape().DebugString())); contents.shape().DebugString()));
const string wav_string = contents.scalar<string>()(); const string wav_string = contents.scalar<tstring>()();
OP_REQUIRES(context, wav_string.size() <= std::numeric_limits<int>::max(), OP_REQUIRES(context, wav_string.size() <= std::numeric_limits<int>::max(),
errors::InvalidArgument("WAV contents are too large for int: ", errors::InvalidArgument("WAV contents are too large for int: ",
wav_string.size())); wav_string.size()));
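DecodeWav copies the scalar element into a local string before handing it to the WAV decoder. A tiny sketch of that explicit copy-out, useful wherever a downstream API still insists on std::string; the function name is illustrative:

#include <string>

#include "tensorflow/core/framework/tensor.h"

std::string CopyScalarString(const tensorflow::Tensor& contents) {
  const tensorflow::tstring& s = contents.scalar<tensorflow::tstring>()();
  // Constructing from (data, size) makes the copy explicit and works whether
  // or not tstring is still a typedef for std::string.
  return std::string(s.data(), s.size());
}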

View File

@ -75,7 +75,7 @@ class DeserializeSparseOp : public OpKernel {
if (num_sparse_tensors == 1 && ndims == 1) { if (num_sparse_tensors == 1 && ndims == 1) {
// Special case with a single sparse tensor. We can avoid data // Special case with a single sparse tensor. We can avoid data
// motion in the Concat and Reshape. // motion in the Concat and Reshape.
const auto& serialized_sparse_t = serialized_sparse.vec<string>(); const auto& serialized_sparse_t = serialized_sparse.vec<tstring>();
Tensor output_indices; Tensor output_indices;
Tensor output_values; Tensor output_values;
@ -98,7 +98,7 @@ class DeserializeSparseOp : public OpKernel {
values.reserve(num_sparse_tensors); values.reserve(num_sparse_tensors);
const auto& serialized_sparse_t = const auto& serialized_sparse_t =
serialized_sparse.flat_inner_dims<string, 2>(); serialized_sparse.flat_inner_dims<tstring, 2>();
for (int i = 0; i < num_sparse_tensors; ++i) { for (int i = 0; i < num_sparse_tensors; ++i) {
Tensor output_indices; Tensor output_indices;
Tensor output_values; Tensor output_values;
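DeserializeSparse views the serialized input as a matrix whose innermost dimension holds the serialized components of each sparse tensor. A minimal sketch of the flat_inner_dims<tstring, 2> view; the traversal function is illustrative:

#include "tensorflow/core/framework/tensor.h"

void VisitSerializedComponents(const tensorflow::Tensor& serialized_sparse) {
  // flat_inner_dims<tstring, 2> keeps the innermost dimension and collapses
  // all outer dimensions into one, giving a [num_tensors, components] view.
  const auto mat = serialized_sparse.flat_inner_dims<tensorflow::tstring, 2>();
  for (tensorflow::int64 i = 0; i < mat.dimension(0); ++i) {
    for (tensorflow::int64 j = 0; j < mat.dimension(1); ++j) {
      const tensorflow::tstring& blob = mat(i, j);
      (void)blob;  // each blob would be deserialized into a component Tensor here
    }
  }
}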

View File

@ -303,7 +303,7 @@ Status WriteVarLenField(const FieldDescriptor& field_desc, const Tensor& input,
// code it ourselves. // code it ourselves.
Status WriteGroup(const FieldDescriptor& field_desc, const Tensor& input, Status WriteGroup(const FieldDescriptor& field_desc, const Tensor& input,
int message_index, int size, CodedOutputStream* output) { int message_index, int size, CodedOutputStream* output) {
auto input_t = input.flat_inner_dims<string>(); auto input_t = input.flat_inner_dims<tstring>();
for (int64 i = 0; i < size; i++) { for (int64 i = 0; i < size; i++) {
const string& value = input_t(static_cast<int64>(message_index), i); const string& value = input_t(static_cast<int64>(message_index), i);
WireFormatLite::WriteTag(field_desc.number(), WireFormatLite::WriteTag(field_desc.number(),
@ -587,7 +587,7 @@ class EncodeProtoOp : public OpKernel {
Tensor* output_tensor; Tensor* output_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, common_prefix, &output_tensor)); OP_REQUIRES_OK(ctx, ctx->allocate_output(0, common_prefix, &output_tensor));
auto bufs = output_tensor->flat<string>(); auto bufs = output_tensor->flat<tstring>();
for (int message_index = 0; message_index < message_count; for (int message_index = 0; message_index < message_count;
message_index++) { message_index++) {
// TODO(nix): possibly optimize allocation here by calling // TODO(nix): possibly optimize allocation here by calling
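EncodeProto goes the other direction: it serializes one message per element into the output's flat<tstring>() view. A hedged sketch of that write path, using tensorflow::Features as a stand-in message type (the real op builds messages dynamically from field descriptors):

#include <string>
#include <vector>

#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/tensor.h"

void SerializeIntoTensor(const std::vector<tensorflow::Features>& messages,
                         tensorflow::Tensor* output) {
  auto bufs = output->flat<tensorflow::tstring>();
  for (size_t i = 0; i < messages.size(); ++i) {
    // SerializeToString still wants a std::string, so serialize into a
    // temporary and assign it to the tstring element; assignment copies.
    std::string serialized;
    messages[i].SerializeToString(&serialized);
    bufs(i) = serialized;
  }
}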

View File

@ -63,10 +63,10 @@ class ParseExampleOp : public OpKernel {
// Copy from OpInputList to std::vector<string>. // Copy from OpInputList to std::vector<string>.
for (int di = 0; di < attrs_.num_dense; ++di) { for (int di = 0; di < attrs_.num_dense; ++di) {
dense_keys_t[di] = dense_keys[di].scalar<string>()(); dense_keys_t[di] = dense_keys[di].scalar<tstring>()();
} }
for (int di = 0; di < attrs_.num_sparse; ++di) { for (int di = 0; di < attrs_.num_sparse; ++di) {
sparse_keys_t[di] = sparse_keys[di].scalar<string>()(); sparse_keys_t[di] = sparse_keys[di].scalar<tstring>()();
} }
if (names->NumElements() > 0) { if (names->NumElements() > 0) {
@ -234,7 +234,7 @@ class ParseSingleExampleOp : public OpKernel {
config.sparse.push_back({attrs_.sparse_keys[d], attrs_.sparse_types[d]}); config.sparse.push_back({attrs_.sparse_keys[d], attrs_.sparse_types[d]});
} }
const string& serialized_proto = serialized->scalar<string>()(); const string& serialized_proto = serialized->scalar<tstring>()();
OP_REQUIRES_OK(ctx, OP_REQUIRES_OK(ctx,
FastParseSingleExample(config, serialized_proto, &result)); FastParseSingleExample(config, serialized_proto, &result));
@ -473,7 +473,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
"Expected context_dense_keys[", di, "Expected context_dense_keys[", di,
"] to be a scalar, got shape: ", "] to be a scalar, got shape: ",
context_dense_keys[di].shape().DebugString())); context_dense_keys[di].shape().DebugString()));
context_dense_keys_t[di] = context_dense_keys[di].scalar<string>()(); context_dense_keys_t[di] = context_dense_keys[di].scalar<tstring>()();
} }
for (int di = 0; di < attrs_.num_context_sparse; ++di) { for (int di = 0; di < attrs_.num_context_sparse; ++di) {
OP_REQUIRES(ctx, OP_REQUIRES(ctx,
@ -482,7 +482,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
"Expected context_sparse_keys[", di, "Expected context_sparse_keys[", di,
"] to be a scalar, got shape: ", "] to be a scalar, got shape: ",
context_sparse_keys[di].shape().DebugString())); context_sparse_keys[di].shape().DebugString()));
context_sparse_keys_t[di] = context_sparse_keys[di].scalar<string>()(); context_sparse_keys_t[di] = context_sparse_keys[di].scalar<tstring>()();
} }
for (int di = 0; di < attrs_.num_feature_list_dense; ++di) { for (int di = 0; di < attrs_.num_feature_list_dense; ++di) {
OP_REQUIRES( OP_REQUIRES(
@ -492,7 +492,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
"] to be a scalar, got shape: ", "] to be a scalar, got shape: ",
feature_list_dense_keys[di].shape().DebugString())); feature_list_dense_keys[di].shape().DebugString()));
feature_list_dense_keys_t[di] = feature_list_dense_keys_t[di] =
feature_list_dense_keys[di].scalar<string>()(); feature_list_dense_keys[di].scalar<tstring>()();
} }
for (int di = 0; di < attrs_.num_feature_list_sparse; ++di) { for (int di = 0; di < attrs_.num_feature_list_sparse; ++di) {
OP_REQUIRES( OP_REQUIRES(
@ -502,7 +502,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
"] to be a scalar, got shape: ", "] to be a scalar, got shape: ",
feature_list_sparse_keys[di].shape().DebugString())); feature_list_sparse_keys[di].shape().DebugString()));
feature_list_sparse_keys_t[di] = feature_list_sparse_keys_t[di] =
feature_list_sparse_keys[di].scalar<string>()(); feature_list_sparse_keys[di].scalar<tstring>()();
} }
OP_REQUIRES( OP_REQUIRES(
ctx, ctx,
@ -513,7 +513,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
"to be a vector, got shape: ", "to be a vector, got shape: ",
feature_list_dense_missing_assumed_empty->shape().DebugString())); feature_list_dense_missing_assumed_empty->shape().DebugString()));
auto feature_list_dense_missing_assumped_empty_t = auto feature_list_dense_missing_assumped_empty_t =
feature_list_dense_missing_assumed_empty->vec<string>(); feature_list_dense_missing_assumed_empty->vec<tstring>();
for (int de = 0; for (int de = 0;
de < feature_list_dense_missing_assumed_empty->NumElements(); ++de) { de < feature_list_dense_missing_assumed_empty->NumElements(); ++de) {
feature_list_dense_missing_assumed_empty_set.insert( feature_list_dense_missing_assumed_empty_set.insert(
@ -527,7 +527,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
"Expected debug_name to be a scalar, got shape: ", "Expected debug_name to be a scalar, got shape: ",
debug_name->shape().DebugString())); debug_name->shape().DebugString()));
} }
auto debug_name_t = debug_name->scalar<string>(); auto debug_name_t = debug_name->scalar<tstring>();
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(serialized->shape()), OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(serialized->shape()),
errors::InvalidArgument( errors::InvalidArgument(
@ -561,7 +561,7 @@ class ParseSingleSequenceExampleOp : public OpKernel {
} }
} }
auto serialized_t = serialized->scalar<string>(); auto serialized_t = serialized->scalar<tstring>();
OpOutputList context_sparse_indices; OpOutputList context_sparse_indices;
OpOutputList context_sparse_values; OpOutputList context_sparse_values;

View File

@ -114,7 +114,7 @@ struct ExampleStore {
Example example; Example example;
Filler fill; Filler fill;
Tensor record_string(DT_STRING, TensorShape({batch_size})); Tensor record_string(DT_STRING, TensorShape({batch_size}));
auto string_t = record_string.vec<string>(); auto string_t = record_string.vec<tstring>();
example.Clear(); example.Clear();
for (int b = 0; b < batch_size; ++b) { for (int b = 0; b < batch_size; ++b) {
for (int k = 0; k < num_keys; ++k) { for (int k = 0; k < num_keys; ++k) {
@ -163,7 +163,7 @@ static Graph* ParseExample(int batch_size, int num_keys, int feature_size) {
Options opt; Options opt;
for (int i = 0; i < num_keys; ++i) { for (int i = 0; i < num_keys; ++i) {
Tensor key(DT_STRING, TensorShape()); Tensor key(DT_STRING, TensorShape());
key.scalar<string>()() = strings::Printf("feature_%d", i); key.scalar<tstring>()() = strings::Printf("feature_%d", i);
switch (opt.benchmark_type) { switch (opt.benchmark_type) {
case kDense: case kDense:
dense_keys.emplace_back(test::graph::Constant(g, key)); dense_keys.emplace_back(test::graph::Constant(g, key));
@ -205,7 +205,7 @@ static Graph* ParseSingleExample(int num_keys, int feature_size) {
Options::Store::GetSerializedExample()[std::make_tuple(1, num_keys, Options::Store::GetSerializedExample()[std::make_tuple(1, num_keys,
feature_size)]; feature_size)];
Tensor serialized(DT_STRING, TensorShape()); Tensor serialized(DT_STRING, TensorShape());
serialized.scalar<string>()() = serialized_batch_1.vec<string>()(0); serialized.scalar<tstring>()() = serialized_batch_1.vec<tstring>()(0);
std::vector<string> sparse_keys; std::vector<string> sparse_keys;
std::vector<string> dense_keys; std::vector<string> dense_keys;
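The benchmark fills a batch DT_STRING tensor with serialized Example protos through vec<tstring>(). A small sketch of that construction, assuming the Example proto headers are available; the feature key "x" and the function name are illustrative:

#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"

tensorflow::Tensor MakeSerializedExamples(int batch_size) {
  tensorflow::Tensor record_string(tensorflow::DT_STRING,
                                   tensorflow::TensorShape({batch_size}));
  auto string_t = record_string.vec<tensorflow::tstring>();
  for (int b = 0; b < batch_size; ++b) {
    tensorflow::Example example;
    auto* feature_map = example.mutable_features()->mutable_feature();
    (*feature_map)["x"].mutable_float_list()->add_value(static_cast<float>(b));
    // SerializeAsString returns std::string; assigning it into the tstring
    // element copies the serialized bytes into the tensor.
    string_t(b) = example.SerializeAsString();
  }
  return record_string;
}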

View File

@ -41,7 +41,7 @@ class ExtractJpegShapeOp : public OpKernel {
OP_REQUIRES(context, TensorShapeUtils::IsScalar(contents.shape()), OP_REQUIRES(context, TensorShapeUtils::IsScalar(contents.shape()),
errors::InvalidArgument("contents must be scalar, got shape ", errors::InvalidArgument("contents must be scalar, got shape ",
contents.shape().DebugString())); contents.shape().DebugString()));
const StringPiece input = contents.scalar<string>()(); const StringPiece input = contents.scalar<tstring>()();
OP_REQUIRES(context, input.size() <= std::numeric_limits<int>::max(), OP_REQUIRES(context, input.size() <= std::numeric_limits<int>::max(),
errors::InvalidArgument("JPEG contents are too large for int: ", errors::InvalidArgument("JPEG contents are too large for int: ",
input.size())); input.size()));

View File

@ -85,7 +85,7 @@ class FactOpKernel : public OpKernel {
Tensor* output_tensor = nullptr; Tensor* output_tensor = nullptr;
OP_REQUIRES_OK( OP_REQUIRES_OK(
context, context->allocate_output(0, TensorShape({}), &output_tensor)); context, context->allocate_output(0, TensorShape({}), &output_tensor));
auto output = output_tensor->template scalar<string>(); auto output = output_tensor->template scalar<tstring>();
string coded = facts[context->env()->NowMicros() % count]; string coded = facts[context->env()->NowMicros() % count];
E(&coded); E(&coded);

View File

@ -110,14 +110,14 @@ class FingerprintOp : public OpKernel {
// and each row contains the fingerprint value of corresponding string. // and each row contains the fingerprint value of corresponding string.
// To compute fingerprints of multiple strings, this op fingerprints the // To compute fingerprints of multiple strings, this op fingerprints the
// buffer containing the string fingerprints. // buffer containing the string fingerprints.
FarmhashFingerprint64(input.flat<string>(), temp.tensor<uint8, 2>()); FarmhashFingerprint64(input.flat<tstring>(), temp.tensor<uint8, 2>());
FarmhashFingerprint64(static_cast<const Tensor&>(temp).shaped<uint8, 2>( FarmhashFingerprint64(static_cast<const Tensor&>(temp).shaped<uint8, 2>(
{dim0, dim1 * kFingerprintSize}), {dim0, dim1 * kFingerprintSize}),
output->matrix<uint8>()); output->matrix<uint8>());
} else { } else {
// In case dim1 == 1, each string computes into its own fingerprint // In case dim1 == 1, each string computes into its own fingerprint
// value. There is no need to fingerprint twice. // value. There is no need to fingerprint twice.
FarmhashFingerprint64(input.flat<string>(), output->matrix<uint8>()); FarmhashFingerprint64(input.flat<tstring>(), output->matrix<uint8>());
} }
} else { } else {
auto data = input.bit_casted_shaped<uint8, 2>( auto data = input.bit_casted_shaped<uint8, 2>(
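FingerprintOp hands the whole flat<tstring>() view to its hashing helper. A minimal sketch of per-element fingerprinting using the platform-level Fingerprint64 rather than the op's internal FarmhashFingerprint64 helper; the function name is illustrative:

#include <vector>

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/fingerprint.h"

std::vector<tensorflow::uint64> FingerprintStrings(
    const tensorflow::Tensor& input) {
  const auto flat = input.flat<tensorflow::tstring>();
  std::vector<tensorflow::uint64> out;
  out.reserve(flat.size());
  for (tensorflow::int64 i = 0; i < flat.size(); ++i) {
    // Fingerprint64 takes a StringPiece, which can view the tstring's bytes.
    out.push_back(tensorflow::Fingerprint64(
        tensorflow::StringPiece(flat(i).data(), flat(i).size())));
  }
  return out;
}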

View File

@ -51,7 +51,7 @@ class FingerprintOpTest : public OpsTestBase {
inputs_.push_back(TensorValue(data)); inputs_.push_back(TensorValue(data));
method_ = Tensor(DT_STRING, TensorShape{}); method_ = Tensor(DT_STRING, TensorShape{});
method_.scalar<string>()() = method; method_.scalar<tstring>()() = method;
inputs_.push_back(TensorValue(&method_)); inputs_.push_back(TensorValue(&method_));
return Status::OK(); return Status::OK();
} }
@ -77,7 +77,7 @@ TEST_F(FingerprintOpTest, GoldenValue) {
// special-case handling. // special-case handling.
TEST_F(FingerprintOpTest, StringGoldenValue) { TEST_F(FingerprintOpTest, StringGoldenValue) {
Tensor data(DT_STRING, {1, 2, 2}); Tensor data(DT_STRING, {1, 2, 2});
auto buffer = data.flat<string>(); auto buffer = data.flat<tstring>();
buffer(0).resize(10); buffer(0).resize(10);
buffer(1).resize(7); buffer(1).resize(7);
buffer(2).resize(0); buffer(2).resize(0);
@ -134,7 +134,7 @@ TEST_F(FingerprintOpTest, CollisionString) {
constexpr int64 size = 256; constexpr int64 size = 256;
Tensor tensor(DT_STRING, {1}); Tensor tensor(DT_STRING, {1});
auto& input = tensor.vec<string>()(0); auto& input = tensor.vec<tstring>()(0);
input.resize(size); input.resize(size);
TTypes<uint8>::UnalignedFlat buffer(reinterpret_cast<uint8*>(&*input.begin()), TTypes<uint8>::UnalignedFlat buffer(reinterpret_cast<uint8*>(&*input.begin()),
@ -163,7 +163,7 @@ TEST_F(FingerprintOpTest, CompareBytesAndString) {
auto pods = pods_tensor.matrix<float>(); auto pods = pods_tensor.matrix<float>();
pods.setRandom(); pods.setRandom();
auto strings = strings_tensor.vec<string>(); auto strings = strings_tensor.vec<tstring>();
for (int64 i = 0; i < strings.size(); ++i) { for (int64 i = 0; i < strings.size(); ++i) {
strings(i).assign(reinterpret_cast<const char*>(&pods(i, 0)), strings(i).assign(reinterpret_cast<const char*>(&pods(i, 0)),
pods.dimension(1) * sizeof(pods(i, 0))); pods.dimension(1) * sizeof(pods(i, 0)));
@ -199,7 +199,7 @@ TEST(FingerprintOpShapeFnTest, MethodKnownStatically) {
ShapeInferenceTestOp op("Fingerprint"); ShapeInferenceTestOp op("Fingerprint");
Tensor method(DT_STRING, TensorShape{}); Tensor method(DT_STRING, TensorShape{});
method.scalar<string>()() = "farmhash64"; method.scalar<tstring>()() = "farmhash64";
op.input_tensors.assign({nullptr, &method}); op.input_tensors.assign({nullptr, &method});
TF_ASSERT_OK(MakeNodeDef(DT_UINT8, &op.node_def)); TF_ASSERT_OK(MakeNodeDef(DT_UINT8, &op.node_def));
@ -229,12 +229,12 @@ TEST(FingerprintOpShapeFnTest, InvalidMethod) {
// When `method` shape is unknown statically. // When `method` shape is unknown statically.
Tensor method(DT_STRING, TensorShape{1}); Tensor method(DT_STRING, TensorShape{1});
method.vec<string>()(0) = "farmhash64"; method.vec<tstring>()(0) = "farmhash64";
op.input_tensors.assign({nullptr, &method}); op.input_tensors.assign({nullptr, &method});
INFER_ERROR("must be rank 0", op, "?;?"); INFER_ERROR("must be rank 0", op, "?;?");
method = Tensor(DT_STRING, TensorShape{}); method = Tensor(DT_STRING, TensorShape{});
method.scalar<string>()() = "unsupported_method"; method.scalar<tstring>()() = "unsupported_method";
op.input_tensors.assign({nullptr, &method}); op.input_tensors.assign({nullptr, &method});
INFER_ERROR("unsupported_method", op, "?;?"); INFER_ERROR("unsupported_method", op, "?;?");
} }

View File

@ -318,7 +318,7 @@ void RemoteCallOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {
string target_device; string target_device;
OP_REQUIRES_OK_ASYNC( OP_REQUIRES_OK_ASYNC(
ctx, ctx,
DeviceNameUtils::CanonicalizeDeviceName(target->scalar<string>()(), DeviceNameUtils::CanonicalizeDeviceName(target->scalar<tstring>()(),
source_device, &target_device), source_device, &target_device),
done); done);

View File

@ -82,7 +82,7 @@ Status ToBool(gtl::ArraySlice<Tensor> t, bool* v) {
*v = t[0].scalar<bool>()(); *v = t[0].scalar<bool>()();
break; break;
case DT_STRING: case DT_STRING:
*v = !t[0].scalar<string>()().empty(); *v = !t[0].scalar<tstring>()().empty();
break; break;
default: default:
return errors::InvalidArgument(DataTypeString(t[0].dtype()), return errors::InvalidArgument(DataTypeString(t[0].dtype()),

View File

@ -51,7 +51,7 @@ class FuzzExampleProtoFastParsing : public FuzzSession {
void FuzzImpl(const uint8_t* data, size_t size) final { void FuzzImpl(const uint8_t* data, size_t size) final {
// TODO(dga): Test the batch case also. // TODO(dga): Test the batch case also.
Tensor input_tensor(tensorflow::DT_STRING, TensorShape({})); Tensor input_tensor(tensorflow::DT_STRING, TensorShape({}));
input_tensor.scalar<string>()() = input_tensor.scalar<tstring>()() =
string(reinterpret_cast<const char*>(data), size); string(reinterpret_cast<const char*>(data), size);
RunInputs({{"input", input_tensor}}); RunInputs({{"input", input_tensor}});
} }
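The fuzzer stuffs an arbitrary byte buffer into a scalar DT_STRING tensor. A short sketch of that assignment which skips the intermediate string(...) construction used in the hunk above; the function name is illustrative:

#include <cstddef>
#include <cstdint>

#include "tensorflow/core/framework/tensor.h"

tensorflow::Tensor BytesToScalarTensor(const uint8_t* data, size_t size) {
  tensorflow::Tensor t(tensorflow::DT_STRING, tensorflow::TensorShape({}));
  // assign(ptr, len) writes the raw bytes straight into the tstring element.
  t.scalar<tensorflow::tstring>()().assign(reinterpret_cast<const char*>(data),
                                           size);
  return t;
}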

Some files were not shown because too many files have changed in this diff.