Migrated a subset of kernels to use tstring.

Added SerializeToTString helper function. This is part of a larger migration effort for tensorflow::tstring. See: https://github.com/tensorflow/community/pull/91

PiperOrigin-RevId: 263363492
commit 5cc5d2a5da
parent a4e610db48
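The same substitution pattern repeats throughout the hunks below: tensor accessors move from string to tstring, and protos are serialized into tensors through the new SerializeToTString helper rather than MessageLite::SerializeToString. A condensed before/after sketch of that pattern (the kernel fragment, `ctx`, and the `summary` proto are illustrative assumptions, not lifted from any one file in this commit):

// Before (illustrative kernel fragment; `ctx` is the OpKernelContext,
// `summary` a protobuf message):
//   Tensor* out = nullptr;
//   OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out));
//   CHECK(summary.SerializeToString(&out->scalar<string>()()));
//
// After: the output scalar is accessed as tstring and filled through the
// new helper, which handles both the tstring and legacy std::string builds.
Tensor* out = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &out));
CHECK(SerializeToTString(summary, &out->scalar<tstring>()()));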
@@ -167,11 +167,11 @@ TEST_F(DebugIdentityOpTest, Int32Success_2_3) {

TEST_F(DebugIdentityOpTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

// Tests for DebugNanCountOp
@@ -552,7 +552,7 @@ class DenseCollector {
case DataType::DT_INT64:
return FillDefault<int64>(default_value_.value.v_int64);
case DataType::DT_STRING:
return FillDefault<string>(default_value_.value.v_string);
return FillDefault<tstring>(default_value_.value.v_string);
case DataType::DT_UINT8:
return FillDefault<uint8>(default_value_.value.v_uint8);
case DataType::DT_UINT32:
@@ -738,12 +738,12 @@ class DecodeProtoOp : public OpKernel {

// This is used to allocate binary bufs if used. It serves only to define
// memory ownership.
std::vector<string> tmp_binary_bufs(message_count);
std::vector<tstring> tmp_binary_bufs(message_count);

// These are the actual buffers to use, which may be in tmp_binary_bufs
// or may be pointers into the buf_tensor. Either way they are not owned
// here.
std::vector<const string*> bufs;
std::vector<const tstring*> bufs;

if (is_binary_ && !sanitize_) {
// Fast path.
@@ -808,7 +808,7 @@ class DecodeProtoOp : public OpKernel {
private:
// Copy a serialized message to binary, e.g. to handle text proto inputs.
void ReserializeMessage(OpKernelContext* ctx, const string& buf,
string* binary_buf) {
tstring* binary_buf) {
// Handle text protos by translating them to binary.
std::unique_ptr<Message> message(message_prototype_->New());
OP_REQUIRES(ctx, message, errors::DataLoss("Initializing message failed"));
@@ -823,7 +823,7 @@ class DecodeProtoOp : public OpKernel {
errors::DataLoss("Unable to parse text protobuf"));
}

OP_REQUIRES(ctx, message->SerializeToString(binary_buf),
OP_REQUIRES(ctx, SerializeToTString(*message, binary_buf),
errors::DataLoss("Unable to reserialize text proto as binary"));
}

@@ -875,7 +875,7 @@ class DecodeProtoOp : public OpKernel {

// Parse fields from a serialized message into preallocated tensors.
void AccumulateFields(OpKernelContext* ctx,
const std::vector<const string*>& bufs,
const std::vector<const tstring*>& bufs,
std::vector<Tensor*> outputs) {
struct TensorInfo {
explicit TensorInfo(Tensor* tensor) {
@@ -915,7 +915,7 @@ class DecodeProtoOp : public OpKernel {
}

for (int message_index = 0; message_index < bufs.size(); ++message_index) {
const string& buf = *bufs[message_index];
const tstring& buf = *bufs[message_index];

std::vector<DenseCollector> collectors;
collectors.reserve(field_count);
@@ -55,7 +55,7 @@ TEST(DecodeWavOpTest, DecodeWavTest) {
0x00, 0x80, // fourth sample: -32768 (saturated)
};
Tensor content_tensor =
test::AsScalar<string>(string(wav_data.begin(), wav_data.end()));
test::AsScalar<tstring>(string(wav_data.begin(), wav_data.end()));
Output content_op =
Const(root.WithOpName("content_op"), Input::Initializer(content_tensor));
@@ -60,8 +60,8 @@ class GenerateVocabRemappingOp : public OpKernel {
new_vocab_file_tensor->scalar<tstring>()();
OP_REQUIRES(context, !new_vocab_filename.empty(),
errors::InvalidArgument("new vocab filename cannot be empty."));
lookup::HashTable<int64, string>* new_vocab_table =
new lookup::HashTable<int64, string>(context, this);
lookup::HashTable<int64, tstring>* new_vocab_table =
new lookup::HashTable<int64, tstring>(context, this);
core::ScopedUnref unref_new(new_vocab_table);
// Note: we pass -1 (unknown) for vocab_size, which is supposed to be the
// total elements in file. This is different from num_new_vocab_, which
@@ -91,8 +91,8 @@ class GenerateVocabRemappingOp : public OpKernel {
old_vocab_file_tensor->scalar<tstring>()();
OP_REQUIRES(context, !old_vocab_filename.empty(),
errors::InvalidArgument("new vocab filename cannot be empty."));
lookup::HashTable<string, int64>* old_vocab_table =
new lookup::HashTable<string, int64>(context, this);
lookup::HashTable<tstring, int64>* old_vocab_table =
new lookup::HashTable<tstring, int64>(context, this);
core::ScopedUnref unref_old(old_vocab_table);
// Note: If old_vocab_size_ is -1 (unknown), we retrieve all elements in
// file (see TextFileLineIterator).
@@ -57,11 +57,11 @@ TEST_F(GuaranteeConstOpTest, Int32Success_2_3) {

TEST_F(GuaranteeConstOpTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(GuaranteeConstOpTest, ResourceInputError) {
@@ -42,7 +42,7 @@ class PrintingV2GraphTest : public OpsTestBase {

TEST_F(PrintingV2GraphTest, StringSuccess) {
TF_ASSERT_OK(Init());
AddInputFromArray<string>(TensorShape({}), {"bar"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
}

@@ -90,8 +90,8 @@ TEST_F(PrintingGraphTest, Int32Success_Summarize6) {
TEST_F(PrintingGraphTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<string>(TensorShape({}), {"foo"});
AddInputFromArray<string>(TensorShape({}), {"bar"});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
@@ -101,8 +101,8 @@ TEST_F(PrintingGraphTest, StringSuccess) {
TEST_F(PrintingGraphTest, MsgSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "Message: "));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<string>(TensorShape({}), {"foo"});
AddInputFromArray<string>(TensorShape({}), {"bar"});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
@@ -112,8 +112,8 @@ TEST_F(PrintingGraphTest, MsgSuccess) {
TEST_F(PrintingGraphTest, FirstNSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "", 3));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<string>(TensorShape({}), {"foo"});
AddInputFromArray<string>(TensorShape({}), {"bar"});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
// run 4 times but we only print 3 as intended
for (int i = 0; i < 4; i++) TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
@@ -299,7 +299,7 @@ inline uint64 HashScalar(const T& key) {
return static_cast<uint64>(key);
}

inline uint64 HashScalar(const string& key) { return Hash64(key); }
inline uint64 HashScalar(const tstring& key) { return Hash64(key); }

// If the given shape is a scalar return {1} instead. Otherwise leave it alone.
TensorShape MaybeVectorizeShape(const TensorShape& shape) {
@@ -982,18 +982,18 @@ REGISTER_KERNEL_BUILDER(Name("LookupTableImportV2").Device(DEVICE_CPU),
REGISTER_KERNEL(int32, double);
REGISTER_KERNEL(int32, float);
REGISTER_KERNEL(int32, int32);
REGISTER_KERNEL(int32, string);
REGISTER_KERNEL(int32, tstring);
REGISTER_KERNEL(int64, double);
REGISTER_KERNEL(int64, float);
REGISTER_KERNEL(int64, int32);
REGISTER_KERNEL(int64, int64);
REGISTER_KERNEL(int64, string);
REGISTER_KERNEL(string, bool);
REGISTER_KERNEL(string, double);
REGISTER_KERNEL(string, float);
REGISTER_KERNEL(string, int32);
REGISTER_KERNEL(string, int64);
REGISTER_KERNEL(string, string);
REGISTER_KERNEL(int64, tstring);
REGISTER_KERNEL(tstring, bool);
REGISTER_KERNEL(tstring, double);
REGISTER_KERNEL(tstring, float);
REGISTER_KERNEL(tstring, int32);
REGISTER_KERNEL(tstring, int64);
REGISTER_KERNEL(tstring, tstring);

#undef REGISTER_KERNEL

@@ -1021,13 +1021,13 @@ REGISTER_KERNEL(int64, double);
REGISTER_KERNEL(int64, float);
REGISTER_KERNEL(int64, int32);
REGISTER_KERNEL(int64, int64);
REGISTER_KERNEL(int64, string);
REGISTER_KERNEL(int64, tstring);
REGISTER_KERNEL(int64, Variant);
REGISTER_KERNEL(string, bool);
REGISTER_KERNEL(string, double);
REGISTER_KERNEL(string, float);
REGISTER_KERNEL(string, int32);
REGISTER_KERNEL(string, int64);
REGISTER_KERNEL(tstring, bool);
REGISTER_KERNEL(tstring, double);
REGISTER_KERNEL(tstring, float);
REGISTER_KERNEL(tstring, int32);
REGISTER_KERNEL(tstring, int64);

#undef REGISTER_KERNEL

@@ -1055,12 +1055,12 @@ REGISTER_KERNEL(int64, double);
REGISTER_KERNEL(int64, float);
REGISTER_KERNEL(int64, int32);
REGISTER_KERNEL(int64, int64);
REGISTER_KERNEL(int64, string);
REGISTER_KERNEL(string, bool);
REGISTER_KERNEL(string, double);
REGISTER_KERNEL(string, float);
REGISTER_KERNEL(string, int32);
REGISTER_KERNEL(string, int64);
REGISTER_KERNEL(int64, tstring);
REGISTER_KERNEL(tstring, bool);
REGISTER_KERNEL(tstring, double);
REGISTER_KERNEL(tstring, float);
REGISTER_KERNEL(tstring, int32);
REGISTER_KERNEL(tstring, int64);

#undef REGISTER_KERNEL

@@ -1090,11 +1090,11 @@ REGISTER_KERNEL(int64, float);
REGISTER_KERNEL(int64, int32);
REGISTER_KERNEL(int64, int64);
REGISTER_KERNEL(int64, Variant);
REGISTER_KERNEL(string, bool);
REGISTER_KERNEL(string, double);
REGISTER_KERNEL(string, float);
REGISTER_KERNEL(string, int32);
REGISTER_KERNEL(string, int64);
REGISTER_KERNEL(tstring, bool);
REGISTER_KERNEL(tstring, double);
REGISTER_KERNEL(tstring, float);
REGISTER_KERNEL(tstring, int32);
REGISTER_KERNEL(tstring, int64);

#undef REGISTER_KERNEL
@@ -79,10 +79,10 @@ class MergeV2CheckpointsOpTest : public OpsTestBase {
// Now merges.
MakeOp(delete_old_dirs);
// Add checkpoint_prefixes.
AddInput<string>(TensorShape({2}),
[&prefixes](int i) -> string { return prefixes[i]; });
AddInput<tstring>(TensorShape({2}),
[&prefixes](int i) -> tstring { return prefixes[i]; });
// Add destination_prefix.
AddInput<string>(TensorShape({}), [kMergedPrefix](int unused) -> string {
AddInput<tstring>(TensorShape({}), [kMergedPrefix](int unused) -> tstring {
return kMergedPrefix;
});
TF_ASSERT_OK(RunOpKernel());
@@ -29,7 +29,7 @@ TEST_F(OpsTestBase, ScopedStepContainer) {
.Input(FakeInput(DT_STRING))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<string>(TensorShape({}), {""});
AddInputFromArray<tstring>(TensorShape({}), {""});
TF_EXPECT_OK(RunOpKernel());
EXPECT_TRUE(step_container_ != nullptr);
}
@@ -82,7 +82,7 @@ class SerializeTensorOp : public OpKernel {
Tensor* proto_string = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, TensorShape({}), &proto_string));
CHECK(proto.SerializeToString(&proto_string->scalar<string>()()));
CHECK(SerializeToTString(proto, &proto_string->scalar<tstring>()()));
}
};

@@ -186,12 +186,12 @@ TEST_F(SerializeTensorOpTest, SerializeTensorOpTest_bool) {
}

TEST_F(SerializeTensorOpTest, SerializeTensorOpTest_string) {
MakeOp<string>(TensorShape({10}),
[](int x) -> string { return std::to_string(x / 10.); });
MakeOp<tstring>(TensorShape({10}),
[](int x) -> tstring { return std::to_string(x / 10.); });
TF_ASSERT_OK(RunOpKernel());
Tensor parse_output;
ParseSerializedOutput<string>(GetOutput(0), &parse_output);
test::ExpectTensorEqual<string>(parse_output, GetInput(0));
ParseSerializedOutput<tstring>(GetOutput(0), &parse_output);
test::ExpectTensorEqual<tstring>(parse_output, GetInput(0));
}

} // namespace
@@ -98,7 +98,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
inputs.push_back({nullptr, &input_0});

// Input #1 is the tensor names
Tensor input_1 = MakeInput<string>(
Tensor input_1 = MakeInput<tstring>(
TensorShape({static_cast<int>(tensor_names.size())}),
[&tensor_names](int x) -> string { return tensor_names[x]; });
inputs.push_back({nullptr, &input_1});
@@ -149,7 +149,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
[](int x) -> int64 { return x - 9; });
inputs.push_back({nullptr, &input_11});
// Input #12 is a 1-d string tensor
Tensor input_12 = MakeInput<string>(
Tensor input_12 = MakeInput<tstring>(
TensorShape({2}), [](int x) -> string { return x ? "yes" : "no"; });
inputs.push_back({nullptr, &input_12});
// Input #13 is a 1-d complex64 tensor
@@ -188,10 +188,10 @@ TEST_F(RestoreOpTest, RestoreSimple) {
// The 1-d bool tensor
{
MakeRestoreOp(DT_BOOL);
AddInput<string>(TensorShape({}),
[&filename](int x) -> string { return filename; });
AddInput<string>(TensorShape({}),
[&](int x) -> string { return tensor_names[0]; });
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });
AddInput<tstring>(TensorShape({}),
[&](int x) -> tstring { return tensor_names[0]; });
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({2});
@@ -432,13 +432,13 @@ TEST_F(RestoreSliceOpTest, RestoreInt) {
MakeRestoreSliceOp(DT_INT32);
string shape_and_slice = "4 16 0,2:-";
// Add a file name
AddInput<string>(TensorShape({}),
[&filename](int x) -> string { return filename; });
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });
// Add the tensor names
AddInput<string>(TensorShape({}),
[&tensor_name](int x) -> string { return tensor_name; });
AddInput<tstring>(TensorShape({}),
[&tensor_name](int x) -> tstring { return tensor_name; });
// Add the tensor shape and slice
AddInput<string>(TensorShape({}), [&shape_and_slice](int x) -> string {
AddInput<tstring>(TensorShape({}), [&shape_and_slice](int x) -> tstring {
return shape_and_slice;
});

@@ -109,12 +109,12 @@ class RestoreV2OpTest : public OpsTestBase {
inputs.push_back({nullptr, &input_0});

// Input #1 is the tensor names
Tensor input_1 = MakeInput<string>(
Tensor input_1 = MakeInput<tstring>(
TensorShape({static_cast<int>(tensor_names.size())}),
[&tensor_names](int x) -> string { return tensor_names[x]; });
inputs.push_back({nullptr, &input_1});

Tensor shape_and_slices = MakeInput<string>(
Tensor shape_and_slices = MakeInput<tstring>(
TensorShape({static_cast<int>(tensor_names.size())}),
[](int x) -> string { return "" /* saves in full */; });
if (save_op_to_use != "Save") {
@@ -195,11 +195,11 @@ class RestoreV2OpTest : public OpsTestBase {
// The 1-d bool tensor
{
MakeRestoreOp(DT_BOOL);
AddInput<string>(TensorShape({}),
[&filename](int x) -> string { return filename; });
AddInput<string>(TensorShape({1}),
[&](int x) -> string { return tensor_names[0]; });
AddInput<string>(TensorShape({1}), [&](int x) -> string {
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });
AddInput<tstring>(TensorShape({1}),
[&](int x) -> tstring { return tensor_names[0]; });
AddInput<tstring>(TensorShape({1}), [&](int x) -> tstring {
return "";
}); // Restores in full.
TF_ASSERT_OK(RunOpKernel());
@@ -68,15 +68,15 @@ TEST_F(RollOpTest, ScalarIndices_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);

// Feed and run
AddInputFromArray<string>(TensorShape({5}), {"a", "b", "c", "d", "e"});
AddInputFromArray<tstring>(TensorShape({5}), {"a", "b", "c", "d", "e"});
AddInputFromArray<int32>(TensorShape({}), {3});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({5}));
test::FillValues<string>(&expected, {"c", "d", "e", "a", "b"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"c", "d", "e", "a", "b"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, ScalarIndices_Complex) {
@@ -121,18 +121,18 @@ TEST_F(RollOpTest, Simple_TwoD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);

// Feed and run
AddInputFromArray<string>(TensorShape({3, 5}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<tstring>(TensorShape({3, 5}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<int32>(TensorShape({2}), {2, -1});
AddInputFromArray<int32>(TensorShape({2}), {0, 1});
TF_ASSERT_OK(RunOpKernel());

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({3, 5}));
test::FillValues<string>(&expected, {"g", "h", "i", "j", "f", "l", "m", "n",
"o", "k", "b", "c", "d", "e", "a"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"g", "h", "i", "j", "f", "l", "m", "n",
"o", "k", "b", "c", "d", "e", "a"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, Simple_ThreeD32) {
@@ -155,7 +155,7 @@ TEST_F(RollOpTest, Simple_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);

// Feed and run
AddInputFromArray<string>(
AddInputFromArray<tstring>(
TensorShape({2, 2, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
AddInputFromArray<int32>(TensorShape({3}), {1, -1, -1});
@@ -164,9 +164,9 @@ TEST_F(RollOpTest, Simple_ThreeD32_NoMemcpy) {

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({2, 2, 3}));
test::FillValues<string>(
test::FillValues<tstring>(
&expected, {"k", "l", "j", "h", "i", "g", "e", "f", "d", "b", "c", "a"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, Simple_TwoD64) {
@@ -190,18 +190,18 @@ TEST_F(RollOpTest, Simple_TwoD64_NoMemcpy) {
MakeOp(DT_STRING, DT_INT64);

// Feed and run
AddInputFromArray<string>(TensorShape({5, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<tstring>(TensorShape({5, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<int64>(TensorShape({2}), {-1, 4});
AddInputFromArray<int64>(TensorShape({2}), {0, 1});
TF_ASSERT_OK(RunOpKernel());

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({5, 3}));
test::FillValues<string>(&expected, {"f", "d", "e", "i", "g", "h", "l", "j",
"k", "o", "m", "n", "c", "a", "b"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"f", "d", "e", "i", "g", "h", "l", "j",
"k", "o", "m", "n", "c", "a", "b"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, Simple_ThreeD64) {
@@ -224,7 +224,7 @@ TEST_F(RollOpTest, Simple_ThreeD64_NoMemcpy) {
MakeOp(DT_STRING, DT_INT64);

// Feed and run
AddInputFromArray<string>(
AddInputFromArray<tstring>(
TensorShape({4, 1, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
AddInputFromArray<int64>(TensorShape({3}), {4, 3, 2});
@@ -233,9 +233,9 @@ TEST_F(RollOpTest, Simple_ThreeD64_NoMemcpy) {

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({4, 1, 3}));
test::FillValues<string>(
test::FillValues<tstring>(
&expected, {"b", "c", "a", "e", "f", "d", "h", "i", "g", "k", "l", "j"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, ZeroShift_ThreeD32) {
@@ -258,7 +258,7 @@ TEST_F(RollOpTest, ZeroShift_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);

// Feed and run
AddInputFromArray<string>(
AddInputFromArray<tstring>(
TensorShape({2, 2, 3}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0});
@@ -267,9 +267,9 @@ TEST_F(RollOpTest, ZeroShift_ThreeD32_NoMemcpy) {

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({2, 2, 3}));
test::FillValues<string>(
test::FillValues<tstring>(
&expected, {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, ZeroSize_ThreeD32) {
@@ -290,14 +290,14 @@ TEST_F(RollOpTest, ZeroSize_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);

// Feed and run
AddInputFromArray<string>(TensorShape({5, 0, 0}), {});
AddInputFromArray<tstring>(TensorShape({5, 0, 0}), {});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({5, 0, 0}));
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, OneSize_ThreeD32) {
@@ -319,15 +319,15 @@ TEST_F(RollOpTest, OneSize_ThreeD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);

// Feed and run
AddInputFromArray<string>(TensorShape({1, 1, 1}), {"a"});
AddInputFromArray<tstring>(TensorShape({1, 1, 1}), {"a"});
AddInputFromArray<int32>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
TF_ASSERT_OK(RunOpKernel());

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({1, 1, 1}));
test::FillValues<string>(&expected, {"a"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"a"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, MultiShifts_TwoD32) {
@@ -351,18 +351,18 @@ TEST_F(RollOpTest, MultiShifts_TwoD32_NoMemcpy) {
MakeOp(DT_STRING, DT_INT32);

// Feed and run
AddInputFromArray<string>(TensorShape({3, 5}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<tstring>(TensorShape({3, 5}),
{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o"});
AddInputFromArray<int32>(TensorShape({4}), {-2, 2, -1, 1});
AddInputFromArray<int32>(TensorShape({4}), {1, 0, 0, 1});
TF_ASSERT_OK(RunOpKernel());

// Check the output.
Tensor expected(allocator(), DT_STRING, TensorShape({3, 5}));
test::FillValues<string>(&expected, {"l", "m", "n", "o", "k", "b", "c", "d",
"e", "a", "g", "h", "i", "j", "f"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"l", "m", "n", "o", "k", "b", "c", "d",
"e", "a", "g", "h", "i", "j", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(RollOpTest, Error_InputMustBeVectorOrHigher) {
@@ -65,12 +65,13 @@ TEST_F(SaveOpTest, Simple) {

MakeOp();
// Add a file name
AddInput<string>(TensorShape({}),
[&filename](int x) -> string { return filename; });
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });

// Add the tensor names
AddInput<string>(TensorShape({14}),
[&tensornames](int x) -> string { return tensornames[x]; });
AddInput<tstring>(TensorShape({14}), [&tensornames](int x) -> tstring {
return tensornames[x];
});

// Add a 1-d bool tensor
AddInput<bool>(TensorShape({2}), [](int x) -> bool { return x != 0; });
@@ -108,8 +109,8 @@ TEST_F(SaveOpTest, Simple) {
AddInput<int64>(TensorShape({9}), [](int x) -> int64 { return x - 9; });

// Add a 1-d string tensor
AddInput<string>(TensorShape({2}),
[](int x) -> string { return x ? "yes" : "no"; });
AddInput<tstring>(TensorShape({2}),
[](int x) -> tstring { return x ? "yes" : "no"; });

// Add a 2-d complex64 tensor
AddInput<complex64>(TensorShape({2, 3}), [](int x) -> complex64 {
@@ -328,7 +329,7 @@ TEST_F(SaveOpTest, Simple) {

// We expect the tensor value to be correct.
TensorSlice s = TensorSlice::ParseOrDie("-");
string data[2];
tstring data[2];
EXPECT_TRUE(reader.CopySliceData("tensor_string", s, data));
EXPECT_EQ("no", data[0]);
EXPECT_EQ("yes", data[1]);
@@ -425,15 +426,15 @@ TEST_F(SaveSlicesOpTest, Slices) {

MakeOp();
// Add a file name
AddInput<string>(TensorShape({}),
[&filename](int x) -> string { return filename; });
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });

// Add the tensor names
AddInput<string>(TensorShape({5}),
[&tensornames](int x) -> string { return tensornames[x]; });
AddInput<tstring>(TensorShape({5}), [&tensornames](int x) -> tstring {
return tensornames[x];
});

// Add the tensor shapes and slices
AddInput<string>(TensorShape({5}), [&tensorshapes](int x) -> string {
AddInput<tstring>(TensorShape({5}), [&tensorshapes](int x) -> tstring {
return tensorshapes[x];
});

@@ -577,15 +579,16 @@ TEST_F(SaveOpSlices2Test, TwoSlices) {

MakeOp();
// Add a file name
AddInput<string>(TensorShape({}),
[&filename](int x) -> string { return filename; });
AddInput<tstring>(TensorShape({}),
[&filename](int x) -> tstring { return filename; });

// Add the tensor names
AddInput<string>(TensorShape({3}),
[&tensornames](int x) -> string { return tensornames[x]; });
AddInput<tstring>(TensorShape({3}), [&tensornames](int x) -> tstring {
return tensornames[x];
});

// Add the tensor shapes and slices
AddInput<string>(TensorShape({3}), [&tensorshapes](int x) -> string {
AddInput<tstring>(TensorShape({3}), [&tensorshapes](int x) -> tstring {
return tensorshapes[x];
});

@@ -666,10 +669,10 @@ static void BM_LargeTensorWrite(int iters, int num_elements) {
tensor.flat<float>().setZero();

// Builds the graph.
const string temp_filename =
const tstring temp_filename =
io::JoinPath(testing::TmpDir(), "benchmark_checkpoint");
auto root = Scope::NewRootScope().ExitOnError();
const string tensor_name = "my_tensor";
const tstring tensor_name = "my_tensor";
ops::Save(root, temp_filename, {tensor_name}, {{tensor}});

// Disables optimizations.
@@ -59,7 +59,7 @@ void SaveTensors(
std::numeric_limits<int>::max()),
errors::InvalidArgument("Too many inputs to SaveTensors"));
const int N = static_cast<int>(tensor_names_t.NumElements());
const string* tensor_shapes_and_slices_ptr = nullptr;
const tstring* tensor_shapes_and_slices_ptr = nullptr;
if (save_slices) {
const Tensor& tensor_shapes_and_slices_t = context->input(2);
OP_REQUIRES(
@@ -103,7 +103,7 @@ void SaveTensors(
TensorShape shape(input.shape());
TensorSlice slice(input.dims());
if (save_slices && !tensor_shapes_and_slices_ptr[i].empty()) {
const string& shape_spec = tensor_shapes_and_slices_ptr[i];
const tstring& shape_spec = tensor_shapes_and_slices_ptr[i];
TensorShape slice_shape;
OP_REQUIRES_OK(context, checkpoint::ParseShapeAndSlice(
shape_spec, &shape, &slice, &slice_shape));
@@ -192,7 +192,8 @@ void RestoreTensor(OpKernelContext* context,
TensorShape output_shape(saved_shape);
TensorSlice slice_to_load(saved_shape.dims());
if (restore_slice) {
const string& shape_spec = context->input(2).flat<tstring>()(restore_index);
const tstring& shape_spec =
context->input(2).flat<tstring>()(restore_index);
if (!shape_spec.empty()) {
TensorShape parsed_shape;
OP_REQUIRES_OK(context, checkpoint::ParseShapeAndSlice(
@@ -212,8 +212,8 @@ class MergeV2Checkpoints : public OpKernel {
"Input destination_prefix should be a scalar tensor, got ",
destination_prefix.shape().DebugString(), " instead."));

const gtl::ArraySlice<string> input_prefixes =
gtl::ArraySlice<string>(checkpoint_prefixes.flat<string>());
const gtl::ArraySlice<tstring> input_prefixes =
gtl::ArraySlice<tstring>(checkpoint_prefixes.flat<tstring>());
Env* env = Env::Default();
const string& merged_prefix = destination_prefix.scalar<tstring>()();
OP_REQUIRES_OK(
@@ -59,16 +59,17 @@ TEST_F(SaveV2OpTest, Simple) {

MakeOp();
// Add a file name
AddInput<string>(TensorShape({}),
[&prefix](int x) -> string { return prefix; });
AddInput<tstring>(TensorShape({}),
[&prefix](int x) -> tstring { return prefix; });

// Add the tensor names
AddInput<string>(TensorShape({13}),
[&tensornames](int x) -> string { return tensornames[x]; });
AddInput<tstring>(TensorShape({13}), [&tensornames](int x) -> tstring {
return tensornames[x];
});

// Add the slice specs
AddInput<string>(TensorShape({13}),
[](int x) -> string { return "" /* saves in full */; });
AddInput<tstring>(TensorShape({13}),
[](int x) -> tstring { return "" /* saves in full */; });

// Add a 1-d bool tensor
AddInput<bool>(TensorShape({2}), [](int x) -> bool { return x != 0; });
@@ -84,7 +84,7 @@ int64 SparseTensorColumn<int64>::Feature(int64 batch, int64 n) const {

// InternalType is string or StringPiece when using StringCrosser.
template <>
string SparseTensorColumn<string>::Feature(int64 batch, int64 n) const {
tstring SparseTensorColumn<tstring>::Feature(int64 batch, int64 n) const {
const int64 start = feature_start_indices_[batch];
if (DT_STRING == values_.dtype())
return values_.vec<tstring>().data()[start + n];
@@ -124,7 +124,7 @@ int64 DenseTensorColumn<int64>::Feature(int64 batch, int64 n) const {

// Internal type is string or StringPiece when using StringCrosser.
template <>
string DenseTensorColumn<string>::Feature(int64 batch, int64 n) const {
tstring DenseTensorColumn<tstring>::Feature(int64 batch, int64 n) const {
if (DT_STRING == tensor_.dtype()) return tensor_.matrix<tstring>()(batch, n);
return std::to_string(tensor_.matrix<int64>()(batch, n));
}
@@ -275,7 +275,7 @@ struct CrossTraits;
template <typename InternalType>
struct CrossTraits<false, InternalType> {
typedef StringCrosser<InternalType> Crosser;
typedef OutputUpdater<string> Updater;
typedef OutputUpdater<tstring> Updater;
};

template <>
@@ -555,20 +555,20 @@ class SparseCrossOp : public OpKernel {

REGISTER_KERNEL_BUILDER(Name("SparseCross")
.Device(DEVICE_CPU)
.TypeConstraint<string>("out_type")
.TypeConstraint<string>("internal_type"),
.TypeConstraint<tstring>("out_type")
.TypeConstraint<tstring>("internal_type"),
SparseCrossOp<false, StringPiece>);

REGISTER_KERNEL_BUILDER(Name("SparseCross")
.Device(DEVICE_CPU)
.TypeConstraint<string>("out_type")
.TypeConstraint<tstring>("out_type")
.TypeConstraint<int64>("internal_type"),
SparseCrossOp<false, string>);
SparseCrossOp<false, tstring>);

REGISTER_KERNEL_BUILDER(Name("SparseCross")
.Device(DEVICE_CPU)
.TypeConstraint<int64>("out_type")
.TypeConstraint<string>("internal_type"),
.TypeConstraint<tstring>("internal_type"),
SparseCrossOp<true, int64>);

REGISTER_KERNEL_BUILDER(Name("SparseCross")
@@ -152,7 +152,7 @@ class SparseToDense : public OpKernel {

TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNELS_ALL);
REGISTER_KERNELS_ALL(bool);
REGISTER_KERNELS_ALL(string);
REGISTER_KERNELS_ALL(tstring);

#undef REGISTER_KERNELS_ALL
#undef REGISTER_KERNELS
@@ -47,8 +47,8 @@ TEST_F(StringFormatGraphTest, Int32Success_7) {
AddInputFromArray<int32>(TensorShape({7}), {1, 2, 3, 4, 5, 6, 7});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({}));
test::FillValues<string>(&expected, {"First tensor: [1 2 3 ... 5 6 7]"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"First tensor: [1 2 3 ... 5 6 7]"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

TEST_F(StringFormatGraphTest, Int32Success_3_3) {
@@ -57,9 +57,9 @@ TEST_F(StringFormatGraphTest, Int32Success_3_3) {
AddInputFromArray<int32>(TensorShape({3, 3}), {1, 2, 3, 4, 5, 6, 7, 8, 9});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({}));
test::FillValues<string>(&expected, {"First tensor: [[1 ... 3]\n ..."
"\n [7 ... 9]]"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
test::FillValues<tstring>(&expected, {"First tensor: [[1 ... 3]\n ..."
"\n [7 ... 9]]"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}

} // end namespace
@@ -39,7 +39,7 @@ class StringJoinOp : public OpKernel {
OP_REQUIRES_OK(context, context->input_list("inputs", &input_list));
TensorShape input_shape;
std::vector<bool> is_scalar;
std::vector<TTypes<string>::ConstFlat> inputs;
std::vector<TTypes<tstring>::ConstFlat> inputs;

for (const auto& input : input_list) {
inputs.push_back(input.flat<tstring>());
@@ -34,7 +34,7 @@ namespace {
// a series of finds in the input string, making it much more effcient than
// SplitOnCharSet.
template <typename Predicate>
std::vector<StringPiece> SplitOnChar(const string& str, const char delim,
std::vector<StringPiece> SplitOnChar(const tstring& str, const char delim,
Predicate p) {
std::vector<StringPiece> result;
StringPiece text(str);
@@ -58,8 +58,8 @@ std::vector<StringPiece> SplitOnChar(const string& str, const char delim,
// is valid.
// Based on str_util::Split.
template <typename Predicate>
std::vector<StringPiece> SplitOnCharSet(const string& str,
const string& delim_set, Predicate p) {
std::vector<StringPiece> SplitOnCharSet(const tstring& str,
const tstring& delim_set, Predicate p) {
std::vector<StringPiece> result;
StringPiece text(str);
StringPiece delims(delim_set);
@@ -80,7 +80,7 @@ std::vector<StringPiece> SplitOnCharSet(const string& str,
// Returns a vector of StringPieces which are valid as long as input `str`
// is valid.
template <typename Predicate>
std::vector<StringPiece> Split(const string& str, const string& delimiter,
std::vector<StringPiece> Split(const tstring& str, const tstring& delimiter,
Predicate predicate) {
if (str.empty()) {
return std::vector<StringPiece>();
@@ -99,7 +99,7 @@ std::vector<StringPiece> Split(const string& str, const string& delimiter,
return SplitOnCharSet(str, delimiter, predicate);
}

std::vector<StringPiece> SplitV2(const string& str, StringPiece sep,
std::vector<StringPiece> SplitV2(const tstring& str, StringPiece sep,
int maxsplit) {
// This SplitV2 method matches the behavior of python's str.split:
// If sep is given, consecutive delimiters are not grouped together
@@ -187,8 +187,8 @@ class StringSplitOp : public OpKernel {
ctx, TensorShapeUtils::IsScalar(delimiter_tensor->shape()),
errors::InvalidArgument("delimiter must be a scalar, got shape: ",
delimiter_tensor->shape().DebugString()));
const auto delimiter_vec = delimiter_tensor->flat<string>();
const string& delimiter = delimiter_vec(0);
const auto delimiter_vec = delimiter_tensor->flat<tstring>();
const tstring& delimiter = delimiter_vec(0);
// Empty delimiter means split the input character by character.
std::vector<StringPiece> tokens;
// Guess that we'll be unpacking a handful of tokens per example.
@@ -150,8 +150,8 @@ class SubstrOp : public OpKernel {
Tensor input_buffer;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_STRING, output_shape, &input_buffer));
TTypes<string, 1>::Tensor input_bcast =
input_buffer.shaped<string, 1>(bcast.result_shape());
TTypes<tstring, 1>::Tensor input_bcast =
input_buffer.shaped<tstring, 1>(bcast.result_shape());
input_bcast =
input.broadcast(BCast::ToIndexArray<1>(bcast.x_bcast()));

@@ -213,8 +213,8 @@ class SubstrOp : public OpKernel {
Tensor input_buffer;
OP_REQUIRES_OK(context, context->allocate_temp(
DT_STRING, output_shape, &input_buffer));
TTypes<string, 2>::Tensor input_bcast =
input_buffer.shaped<string, 2>(bcast.result_shape());
TTypes<tstring, 2>::Tensor input_bcast =
input_buffer.shaped<tstring, 2>(bcast.result_shape());
input_bcast =
input.broadcast(BCast::ToIndexArray<2>(bcast.x_bcast()));
@@ -92,7 +92,7 @@ class SummaryAudioOp : public OpKernel {

Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(s.SerializeToString(&summary_tensor->scalar<string>()()));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}

private:
@@ -81,7 +81,7 @@ TEST_F(SummaryAudioOpTest, Basic3D) {
MakeOp(kMaxOutputs);

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"tag"});
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({4, 2, 2}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
@@ -115,7 +115,7 @@ TEST_F(SummaryAudioOpTest, Basic2D) {
MakeOp(kMaxOutputs);

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"tag"});
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({4, 4}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
@@ -106,7 +106,7 @@ class SummaryImageOp : public OpKernel {

Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(s.SerializeToString(&summary_tensor->scalar<string>()()));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}

template <class T>
@@ -78,7 +78,7 @@ TEST_F(SummaryImageOpTest, ThreeGrayImagesOutOfFive4dInput) {
MakeOp(3 /* max images */);

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"tag"});
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({5, 2, 1, 1}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
@@ -101,7 +101,7 @@ TEST_F(SummaryImageOpTest, OneGrayImage4dInput) {
MakeOp(1 /* max images */);

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"tag"});
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({5 /*batch*/, 2, 1, 1 /*depth*/}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
TF_ASSERT_OK(RunOpKernel());
@@ -121,7 +121,7 @@ TEST_F(SummaryImageOpTest, OneColorImage4dInput) {
MakeOp(1 /* max images */);

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"tag"});
AddInputFromArray<tstring>(TensorShape({}), {"tag"});
AddInputFromArray<float>(
TensorShape({1 /*batch*/, 5 /*rows*/, 2 /*columns*/, 3 /*depth*/}),
{
@@ -52,13 +52,13 @@ class SummaryScalarOp : public OpKernel {
Summary s;
for (int i = 0; i < Ttags.size(); i++) {
Summary::Value* v = s.add_value();
v->set_tag(Ttags(i));
v->set_tag(string(Ttags(i))); // NOLINT
v->set_simple_value(float(Tvalues(i)));
}

Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(s.SerializeToString(&summary_tensor->scalar<string>()()));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}

// If there's only one tag, include it in the error message
@@ -102,12 +102,12 @@ class SummaryHistoOp : public OpKernel {

Summary s;
Summary::Value* v = s.add_value();
v->set_tag(tags.scalar<string>()());
v->set_tag(string(tags.scalar<tstring>()())); // NOLINT
histo.EncodeToProto(v->mutable_histo(), false /* Drop zero buckets */);

Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(s.SerializeToString(&summary_tensor->scalar<string>()()));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
};

@@ -164,7 +164,7 @@ class SummaryMergeOp : public OpKernel {

Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(s.SerializeToString(&summary_tensor->scalar<string>()()));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
};
@@ -60,7 +60,7 @@ TEST_F(SummaryScalarOpTest, SimpleFloat) {
MakeOp(DT_FLOAT);

// Feed and run
AddInputFromArray<string>(TensorShape({3}), {"tag1", "tag2", "tag3"});
AddInputFromArray<tstring>(TensorShape({3}), {"tag1", "tag2", "tag3"});
AddInputFromArray<float>(TensorShape({3}), {1.0f, -0.73f, 10000.0f});
TF_ASSERT_OK(RunOpKernel());

@@ -68,7 +68,7 @@ TEST_F(SummaryScalarOpTest, SimpleFloat) {
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<string>()());
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
EXPECT_SummaryMatches(summary, R"(
value { tag: 'tag1' simple_value: 1.0 }
value { tag: 'tag2' simple_value: -0.73 }
@@ -80,7 +80,7 @@ TEST_F(SummaryScalarOpTest, SimpleDouble) {
MakeOp(DT_DOUBLE);

// Feed and run
AddInputFromArray<string>(TensorShape({3}), {"tag1", "tag2", "tag3"});
AddInputFromArray<tstring>(TensorShape({3}), {"tag1", "tag2", "tag3"});
AddInputFromArray<double>(TensorShape({3}), {1.0, -0.73, 10000.0});
TF_ASSERT_OK(RunOpKernel());

@@ -120,7 +120,7 @@ TEST_F(SummaryScalarOpTest, Error_MismatchedSize) {
MakeOp(DT_FLOAT);

// Feed and run
AddInputFromArray<string>(TensorShape({2}), {"tag1", "tag2"});
AddInputFromArray<tstring>(TensorShape({2}), {"tag1", "tag2"});
AddInputFromArray<float>(TensorShape({3}), {1.0f, -0.73f, 10000.0f});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(), "not the same shape")) << s;
@@ -130,7 +130,7 @@ TEST_F(SummaryScalarOpTest, Error_WrongDimsTags) {
MakeOp(DT_FLOAT);

// Feed and run
AddInputFromArray<string>(TensorShape({2, 1}), {"tag1", "tag2"});
AddInputFromArray<tstring>(TensorShape({2, 1}), {"tag1", "tag2"});
AddInputFromArray<float>(TensorShape({2}), {1.0f, -0.73f});
Status s = RunOpKernel();
EXPECT_TRUE(
@@ -142,7 +142,7 @@ TEST_F(SummaryScalarOpTest, Error_WrongDimsValues) {
MakeOp(DT_FLOAT);

// Feed and run
AddInputFromArray<string>(TensorShape({2}), {"tag1", "tag2"});
AddInputFromArray<tstring>(TensorShape({2}), {"tag1", "tag2"});
AddInputFromArray<float>(TensorShape({2, 1}), {1.0f, -0.73f});
Status s = RunOpKernel();
EXPECT_TRUE(
@@ -168,7 +168,7 @@ TEST_F(SummaryHistoOpTest, SimpleFloat) {
MakeOp(DT_FLOAT);

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"taghisto"});
AddInputFromArray<tstring>(TensorShape({}), {"taghisto"});
AddInputFromArray<float>(TensorShape({3, 2}),
{0.1f, -0.7f, 4.1f, 4., 5.f, 4.f});
TF_ASSERT_OK(RunOpKernel());
@@ -197,7 +197,7 @@ TEST_F(SummaryHistoOpTest, SimpleDouble) {
MakeOp(DT_DOUBLE);

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"taghisto"});
AddInputFromArray<tstring>(TensorShape({}), {"taghisto"});
AddInputFromArray<double>(TensorShape({3, 2}), {0.1, -0.7, 4.1, 4., 5., 4.});
TF_ASSERT_OK(RunOpKernel());

@@ -225,7 +225,7 @@ TEST_F(SummaryHistoOpTest, SimpleHalf) {
MakeOp(DT_HALF);

// Feed and run
AddInputFromList<string>(TensorShape({}), {"taghisto"});
AddInputFromList<tstring>(TensorShape({}), {"taghisto"});
AddInputFromList<Eigen::half>(TensorShape({3, 2}),
{0.1, -0.7, 4.1, 4., 5., 4.});
TF_ASSERT_OK(RunOpKernel());
@@ -254,7 +254,7 @@ TEST_F(SummaryHistoOpTest, Error_WrongDimsTags) {
MakeOp(DT_FLOAT);

// Feed and run
AddInputFromArray<string>(TensorShape({2, 1}), {"tag1", "tag2"});
AddInputFromArray<tstring>(TensorShape({2, 1}), {"tag1", "tag2"});
AddInputFromArray<float>(TensorShape({2}), {1.0f, -0.73f});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(), "tags must be scalar")) << s;
@@ -264,7 +264,7 @@ TEST_F(SummaryHistoOpTest, Error_TooManyTagValues) {
MakeOp(DT_FLOAT);

// Feed and run
AddInputFromArray<string>(TensorShape({2}), {"tag1", "tag2"});
AddInputFromArray<tstring>(TensorShape({2}), {"tag1", "tag2"});
AddInputFromArray<float>(TensorShape({2, 1}), {1.0f, -0.73f});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(), "tags must be scalar")) << s;
@@ -299,7 +299,7 @@ TEST_F(SummaryMergeOpTest, Simple) {
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"value { tag: \"tag4\" simple_value: 11.0 }", &s3));

AddInputFromArray<string>(
AddInputFromArray<tstring>(
TensorShape({3}),
{s1.SerializeAsString(), s2.SerializeAsString(), s3.SerializeAsString()});
TF_ASSERT_OK(RunOpKernel());
@@ -333,9 +333,9 @@ TEST_F(SummaryMergeOpTest, Simple_MultipleInputs) {
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"value { tag: \"tag4\" simple_value: 11.0 }", &s3));

AddInputFromArray<string>(TensorShape({}), {s1.SerializeAsString()});
AddInputFromArray<string>(TensorShape({}), {s2.SerializeAsString()});
AddInputFromArray<string>(TensorShape({}), {s3.SerializeAsString()});
AddInputFromArray<tstring>(TensorShape({}), {s1.SerializeAsString()});
AddInputFromArray<tstring>(TensorShape({}), {s2.SerializeAsString()});
AddInputFromArray<tstring>(TensorShape({}), {s3.SerializeAsString()});
TF_ASSERT_OK(RunOpKernel());

// Check the output size.
@@ -363,8 +363,8 @@ TEST_F(SummaryMergeOpTest, Error_MismatchedSize) {
Summary s2;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"value { tag: \"tagduplicate\" simple_value: 1.0 } ", &s2));
AddInputFromArray<string>(TensorShape({2}),
{s1.SerializeAsString(), s2.SerializeAsString()});
AddInputFromArray<tstring>(TensorShape({2}),
{s1.SerializeAsString(), s2.SerializeAsString()});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(), "Duplicate tag")) << s;
}
@@ -39,7 +39,7 @@ class SummaryTensorOpV2 : public OpKernel {

Summary s;
Summary::Value* v = s.add_value();
v->set_tag(tag.scalar<string>()());
v->set_tag(string(tag.scalar<tstring>()())); // NOLINT

if (tensor.dtype() == DT_STRING) {
// tensor_util.makeNdarray doesn't work for strings in tensor_content
@@ -49,11 +49,11 @@ class SummaryTensorOpV2 : public OpKernel {
}

v->mutable_metadata()->ParseFromString(
serialized_summary_metadata_tensor.scalar<string>()());
serialized_summary_metadata_tensor.scalar<tstring>()());

Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(s.SerializeToString(&summary_tensor->scalar<string>()()));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
};

@@ -92,7 +92,7 @@ class SummaryTensorOp : public OpKernel {

Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(s.SerializeToString(&summary_tensor->scalar<string>()()));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
};

@@ -62,8 +62,8 @@ TEST_F(SummaryTensorOpV2Test, BasicPluginData) {
MakeOp();

// Feed and run
AddInputFromArray<string>(TensorShape({}), {"tag_foo"});
AddInputFromArray<string>(TensorShape({}), {"some string tensor content"});
AddInputFromArray<tstring>(TensorShape({}), {"tag_foo"});
AddInputFromArray<tstring>(TensorShape({}), {"some string tensor content"});

// Create a SummaryMetadata that stores data for 2 plugins.
SummaryMetadata summary_metadata;
@@ -71,8 +71,8 @@ TEST_F(SummaryTensorOpV2Test, BasicPluginData) {
summary_metadata.mutable_plugin_data();
plugin_data->set_plugin_name("foo");
plugin_data->set_content("content_for_plugin_foo");
AddInputFromArray<string>(TensorShape({}),
{summary_metadata.SerializeAsString()});
AddInputFromArray<tstring>(TensorShape({}),
{summary_metadata.SerializeAsString()});

TF_ASSERT_OK(RunOpKernel());
@@ -225,7 +225,7 @@ Status DoTransposeImpl(const Device& d, const Tensor& in,
break;

case DT_STRING:
Transpose<Device, string>::run(d, in, perm, out);
Transpose<Device, tstring>::run(d, in, perm, out);
break;

default:
@@ -186,7 +186,7 @@ struct Transpose<SYCLDevice, T, conjugate> {
};

template <bool conjugate>
struct Transpose<SYCLDevice, string, conjugate> {
struct Transpose<SYCLDevice, tstring, conjugate> {
static void run(const SYCLDevice& d, const Tensor& in,
const gtl::ArraySlice<int32> perm, Tensor* out) {
LOG(FATAL) << "DT_STRING not supported on SYCL device.";
@@ -194,7 +194,7 @@ struct Transpose<SYCLDevice, string, conjugate> {
};

// Explicit instantiation.
template struct Transpose<SYCLDevice, string, false>;
template struct Transpose<SYCLDevice, tstring, false>;

INSTANTIATE(SYCLDevice)
#undef INSTANTIATE
@@ -201,7 +201,7 @@ struct Transpose<GPUDevice, T, conjugate> {
#undef HANDLE_DIM

template <bool conjugate>
struct Transpose<GPUDevice, string, conjugate> {
struct Transpose<GPUDevice, tstring, conjugate> {
static void run(const GPUDevice& d, const Tensor& in,
const gtl::ArraySlice<int32> perm, Tensor* out) {
LOG(FATAL) << "Transpose of DT_STRING tensor not supported on GPU.";
@@ -209,7 +209,7 @@ struct Transpose<GPUDevice, string, conjugate> {
};

// Explicit instantiation.
template struct Transpose<GPUDevice, string, false>;
template struct Transpose<GPUDevice, tstring, false>;

template <>
Status DoTranspose(const GPUDevice& device, const Tensor& in,
@@ -236,7 +236,7 @@ class UniqueOp : public OpKernel {
.TypeConstraint<int64>("out_idx"), \
UniqueOp<type, int64>)
TF_CALL_REAL_NUMBER_TYPES(REGISTER_UNIQUE);
REGISTER_UNIQUE(string)
REGISTER_UNIQUE(tstring)
REGISTER_UNIQUE(bool)
#undef REGISTER_UNIQUE

@@ -132,7 +132,7 @@ static void BM_Unique_STRING(int iters, int dim) {
.Attr("T", DT_STRING)
.Finalize(g, &node));

testing::BytesProcessed(static_cast<int64>(iters) * dim * sizeof(string));
testing::BytesProcessed(static_cast<int64>(iters) * dim * sizeof(tstring));
testing::UseRealTime();
testing::StartTiming();
test::Benchmark("cpu", g).Run(iters);
@@ -79,6 +79,17 @@ inline void SetProtobufStringSwapAllowed(string* src, Cord* dest) {
}
#endif // defined(TENSORFLOW_PROTOBUF_USES_CORD)

inline bool SerializeToTString(const protobuf::MessageLite& proto,
tstring* output) {
#ifdef USE_TSTRING
size_t size = proto.ByteSizeLong();
output->resize_uninitialized(size);
return proto.SerializeToArray(output->data(), size);
#else // USE_TSTRING
return proto.SerializeToString(output);
#endif // USE_TSTRING
}

} // namespace tensorflow

#endif // TENSORFLOW_CORE_PLATFORM_PROTOBUF_H_
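For reference, a minimal call site for the helper defined above might look as follows; `msg` and the surrounding error handling are illustrative assumptions, not part of this commit:

#include "tensorflow/core/platform/protobuf.h"

// msg is any protobuf::MessageLite; `serialized` receives its wire form.
// Under USE_TSTRING the helper sizes the buffer with resize_uninitialized()
// and writes directly via SerializeToArray(); otherwise it falls back to
// SerializeToString().
tensorflow::tstring serialized;
if (!tensorflow::SerializeToTString(msg, &serialized)) {
  // Treat a failed serialization as data loss, mirroring the kernels above.
}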