TensorFlow: ASSERT_OK and EXPECT_OK are also defined in other projects

that are built with TensorFlow (protobuf), so prefix our macros with
TF_ to make them project-specific.
Change: 113197186
This commit is contained in:
Vijay Vasudevan 2016-01-27 13:54:08 -08:00
parent 0cee1d3dac
commit c3c27f275f
45 changed files with 709 additions and 694 deletions

View File

@ -28,6 +28,9 @@
* `GraphOptions.skip_common_subexpression_elimination` has been removed. All
graph optimizer options are now specified via
`GraphOptions.OptimizerOptions`.
* ASSERT_OK / EXPECT_OK macros conflicted with external projects, so they were
renamed to TF_ASSERT_OK and TF_EXPECT_OK. The existing macros are currently
maintained for short-term compatibility but will be removed.
## Bug fixes

View File

@ -52,9 +52,9 @@ class ConstantFoldingTest : public ::testing::Test {
TensorShape shape) {
EXPECT_TRUE(n->IsConstant());
const TensorProto* tensor_proto;
EXPECT_OK(GetNodeAttr(n->def(), "value", &tensor_proto));
TF_EXPECT_OK(GetNodeAttr(n->def(), "value", &tensor_proto));
DataType dtype;
EXPECT_OK(GetNodeAttr(n->def(), "dtype", &dtype));
TF_EXPECT_OK(GetNodeAttr(n->def(), "dtype", &dtype));
Tensor t(dtype);
EXPECT_TRUE(t.FromProto(*tensor_proto));
test::ExpectClose(t, test::AsTensor(values, shape));

View File

@ -84,7 +84,7 @@ TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork) {
Initialize({3, 2, -1, 0});
std::unique_ptr<Session> session(CreateSession());
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def_));
TF_ASSERT_OK(session->Create(def_));
std::vector<std::pair<string, Tensor>> inputs;
// Request two targets: one fetch output and one non-fetched output.
@ -92,7 +92,7 @@ TEST_F(DirectSessionMinusAXTest, RunSimpleNetwork) {
std::vector<string> target_nodes = {y_neg_};
std::vector<Tensor> outputs;
Status s = session->Run(inputs, output_names, target_nodes, &outputs);
ASSERT_OK(s);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
// The first output should be initialized and have the correct
@ -107,7 +107,7 @@ TEST_F(DirectSessionMinusAXTest, TestFeed) {
std::unique_ptr<Session> session(CreateSession());
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def_));
TF_ASSERT_OK(session->Create(def_));
// Fill in the input and ask for the output
//
@ -121,7 +121,7 @@ TEST_F(DirectSessionMinusAXTest, TestFeed) {
// Run the graph
Status s = session->Run(inputs, output_names, {}, &outputs);
ASSERT_OK(s);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
auto mat = outputs[0].matrix<float>();
@ -135,7 +135,7 @@ TEST_F(DirectSessionMinusAXTest, TestConcurrency) {
Initialize({1, 2, 3, 4});
std::unique_ptr<Session> session(CreateSession());
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def_));
TF_ASSERT_OK(session->Create(def_));
// Fill in the input and ask for the output
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
@ -172,7 +172,7 @@ TEST_F(DirectSessionMinusAXTest, TestPerSessionThreads) {
std::unique_ptr<Session> session(NewSession(options));
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def_));
TF_ASSERT_OK(session->Create(def_));
// Fill in the input and ask for the output
thread::ThreadPool* tp = new thread::ThreadPool(Env::Default(), "test", 4);
@ -204,7 +204,7 @@ TEST_F(DirectSessionMinusAXTest, TwoCreateCallsFails) {
Initialize({1, 2, 3, 4});
std::unique_ptr<Session> session(CreateSession());
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def_));
TF_ASSERT_OK(session->Create(def_));
// Second is not.
ASSERT_FALSE(session->Create(def_).ok());
@ -239,7 +239,7 @@ TEST_F(DirectSessionMinusAXTest, InvalidDevice) {
std::unique_ptr<Session> session(CreateSession());
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def));
TF_ASSERT_OK(session->Create(def));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> output_names = {y->name() + ":0"};
std::vector<Tensor> outputs;
@ -252,8 +252,8 @@ TEST_F(DirectSessionMinusAXTest, InvalidDevice) {
y->set_assigned_device_name("/job:localhost/replica:0/task:0/cpu:1");
test::graph::ToGraphDef(&graph, &def);
session.reset(CreateSession());
ASSERT_OK(session->Create(def));
ASSERT_OK(session->Run(inputs, output_names, {}, &outputs));
TF_ASSERT_OK(session->Create(def));
TF_ASSERT_OK(session->Run(inputs, output_names, {}, &outputs));
}
TEST(DirectSessionTest, KeepsStateAcrossRunsOfSession) {
@ -278,18 +278,18 @@ TEST(DirectSessionTest, KeepsStateAcrossRunsOfSession) {
std::unique_ptr<Session> session(CreateSession());
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def));
TF_ASSERT_OK(session->Create(def));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<Tensor> outputs;
// Initialize the variable
Status s = session->Run(inputs, {init->name()}, {}, &outputs);
ASSERT_OK(s);
TF_ASSERT_OK(s);
// Get the variable's data
s = session->Run(inputs, {var->name() + ":0"}, {}, &outputs);
ASSERT_OK(s);
TF_ASSERT_OK(s);
ASSERT_EQ(1, outputs.size());
ASSERT_TRUE(outputs[0].IsInitialized());
EXPECT_EQ(20.0, outputs[0].flat<float>()(0));
@ -315,7 +315,7 @@ TEST(DirectSessionTest, MultipleFeedTest) {
std::unique_ptr<Session> session(CreateSession());
ASSERT_TRUE(session != nullptr);
ASSERT_OK(session->Create(def));
TF_ASSERT_OK(session->Create(def));
std::vector<Tensor> outputs;
@ -389,7 +389,7 @@ TEST(DirectSessionTest, DarthKernel) {
GraphDef def;
test::graph::ToGraphDef(&g, &def);
auto sess = CreateSession();
ASSERT_OK(sess->Create(def));
TF_ASSERT_OK(sess->Create(def));
std::vector<Tensor> outputs;
auto s = sess->Run({}, {y->name() + ":0"}, {}, &outputs);
EXPECT_TRUE(errors::IsInternal(s));

View File

@ -38,7 +38,7 @@ class GpuStreamUtilTest : public OpsTestBase {
TEST_F(GpuStreamUtilTest, BogusOpts) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Graph g(OpRegistry::Global());
ASSERT_OK(b.ToGraph(&g));
TF_ASSERT_OK(b.ToGraph(&g));
std::unordered_map<int, int> node_to_stream_id;
gpu_stream_util::AssignStreamsOpts opts;
Status status;
@ -58,10 +58,10 @@ TEST_F(GpuStreamUtilTest, BogusOpts) {
TEST_F(GpuStreamUtilTest, EmptyGraph) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
Graph g(OpRegistry::Global());
ASSERT_OK(b.ToGraph(&g));
TF_ASSERT_OK(b.ToGraph(&g));
std::unordered_map<int, int> node_to_stream_id;
gpu_stream_util::AssignStreamsOpts opts;
ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
TF_ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
EXPECT_EQ(2, node_to_stream_id.size()); // _SOURCE and _SINK
}
@ -70,11 +70,11 @@ TEST_F(GpuStreamUtilTest, SimpleGraphOneStream) {
ops::MatMul(ops::Const(Tensor(DT_FLOAT), b.opts()),
ops::Const(Tensor(DT_FLOAT), b.opts()), b.opts());
Graph g(OpRegistry::Global());
ASSERT_OK(b.ToGraph(&g));
TF_ASSERT_OK(b.ToGraph(&g));
std::unordered_map<int, int> node_to_stream_id;
gpu_stream_util::AssignStreamsOpts opts;
ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
TF_ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
// There should be 5 nodes assigned.
EXPECT_EQ(5, node_to_stream_id.size());
@ -90,12 +90,12 @@ TEST_F(GpuStreamUtilTest, SimpleGraphManyStreams) {
ops::MatMul(ops::Const(Tensor(DT_FLOAT), b.opts()),
ops::Const(Tensor(DT_FLOAT), b.opts()), b.opts());
Graph g(OpRegistry::Global());
ASSERT_OK(b.ToGraph(&g));
TF_ASSERT_OK(b.ToGraph(&g));
std::unordered_map<int, int> node_to_stream_id;
gpu_stream_util::AssignStreamsOpts opts;
opts.max_streams = 3;
ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
TF_ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
// There should be 5 nodes assigned.
EXPECT_EQ(5, node_to_stream_id.size());
@ -115,7 +115,7 @@ TEST_F(GpuStreamUtilTest, StreamOverrides) {
ops::Const(Tensor(DT_FLOAT), b.opts()), b.opts());
ops::_Send(n, "output", "/gpu:0", 0, "/cpu:0", b.opts().WithName("output"));
Graph g(OpRegistry::Global());
ASSERT_OK(b.ToGraph(&g));
TF_ASSERT_OK(b.ToGraph(&g));
// Perform stream assignment using a large number of streams, but with
// op types constrained to specific streams.
@ -126,7 +126,7 @@ TEST_F(GpuStreamUtilTest, StreamOverrides) {
opts.send_stream = 91;
opts.recv_stream = 92;
opts.compute_stream = 93;
ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
TF_ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));
// There should be 7 nodes assigned.
EXPECT_EQ(7, node_to_stream_id.size()); // including _SOURCE and _SINK

View File

@ -238,10 +238,10 @@ TEST_F(SimplePlacerTest, TestNoConstraints) {
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
ops::UnaryOp("TestRelu", ops::NodeOut(input, 0), b.opts().WithName("n1"));
ops::UnaryOp("TestRelu", ops::NodeOut(input, 1), b.opts().WithName("n2"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
EXPECT_OK(Place(&g));
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", DEVICE_CPU);
EXPECT_DEVICE_TYPE(g, "n1", DEVICE_GPU);
EXPECT_DEVICE_TYPE(g, "n2", DEVICE_GPU);
@ -259,10 +259,10 @@ TEST_F(SimplePlacerTest, TestDeviceTypeConstraints) {
ops::BinaryOp("AssignCPU", var_cpu, input, b.opts().WithName("assign_cpu"));
Node* var_gpu = ops::SourceOp("VariableGPU", b.opts().WithName("var_gpu"));
ops::BinaryOp("AssignGPU", var_gpu, input, b.opts().WithName("assign_gpu"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
EXPECT_OK(Place(&g));
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", DEVICE_CPU);
EXPECT_DEVICE_TYPE(g, "var_cpu", DEVICE_CPU);
EXPECT_DEVICE_TYPE(g, "assign_cpu", DEVICE_CPU);
@ -281,10 +281,10 @@ TEST_F(SimplePlacerTest, TestPartialSpec) {
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/job:a"));
ops::SourceOp("TestVariable",
b.opts().WithName("var").WithDevice("/job:a"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
EXPECT_OK(Place(&g));
TF_EXPECT_OK(Place(&g));
EXPECT_DEVICE_TYPE(g, "in", DEVICE_CPU);
EXPECT_DEVICE_CONTAINS(g, "in", "/job:a");
EXPECT_DEVICE_TYPE(g, "var", DEVICE_GPU);
@ -297,13 +297,13 @@ TEST_F(SimplePlacerTest, TestAssignedDevicePreserved) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")
->set_assigned_device_name("/job:a/replica:0/task:0/cpu:7");
EXPECT_OK(Place(&g));
TF_EXPECT_OK(Place(&g));
EXPECT_EQ("/job:a/replica:0/task:0/cpu:7",
GetNodeByName(g, "in")->assigned_device_name());
}
@ -317,12 +317,12 @@ TEST_F(SimplePlacerTest, TestPartialSpecGpuToCpu) {
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/gpu:0"));
ops::SourceOp("TestVariable",
b.opts().WithName("var").WithDevice("/gpu:0"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
SessionOptions options;
options.config.set_allow_soft_placement(true);
EXPECT_OK(Place(&g, &options));
TF_EXPECT_OK(Place(&g, &options));
EXPECT_DEVICE_TYPE(g, "in", DEVICE_CPU);
EXPECT_DEVICE_CONTAINS(g, "in", "/cpu");
EXPECT_DEVICE_TYPE(g, "var", DEVICE_GPU);
@ -336,7 +336,7 @@ TEST_F(SimplePlacerTest, TestAssignedGpuDeviceToCpuDevice) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")
@ -369,7 +369,7 @@ Status SimplePlacerTest::ReferenceTestHelper(const string& variable_op_type,
ops::BinaryOp(assign_op_type, var, input,
b.opts().WithName(strings::StrCat("assign_", i)));
}
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
TF_RETURN_IF_ERROR(Place(&g));
@ -388,25 +388,25 @@ Status SimplePlacerTest::ReferenceTestHelper(const string& variable_op_type,
// (unconstrained, CPU-only, and GPU-only).
TEST_F(SimplePlacerTest, TestReferenceConnection) {
Status s;
EXPECT_OK(ReferenceTestHelper("TestVariable", "TestAssign", DEVICE_GPU));
EXPECT_OK(ReferenceTestHelper("TestVariable", "AssignCPU", DEVICE_CPU));
EXPECT_OK(ReferenceTestHelper("TestVariable", "AssignGPU", DEVICE_GPU));
EXPECT_OK(ReferenceTestHelper("VariableCPU", "TestAssign", DEVICE_CPU));
EXPECT_OK(ReferenceTestHelper("VariableCPU", "AssignCPU", DEVICE_CPU));
TF_EXPECT_OK(ReferenceTestHelper("TestVariable", "TestAssign", DEVICE_GPU));
TF_EXPECT_OK(ReferenceTestHelper("TestVariable", "AssignCPU", DEVICE_CPU));
TF_EXPECT_OK(ReferenceTestHelper("TestVariable", "AssignGPU", DEVICE_GPU));
TF_EXPECT_OK(ReferenceTestHelper("VariableCPU", "TestAssign", DEVICE_CPU));
TF_EXPECT_OK(ReferenceTestHelper("VariableCPU", "AssignCPU", DEVICE_CPU));
{
Status s = ReferenceTestHelper("VariableCPU", "AssignGPU", DEVICE_CPU);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(StringPiece(s.error_message())
.contains("no device type supports both of those nodes"));
}
EXPECT_OK(ReferenceTestHelper("VariableGPU", "TestAssign", DEVICE_GPU));
TF_EXPECT_OK(ReferenceTestHelper("VariableGPU", "TestAssign", DEVICE_GPU));
{
Status s = ReferenceTestHelper("VariableGPU", "AssignCPU", DEVICE_CPU);
EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
EXPECT_TRUE(StringPiece(s.error_message())
.contains("no device type supports both of those nodes"));
}
EXPECT_OK(ReferenceTestHelper("VariableGPU", "AssignGPU", DEVICE_GPU));
TF_EXPECT_OK(ReferenceTestHelper("VariableGPU", "AssignGPU", DEVICE_GPU));
}
// Test the handling of '@node_name' colocation constraints, when
@ -431,10 +431,10 @@ TEST_F(SimplePlacerTest, TestColocatedChain) {
.WithDevice(strings::StrCat("@n_", i - 1)));
}
}
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
EXPECT_OK(Place(&g));
TF_EXPECT_OK(Place(&g));
for (int i = 0; i < 100; ++i) {
if (i % 10 != 0) {
EXPECT_COLOCATED(g, strings::StrCat("n_", i - (i % 1)),
@ -463,10 +463,10 @@ TEST_F(SimplePlacerTest, TestColocatedChainWithLongRangeColocations) {
.WithName(strings::StrCat("n_", i))
.WithDevice(strings::StrCat("@n_", i % 10)));
}
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
EXPECT_OK(Place(&g));
TF_EXPECT_OK(Place(&g));
for (int i = 10; i < 100; ++i) {
EXPECT_COLOCATED(g, strings::StrCat("n_", i % 10),
strings::StrCat("n_", i));
@ -497,10 +497,10 @@ TEST_F(SimplePlacerTest, TestColocationAndReferenceConnections) {
.WithName(strings::StrCat("assign_", i))
.WithDevice(strings::StrCat("@assign_", i % 3)));
}
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
EXPECT_OK(Place(&g));
TF_EXPECT_OK(Place(&g));
for (int i = 0; i < 10; ++i) {
EXPECT_COLOCATED(g, strings::StrCat("var_", i),
strings::StrCat("assign_", i));
@ -521,7 +521,7 @@ TEST_F(SimplePlacerTest, TestEmptyDeviceSet) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
DeviceSet empty;
@ -541,7 +541,7 @@ TEST_F(SimplePlacerTest, TestHeterogeneousDeviceSetFailure) {
Node* var = ops::SourceOp("VariableGPU", b.opts().WithName("var"));
ops::BinaryOp("TestAssign", var, in,
b.opts().WithName("assign").WithDevice("/job:b/task:1"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
DeviceSet heterogeneous;
@ -564,7 +564,7 @@ TEST_F(SimplePlacerTest, TestUnknownDevice) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/job:foo"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
@ -582,7 +582,7 @@ TEST_F(SimplePlacerTest, TestUnknownMergedDevice) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/job:foo"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
@ -600,7 +600,7 @@ TEST_F(SimplePlacerTest, TestUnknownAssignedDevice) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name("/job:foo");
@ -619,7 +619,7 @@ TEST_F(SimplePlacerTest, TestNoKernelsRegistered) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableNoKernels", b.opts().WithName("var"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
@ -637,7 +637,7 @@ TEST_F(SimplePlacerTest, TestNoDevicesRegistered) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableGPU", b.opts().WithName("var"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
DeviceSet cpu_only;
@ -658,7 +658,7 @@ TEST_F(SimplePlacerTest, TestMalformedDeviceSpecification) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("/foo:bar"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
@ -673,7 +673,7 @@ TEST_F(SimplePlacerTest, TestMalformedAssignedDevice) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name("/foo:bar");
@ -691,7 +691,7 @@ TEST_F(SimplePlacerTest, TestNonUniqueAssignedDevice) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
GetNodeByName(g, "in")->set_assigned_device_name("/job:a");
@ -710,7 +710,7 @@ TEST_F(SimplePlacerTest, TestUnknownColocatedNode) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("@foo"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
@ -725,7 +725,7 @@ TEST_F(SimplePlacerTest, TestMalformedColocatedNode) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestInput", b.opts().WithName("in").WithDevice("@"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
@ -741,12 +741,12 @@ TEST_F(SimplePlacerTest, TestNonexistentGpuAllowSoftPlacement) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestDevice", b.opts().WithName("in").WithDevice("/gpu:11"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
SessionOptions options;
options.config.set_allow_soft_placement(true);
EXPECT_OK(Place(&g, &options));
TF_EXPECT_OK(Place(&g, &options));
EXPECT_DEVICE_CONTAINS(g, "in", "/gpu:0");
}
@ -757,7 +757,7 @@ TEST_F(SimplePlacerTest, TestNonexistentGpuNoAllowSoftPlacement) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("TestDevice", b.opts().WithName("in").WithDevice("/gpu:11"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
SessionOptions options;
@ -776,7 +776,7 @@ TEST_F(SimplePlacerTest, TestUnsupportedDeviceNoAllowSoftPlacement) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableGPU", b.opts().WithName("var").WithDevice("/cpu:0"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
SessionOptions options;
@ -793,12 +793,12 @@ TEST_F(SimplePlacerTest, TestUnsupportedDeviceAllowSoftPlacement) {
{ // Scope for temporary variables used to construct g.
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
ops::SourceOp("VariableGPU", b.opts().WithName("var").WithDevice("/cpu:0"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
SessionOptions options;
options.config.set_allow_soft_placement(true);
EXPECT_OK(Place(&g, &options));
TF_EXPECT_OK(Place(&g, &options));
}
// Test that a graph with device type and reference constraints on
@ -820,12 +820,12 @@ TEST_F(SimplePlacerTest, TestDeviceTypeConstraintsAllowSoftPlacement) {
Node* var_cpu = ops::SourceOp("VariableCPU", b.opts().WithName("var_cpu"));
ops::UnaryOp("TestDeviceEnforce", var_cpu,
b.opts().WithName("force_cpu").WithDevice("/gpu:0"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
SessionOptions options;
options.config.set_allow_soft_placement(true);
EXPECT_OK(Place(&g, &options));
TF_EXPECT_OK(Place(&g, &options));
EXPECT_DEVICE_TYPE(g, "var_gpu", DEVICE_GPU);
EXPECT_DEVICE_TYPE(g, "force_gpu", DEVICE_GPU);
EXPECT_COLOCATED(g, "var_gpu", "force_gpu");
@ -843,7 +843,7 @@ TEST_F(SimplePlacerTest, TestUnsatisfiableConstraintWithReferenceConnections) {
Node* var = ops::SourceOp("VariableGPU", b.opts().WithName("var"));
Node* input = ops::SourceOp("TestInput", b.opts().WithName("in"));
ops::BinaryOp("AssignCPU", var, input, b.opts().WithName("assign"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);
@ -865,7 +865,7 @@ TEST_F(SimplePlacerTest, TestUnsatisfiableConstraintWithColocatedNodes) {
b.opts().WithName("relu_1").WithDevice("@in"));
ops::UnaryOp("ReluGPU", relu_1,
b.opts().WithName("relu_2").WithDevice("@relu_1"));
EXPECT_OK(BuildGraph(b, &g));
TF_EXPECT_OK(BuildGraph(b, &g));
}
Status s = Place(&g);

View File

@ -560,14 +560,14 @@ TEST(InstantiateErrors, TypeList_Missing_Arg) {
TEST(FunctionCallFrame, Void_Void) {
FunctionCallFrame frame({}, {});
EXPECT_OK(frame.SetArgs({}));
TF_EXPECT_OK(frame.SetArgs({}));
auto a = test::AsTensor<float>({100});
HasError(frame.SetArgs({a}), "Invalid argument");
Tensor v;
HasError(frame.GetArg(0, &v), "Out of range");
HasError(frame.SetRetval(0, v), "Out of range");
std::vector<Tensor> rets;
EXPECT_OK(frame.GetRetvals(&rets));
TF_EXPECT_OK(frame.GetRetvals(&rets));
EXPECT_EQ(rets.size(), 0);
}
@ -579,14 +579,14 @@ TEST(FunctionCallFrame, Float_Float_Float) {
auto c = test::AsTensor<int64>({300});
HasError(frame.SetArgs({a, c}),
"Invalid argument: Expects arg[1] to be float");
EXPECT_OK(frame.SetArgs({a, b}));
TF_EXPECT_OK(frame.SetArgs({a, b}));
Tensor v;
HasError(frame.GetArg(-1, &v), "Out of range");
HasError(frame.GetArg(2, &v), "Out of range");
EXPECT_OK(frame.GetArg(0, &v));
TF_EXPECT_OK(frame.GetArg(0, &v));
test::ExpectTensorEqual<float>(a, v);
EXPECT_OK(frame.GetArg(1, &v));
TF_EXPECT_OK(frame.GetArg(1, &v));
test::ExpectTensorEqual<float>(b, v);
v = test::AsTensor<float>({-100});
@ -597,10 +597,10 @@ TEST(FunctionCallFrame, Float_Float_Float) {
std::vector<Tensor> rets;
HasError(frame.GetRetvals(&rets), "does not have value");
EXPECT_OK(frame.SetRetval(0, v));
TF_EXPECT_OK(frame.SetRetval(0, v));
HasError(frame.SetRetval(0, v), "has already been set");
EXPECT_OK(frame.GetRetvals(&rets));
TF_EXPECT_OK(frame.GetRetvals(&rets));
EXPECT_EQ(rets.size(), 1);
test::ExpectTensorEqual<float>(rets[0], v);
}

View File

@ -32,7 +32,7 @@ class NodeDefBuilderTest : public ::testing::Test {
protected:
// Specify an OpDef via an OpDefBuilder.
void Op(const OpDefBuilder& op_def_builder) {
EXPECT_OK(op_def_builder.Finalize(&op_def_));
TF_EXPECT_OK(op_def_builder.Finalize(&op_def_));
}
// Resets builder_ with a new NodeDefBuilder using the Op from the last call
@ -50,7 +50,7 @@ class NodeDefBuilderTest : public ::testing::Test {
DataTypeSlice expected_out_types, StringPiece proto) {
NodeDef node_def;
Status status = builder.Finalize(&node_def);
EXPECT_OK(status);
TF_EXPECT_OK(status);
if (!status.ok()) return;
NodeDef expected;
protobuf::TextFormat::ParseFromString(strings::StrCat("name: 'n' ", proto),
@ -60,7 +60,7 @@ class NodeDefBuilderTest : public ::testing::Test {
DataTypeVector in_types, out_types;
status =
InOutTypesForNode(node_def, builder.op_def(), &in_types, &out_types);
EXPECT_OK(status);
TF_EXPECT_OK(status);
if (!status.ok()) return;
EXPECT_EQ(DataTypeSliceString(expected_in_types),
DataTypeVectorString(in_types));
@ -68,7 +68,7 @@ class NodeDefBuilderTest : public ::testing::Test {
DataTypeVectorString(out_types));
status = ValidateNodeDef(node_def, op_def_);
EXPECT_OK(status);
TF_EXPECT_OK(status);
}
// Calls Finalize() and verifies it returns an error.

View File

@ -29,7 +29,7 @@ namespace {
OpDef ToOpDef(const OpDefBuilder& builder) {
OpDef op_def;
EXPECT_OK(builder.Finalize(&op_def));
TF_EXPECT_OK(builder.Finalize(&op_def));
return op_def;
}
@ -41,7 +41,7 @@ NodeDef ToNodeDef(const string& text) {
NodeDef ToNodeDef(const NodeDefBuilder& builder) {
NodeDef node_def;
EXPECT_OK(builder.Finalize(&node_def));
TF_EXPECT_OK(builder.Finalize(&node_def));
return node_def;
}
@ -339,7 +339,7 @@ TEST(NameRangesForNodeTest, Simple) {
NameRangeMap inputs, outputs;
const NodeDef node_def = ToNodeDef(
NodeDefBuilder("simple", &op_def).Input(FakeInput()).Input(FakeInput()));
EXPECT_OK(NameRangesForNode(node_def, op_def, &inputs, &outputs));
TF_EXPECT_OK(NameRangesForNode(node_def, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 2}}}), outputs);
@ -360,7 +360,7 @@ TEST(NameRangesForNodeTest, Polymorphic) {
const NodeDef node_def1 = ToNodeDef(NodeDefBuilder("poly", &op_def)
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32)));
EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
EXPECT_EQ("poly = Polymorphic[T=DT_INT32](a, b)",
@ -369,7 +369,7 @@ TEST(NameRangesForNodeTest, Polymorphic) {
const NodeDef node_def2 = ToNodeDef(NodeDefBuilder("poly", &op_def)
.Input(FakeInput(DT_BOOL))
.Input(FakeInput(DT_BOOL)));
EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 1}}, {"b", {1, 2}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}}), outputs);
EXPECT_EQ("poly = Polymorphic[T=DT_BOOL](a, b)", SummarizeNodeDef(node_def2));
@ -390,7 +390,7 @@ TEST(NameRangesForNodeTest, NRepeats) {
.Input(FakeInput(4, DT_INT32))
.Input(FakeInput(4, DT_FLOAT))
.Attr("M", 3));
EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 4}}, {"b", {4, 8}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 5}}, {"e", {5, 8}}}),
outputs);
@ -402,7 +402,7 @@ TEST(NameRangesForNodeTest, NRepeats) {
.Input(FakeInput(2, DT_INT32))
.Input(FakeInput(2, DT_DOUBLE))
.Attr("M", 7));
EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 4}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
outputs);
@ -430,7 +430,7 @@ TEST(NameRangesForNodeTest, TypeList) {
.Input(FakeInput({DT_BOOL, DT_FLOAT}))
.Input(FakeInput(4, DT_FLOAT))
.Attr("T3", {DT_INT32, DT_DOUBLE, DT_STRING}));
EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
TF_EXPECT_OK(NameRangesForNode(node_def1, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 2}}, {"b", {2, 6}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 4}}, {"d", {4, 7}}, {"e", {7, 9}}}),
outputs);
@ -444,7 +444,7 @@ TEST(NameRangesForNodeTest, TypeList) {
.Input(FakeInput(7, DT_INT32))
.Input(FakeInput({DT_DOUBLE}))
.Attr("T3", {DT_DOUBLE, DT_STRING}));
EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
TF_EXPECT_OK(NameRangesForNode(node_def2, op_def, &inputs, &outputs));
EXPECT_EQ(NameRangeMap({{"a", {0, 7}}, {"b", {7, 8}}}), inputs);
EXPECT_EQ(NameRangeMap({{"c", {0, 1}}, {"d", {1, 3}}, {"e", {3, 10}}}),
outputs);

View File

@ -50,25 +50,25 @@ class OpCompatibilityTest : public OpsTestBase {
void ExpectSuccess(const OpDef& old_op_def) {
// Record the original signature before we change *node_def().
DataTypeVector old_in_types, old_out_types;
ASSERT_OK(InOutTypesForNode(*node_def(), old_op_def, &old_in_types,
&old_out_types));
TF_ASSERT_OK(InOutTypesForNode(*node_def(), old_op_def, &old_in_types,
&old_out_types));
// This should be all that is needed to get compatibility.
const OpDef* new_op_def = RegisteredOpDef();
AddDefaultsToNodeDef(*new_op_def, node_def());
// Validate that it is indeed compatible.
ASSERT_OK(ValidateNodeDef(*node_def(), *new_op_def));
TF_ASSERT_OK(ValidateNodeDef(*node_def(), *new_op_def));
DataTypeVector new_in_types, new_out_types;
ASSERT_OK(InOutTypesForNode(*node_def(), *new_op_def, &new_in_types,
&new_out_types));
TF_ASSERT_OK(InOutTypesForNode(*node_def(), *new_op_def, &new_in_types,
&new_out_types));
ASSERT_EQ(new_in_types, old_in_types);
ASSERT_EQ(new_out_types, old_out_types);
ASSERT_OK(OpDefCompatible(old_op_def, *new_op_def));
TF_ASSERT_OK(OpDefCompatible(old_op_def, *new_op_def));
// Verify the Op actually runs. Result() will return the output.
ASSERT_OK(InitOp());
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(InitOp());
TF_ASSERT_OK(RunOpKernel());
}
string Result() { return GetOutput(0)->scalar<string>()(); }
@ -90,8 +90,8 @@ class OpCompatibilityTest : public OpsTestBase {
const string& compatibility_error) {
// Record the original signature before we change *node_def().
DataTypeVector old_in_types, old_out_types;
ASSERT_OK(InOutTypesForNode(*node_def(), old_op_def, &old_in_types,
&old_out_types));
TF_ASSERT_OK(InOutTypesForNode(*node_def(), old_op_def, &old_in_types,
&old_out_types));
// This should be all that is needed to get compatibility.
const OpDef* new_op_def = RegisteredOpDef();
@ -114,19 +114,19 @@ class OpCompatibilityTest : public OpsTestBase {
const string& compatibility_error) {
// Record the original signature before we change *node_def().
DataTypeVector old_in_types, old_out_types;
ASSERT_OK(InOutTypesForNode(*node_def(), old_op_def, &old_in_types,
&old_out_types));
TF_ASSERT_OK(InOutTypesForNode(*node_def(), old_op_def, &old_in_types,
&old_out_types));
// This should be all that is needed to get compatibility.
const OpDef* new_op_def = RegisteredOpDef();
AddDefaultsToNodeDef(*new_op_def, node_def());
// Validate that it is valid, but with incompatible types.
ASSERT_OK(ValidateNodeDef(*node_def(), *new_op_def));
TF_ASSERT_OK(ValidateNodeDef(*node_def(), *new_op_def));
DataTypeVector new_in_types, new_out_types;
ASSERT_OK(InOutTypesForNode(*node_def(), *new_op_def, &new_in_types,
&new_out_types));
TF_ASSERT_OK(InOutTypesForNode(*node_def(), *new_op_def, &new_in_types,
&new_out_types));
if (new_in_types == old_in_types && new_out_types == old_out_types) {
ADD_FAILURE() << SummarizeNodeDef(*node_def()) << "\n"
<< DataTypeVectorString(new_in_types) << " -> "
@ -151,13 +151,13 @@ REGISTER_OP("Same")
REGISTER_KERNEL_BUILDER(Name("Same").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, Same) {
ASSERT_OK(NodeDefBuilder("same", "Same")
.Input(FakeInput())
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(3))
.Input(FakeInput(3, DT_FLOAT))
.Input(FakeInput(2, DT_BOOL))
.Finalize(node_def()));
TF_ASSERT_OK(NodeDefBuilder("same", "Same")
.Input(FakeInput())
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(3))
.Input(FakeInput(3, DT_FLOAT))
.Input(FakeInput(2, DT_BOOL))
.Finalize(node_def()));
ExpectSuccess(*RegisteredOpDef());
EXPECT_EQ(
"same = Same[N=3, T=DT_FLOAT, TList=[DT_BOOL, DT_BOOL]](a, b, c, c:1, "
@ -171,9 +171,9 @@ REGISTER_KERNEL_BUILDER(Name("AddAttr").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, AddAttr) {
OpDef old_op_def;
ASSERT_OK(
TF_ASSERT_OK(
OpDefBuilder("AddAttr").Output("ndef: string").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("add_attr", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(NodeDefBuilder("add_attr", &old_op_def).Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("add_attr = AddAttr[a=42]()", Result());
}
@ -184,13 +184,13 @@ REGISTER_KERNEL_BUILDER(Name("LessStrict").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, LessStrict) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("LessStrict")
.Output("ndef: string")
.Attr("a: {'A', 'B'}")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("less_strict", &old_op_def)
.Attr("a", "B")
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("LessStrict")
.Output("ndef: string")
.Attr("a: {'A', 'B'}")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("less_strict", &old_op_def)
.Attr("a", "B")
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("less_strict = LessStrict[a=\"B\"]()", Result());
}
@ -202,13 +202,13 @@ REGISTER_KERNEL_BUILDER(Name("RemoveRestriction").Device(DEVICE_CPU),
TEST_F(OpCompatibilityTest, RemoveRestriction) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("RemoveRestriction")
.Output("ndef: string")
.Attr("a: {int32, bool}")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("remove_restriction", &old_op_def)
.Attr("a", DT_INT32)
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("RemoveRestriction")
.Output("ndef: string")
.Attr("a: {int32, bool}")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("remove_restriction", &old_op_def)
.Attr("a", DT_INT32)
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("remove_restriction = RemoveRestriction[a=DT_INT32]()", Result());
}
@ -219,15 +219,15 @@ REGISTER_KERNEL_BUILDER(Name("AttrOrder").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, AttrOrder) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AttrOrder")
.Output("ndef: string")
.Attr("b: bool")
.Attr("a: int")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("attr_order", &old_op_def)
.Attr("b", true)
.Attr("a", 7)
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AttrOrder")
.Output("ndef: string")
.Attr("b: bool")
.Attr("a: int")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("attr_order", &old_op_def)
.Attr("b", true)
.Attr("a", 7)
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("attr_order = AttrOrder[a=7, b=true]()", Result());
}
@ -238,13 +238,13 @@ REGISTER_KERNEL_BUILDER(Name("AddDefault").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, AddDefault) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AddDefault")
.Output("ndef: string")
.Attr("a: int")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("add_default", &old_op_def)
.Attr("a", 765)
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AddDefault")
.Output("ndef: string")
.Attr("a: int")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("add_default", &old_op_def)
.Attr("a", 765)
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("add_default = AddDefault[a=765]()", Result());
}
@ -256,11 +256,12 @@ REGISTER_KERNEL_BUILDER(Name("RemoveDefault").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, RemoveDefault) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("RemoveDefault")
.Output("ndef: string")
.Attr("a: int = 91")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("remove_default", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("RemoveDefault")
.Output("ndef: string")
.Attr("a: int = 91")
.Finalize(&old_op_def));
TF_ASSERT_OK(
NodeDefBuilder("remove_default", &old_op_def).Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("remove_default = RemoveDefault[a=91]()", Result());
}
@ -275,13 +276,13 @@ REGISTER_KERNEL_BUILDER(Name("TypePolymorphic").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, TypePolymorphic) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("TypePolymorphic")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("type_polymorphic", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("TypePolymorphic")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("type_polymorphic", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("type_polymorphic = TypePolymorphic[T=DT_INT32](a)", Result());
}
@ -296,13 +297,13 @@ REGISTER_KERNEL_BUILDER(Name("MakeList").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, MakeList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("MakeList")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("make_list", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("MakeList")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("make_list", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("make_list = MakeList[N=1](a)", Result());
}
@ -319,13 +320,13 @@ REGISTER_KERNEL_BUILDER(Name("MakePolyList").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, MakePolyList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("MakePolyList")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("make_poly_list", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("MakePolyList")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("make_poly_list", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("make_poly_list = MakePolyList[N=1, T=DT_INT32](a)", Result());
}
@ -340,13 +341,13 @@ REGISTER_KERNEL_BUILDER(Name("MakeAnyList").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, MakeAnyList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("MakeAnyList")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("make_any_list", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("MakeAnyList")
.Input("a: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("make_any_list", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("make_any_list = MakeAnyList[T=[DT_INT32]](a)", Result());
}
@ -362,14 +363,14 @@ REGISTER_KERNEL_BUILDER(Name("PolyIntoList").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, PolyIntoList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("PolyIntoList")
.Input("a: T")
.Output("ndef: string")
.Attr("T: type")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("poly_into_list", &old_op_def)
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("PolyIntoList")
.Input("a: T")
.Output("ndef: string")
.Attr("T: type")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("poly_into_list", &old_op_def)
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("poly_into_list = PolyIntoList[N=1, T=DT_INT32](a)", Result());
}
@ -387,15 +388,15 @@ REGISTER_KERNEL_BUILDER(Name("MakeMultipleSameList").Device(DEVICE_CPU),
TEST_F(OpCompatibilityTest, MakeMultipleSameList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("MakeMultipleSameList")
.Input("a: int32")
.Input("b: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("make_list", &old_op_def)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("MakeMultipleSameList")
.Input("a: int32")
.Input("b: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("make_list", &old_op_def)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("make_list = MakeMultipleSameList[N=2](a, b)", Result());
}
@ -411,15 +412,15 @@ REGISTER_KERNEL_BUILDER(Name("MakeMultipleAnyList").Device(DEVICE_CPU),
TEST_F(OpCompatibilityTest, MakeMultipleAnyList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("MakeMultipleAnyList")
.Input("a: int32")
.Input("b: float")
.Output("ndef: string")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("make_list", &old_op_def)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("MakeMultipleAnyList")
.Input("a: int32")
.Input("b: float")
.Output("ndef: string")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("make_list", &old_op_def)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("make_list = MakeMultipleAnyList[T=[DT_INT32, DT_FLOAT]](a, b)",
Result());
@ -431,13 +432,13 @@ REGISTER_KERNEL_BUILDER(Name("ChangeName").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, ChangeName) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("ChangeName")
.Input("x: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("change_name", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("ChangeName")
.Input("x: int32")
.Output("ndef: string")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("change_name", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("change_name = ChangeName[](a)", Result());
}
@ -452,9 +453,9 @@ REGISTER_KERNEL_BUILDER(Name("AddNInts").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, AddNInts) {
OpDef old_op_def;
ASSERT_OK(
TF_ASSERT_OK(
OpDefBuilder("AddNInts").Output("ndef: string").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("add_n_ints", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(NodeDefBuilder("add_n_ints", &old_op_def).Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("add_n_ints = AddNInts[N=0]()", Result());
}
@ -470,9 +471,9 @@ REGISTER_KERNEL_BUILDER(Name("AddNSame").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, AddNSame) {
OpDef old_op_def;
ASSERT_OK(
TF_ASSERT_OK(
OpDefBuilder("AddNSame").Output("ndef: string").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("add_n_same", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(NodeDefBuilder("add_n_same", &old_op_def).Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("add_n_same = AddNSame[N=0, T=DT_BOOL]()", Result());
}
@ -490,14 +491,14 @@ REGISTER_KERNEL_BUILDER(Name("AddNSameAsExisting").Device(DEVICE_CPU),
TEST_F(OpCompatibilityTest, AddNSameAsExisting) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AddNSameAsExisting")
.Input("a: T")
.Output("ndef: string")
.Attr("T: type")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("add_n_same_as_existing", &old_op_def)
.Input(FakeInput(DT_STRING))
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AddNSameAsExisting")
.Input("a: T")
.Output("ndef: string")
.Attr("T: type")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("add_n_same_as_existing", &old_op_def)
.Input(FakeInput(DT_STRING))
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("add_n_same_as_existing = AddNSameAsExisting[N=0, T=DT_STRING](a)",
Result());
@ -513,9 +514,10 @@ REGISTER_KERNEL_BUILDER(Name("AddAnyList").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, AddAnyList) {
OpDef old_op_def;
ASSERT_OK(
TF_ASSERT_OK(
OpDefBuilder("AddAnyList").Output("ndef: string").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("add_any_list", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(
NodeDefBuilder("add_any_list", &old_op_def).Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("add_any_list = AddAnyList[T=[]]()", Result());
}
@ -529,14 +531,14 @@ REGISTER_KERNEL_BUILDER(Name("ShorterAnyList").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, ShorterAnyList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("ShorterAnyList")
.Input("a: T")
.Output("ndef: string")
.Attr("T: list(type) >= 2")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("shorter_any_list", &old_op_def)
.Input(FakeInput(2, DT_BOOL))
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("ShorterAnyList")
.Input("a: T")
.Output("ndef: string")
.Attr("T: list(type) >= 2")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("shorter_any_list", &old_op_def)
.Input(FakeInput(2, DT_BOOL))
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("shorter_any_list = ShorterAnyList[T=[DT_BOOL, DT_BOOL]](a, a:1)",
Result());
@ -550,14 +552,14 @@ REGISTER_KERNEL_BUILDER(Name("ShorterSameList").Device(DEVICE_CPU), TestKernel);
TEST_F(OpCompatibilityTest, ShorterSameList) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("ShorterSameList")
.Input("a: N * int32")
.Output("ndef: string")
.Attr("N: int >= 2")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("shorter_same_list", &old_op_def)
.Input(FakeInput(2))
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("ShorterSameList")
.Input("a: N * int32")
.Output("ndef: string")
.Attr("N: int >= 2")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("shorter_same_list", &old_op_def)
.Input(FakeInput(2))
.Finalize(node_def()));
ExpectSuccess(old_op_def);
EXPECT_EQ("shorter_same_list = ShorterSameList[N=2](a, a:1)", Result());
}
@ -569,8 +571,8 @@ REGISTER_OP("RemoveAttr");
TEST_F(OpCompatibilityTest, RemoveAttrFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("RemoveAttr").Attr("a: int").Finalize(&old_op_def));
ASSERT_OK(
TF_ASSERT_OK(OpDefBuilder("RemoveAttr").Attr("a: int").Finalize(&old_op_def));
TF_ASSERT_OK(
NodeDefBuilder("fails", &old_op_def).Attr("a", 3).Finalize(node_def()));
ExpectInvalid(old_op_def, "NodeDef mentions attr 'a' not in",
"Attr 'a' removed");
@ -581,8 +583,8 @@ REGISTER_OP("AddAttrNoDefault").Attr("a: int");
TEST_F(OpCompatibilityTest, AddAttrNoDefaultFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AddAttrNoDefault").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AddAttrNoDefault").Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
ExpectInvalid(old_op_def, "NodeDef missing attr 'a'",
"Attr 'a' added without default");
}
@ -592,8 +594,8 @@ REGISTER_OP("AddSingleInput").Input("a: int32");
TEST_F(OpCompatibilityTest, AddSingleInputFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AddSingleInput").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AddSingleInput").Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
ExpectInvalid(old_op_def,
"expected inputs 'int32' do not match 0 inputs specified",
"Input signature mismatch '' vs. 'int32'");
@ -612,8 +614,8 @@ REGISTER_OP("AddListBigDefault")
TEST_F(OpCompatibilityTest, AddNIntsBigDefaultFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AddNIntsBigDefault").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AddNIntsBigDefault").Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
ExpectInvalid(old_op_def,
"expected inputs 'int32' do not match 0 inputs specified",
"Input signature mismatch '' vs. 'int32'");
@ -621,8 +623,8 @@ TEST_F(OpCompatibilityTest, AddNIntsBigDefaultFails) {
TEST_F(OpCompatibilityTest, AddNSameBigDefaultFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AddNSameBigDefault").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AddNSameBigDefault").Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
ExpectInvalid(old_op_def,
"expected inputs 'int32' do not match 0 inputs specified",
"Input signature mismatch '' vs. 'int32'");
@ -630,8 +632,8 @@ TEST_F(OpCompatibilityTest, AddNSameBigDefaultFails) {
TEST_F(OpCompatibilityTest, AddListBigDefaultFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AddListBigDefault").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("AddListBigDefault").Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def).Finalize(node_def()));
ExpectInvalid(old_op_def,
"expected inputs 'int32' do not match 0 inputs specified",
"Input signature mismatch '' vs. 'int32'");
@ -643,10 +645,11 @@ REGISTER_OP("ChangeType").Input("a: float");
TEST_F(OpCompatibilityTest, ChangeTypeFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("ChangeType").Input("a: int32").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(
OpDefBuilder("ChangeType").Input("a: int32").Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
ExpectTypeMismatch(old_op_def,
"Input signature mismatch 'int32' vs. 'float'");
}
@ -657,14 +660,14 @@ REGISTER_OP("ChangeOrder").Input("a: float").Input("b: int32");
TEST_F(OpCompatibilityTest, ChangeOrderFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("ChangeOrder")
.Input("b: int32")
.Input("a: float")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("ChangeOrder")
.Input("b: int32")
.Input("a: float")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput())
.Input(FakeInput())
.Finalize(node_def()));
ExpectTypeMismatch(
old_op_def, "Input signature mismatch 'int32, float' vs. 'float, int32'");
}
@ -675,11 +678,11 @@ REGISTER_OP("RemoveInput");
TEST_F(OpCompatibilityTest, RemoveInputFails) {
OpDef old_op_def;
ASSERT_OK(
TF_ASSERT_OK(
OpDefBuilder("RemoveInput").Input("a: float").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput())
.Finalize(node_def()));
ExpectInvalid(old_op_def,
"expected inputs '' do not match 1 inputs specified",
"Input signature mismatch 'float' vs. ''");
@ -691,11 +694,11 @@ REGISTER_OP("ChangeAttrType").Attr("a: int");
TEST_F(OpCompatibilityTest, ChangeAttrTypeFails) {
OpDef old_op_def;
ASSERT_OK(
TF_ASSERT_OK(
OpDefBuilder("ChangeAttrType").Attr("a: bool").Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Attr("a", true)
.Finalize(node_def()));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Attr("a", true)
.Finalize(node_def()));
ExpectInvalid(old_op_def, "value with type 'bool' when 'int' expected",
"Attr 'a' changed type 'bool' -> 'int'");
}
@ -706,9 +709,9 @@ REGISTER_OP("AttrFromList").Attr("a: int");
TEST_F(OpCompatibilityTest, AttrFromListFails) {
OpDef old_op_def;
ASSERT_OK(
TF_ASSERT_OK(
OpDefBuilder("AttrFromList").Attr("a: list(int)").Finalize(&old_op_def));
ASSERT_OK(
TF_ASSERT_OK(
NodeDefBuilder("fails", &old_op_def).Attr("a", {5}).Finalize(node_def()));
ExpectInvalid(old_op_def, "value with type 'list(int)' when 'int' expected",
"Attr 'a' changed type 'list(int)' -> 'int'");
@ -720,8 +723,8 @@ REGISTER_OP("AttrToList").Attr("a: list(int)");
TEST_F(OpCompatibilityTest, AttrToListFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("AttrToList").Attr("a: int").Finalize(&old_op_def));
ASSERT_OK(
TF_ASSERT_OK(OpDefBuilder("AttrToList").Attr("a: int").Finalize(&old_op_def));
TF_ASSERT_OK(
NodeDefBuilder("fails", &old_op_def).Attr("a", 5).Finalize(node_def()));
ExpectInvalid(old_op_def, "value with type 'int' when 'list(int)' expected",
"Attr 'a' changed type 'int' -> 'list(int)'");
@ -733,13 +736,13 @@ REGISTER_OP("PolymorphicToAnyList").Input("a: T").Attr("T: list(type)");
TEST_F(OpCompatibilityTest, PolymorphicToAnyListFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("PolymorphicToAnyList")
.Input("a: T")
.Attr("T: type")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("PolymorphicToAnyList")
.Input("a: T")
.Attr("T: type")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
ExpectInvalid(old_op_def, "value with type 'type' when 'list(type)' expected",
"Attr 'T' changed type 'type' -> 'list(type)'");
}
@ -753,14 +756,14 @@ REGISTER_OP("SameToAnyList")
TEST_F(OpCompatibilityTest, SameToAnyListFails) {
OpDef old_op_def;
ASSERT_OK(OpDefBuilder("SameToAnyList")
.Input("a: N * T")
.Attr("T: type")
.Attr("N: int")
.Finalize(&old_op_def));
ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput(1, DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(OpDefBuilder("SameToAnyList")
.Input("a: N * T")
.Attr("T: type")
.Attr("N: int")
.Finalize(&old_op_def));
TF_ASSERT_OK(NodeDefBuilder("fails", &old_op_def)
.Input(FakeInput(1, DT_INT32))
.Finalize(node_def()));
ExpectInvalid(old_op_def, "value with type 'type' when 'list(type)' expected",
"Attr 'T' changed type 'type' -> 'list(type)'");
}

View File

@ -41,7 +41,7 @@ class OpDefBuilderTest : public ::testing::Test {
void ExpectSuccess(const OpDefBuilder& builder, StringPiece proto) {
OpDef op_def;
Status status = builder.Finalize(&op_def);
EXPECT_OK(status);
TF_EXPECT_OK(status);
if (status.ok()) {
OpDef expected;
protobuf::TextFormat::ParseFromString(
@ -56,7 +56,7 @@ class OpDefBuilderTest : public ::testing::Test {
void ExpectOrdered(const OpDefBuilder& builder, StringPiece proto) {
OpDef op_def;
Status status = builder.Finalize(&op_def);
EXPECT_OK(status);
TF_EXPECT_OK(status);
if (status.ok()) {
OpDef expected;
protobuf::TextFormat::ParseFromString(

View File

@ -39,7 +39,7 @@ class ValidateOpDefTest : public ::testing::Test {
Status TestBuilder(const OpDefBuilder& builder) {
OpDef op_def;
Status status = builder.Finalize(&op_def);
EXPECT_OK(status);
TF_EXPECT_OK(status);
if (!status.ok()) {
return status;
} else {
@ -58,16 +58,16 @@ class ValidateOpDefTest : public ::testing::Test {
};
TEST_F(ValidateOpDefTest, OpDefValid) {
EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Input("a: int32")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Output("a: bool")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("t: type").Input("a: t")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int = 3")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5 = 3")));
EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: numbertype")));
EXPECT_OK(TestBuilder(OpDefBuilder("Uppercase")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Input("a: int32")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Output("a: bool")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("t: type").Input("a: t")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int = 3")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5 = 3")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: numbertype")));
TF_EXPECT_OK(TestBuilder(OpDefBuilder("Uppercase")));
}
TEST_F(ValidateOpDefTest, InvalidName) {
@ -167,16 +167,17 @@ TEST_F(ValidateOpDefTest, BadAttrDefault) {
// default_value {} is indistinguishable from default_value{ list{} } (one
// with an empty list) in proto3 semantics.
EXPECT_OK(
TF_EXPECT_OK(
TestProto("name: 'GoodAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { } }"));
// Empty lists are allowed:
EXPECT_OK(
TF_EXPECT_OK(
TestProto("name: 'GoodAttrDef' attr { name: 'a' "
"type: 'list(int)' default_value { list { } } }"));
// Builder should make the same proto:
EXPECT_OK(TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(int) = []")));
TF_EXPECT_OK(
TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(int) = []")));
// Unless there is a minimum length specified:
ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' "
@ -224,7 +225,7 @@ TEST_F(ValidateOpDefTest, BadAttrMin) {
TestProto("name: 'BadAttrMin' attr { name: 'a' "
"type: 'list(string)' has_minimum: true minimum: -5 }"),
"list type must have a non-negative minimum, not -5");
EXPECT_OK(
TF_EXPECT_OK(
TestProto("name: 'GoodAttrMin' attr { name: 'a' type: 'list(string)' "
"has_minimum: true minimum: 1 }"));
ExpectFailure(TestProto("name: 'NoHasMin' attr { name: 'a' "
@ -235,7 +236,7 @@ TEST_F(ValidateOpDefTest, BadAttrMin) {
TEST_F(ValidateOpDefTest, BadAttrAllowed) {
// Is in list of allowed types.
EXPECT_OK(TestBuilder(
TF_EXPECT_OK(TestBuilder(
OpDefBuilder("GoodAttrtude").Attr("x: numbertype = DT_INT32")));
// Not in list of allowed types.
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude")
@ -246,7 +247,7 @@ TEST_F(ValidateOpDefTest, BadAttrAllowed) {
.Attr("x: list(realnumbertype) = [DT_COMPLEX64]")),
"attr 'x' of complex64 is not in the list of allowed values");
// Is in list of allowed strings.
EXPECT_OK(TestBuilder(
TF_EXPECT_OK(TestBuilder(
OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'")));
// Not in list of allowed strings.
ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude")
@ -300,13 +301,13 @@ TEST_F(ValidateOpDefTest, BadArgType) {
"attr { name: 'n' type: 'int' has_minimum: true minimum: 1 }"),
"Attr 'x' used as type_attr for input 'a' has type list(type)");
// But list(type) is fine as the type of an arg without a number_attr:
EXPECT_OK(TestProto(
TF_EXPECT_OK(TestProto(
"name: 'Arg' input_arg { name: 'a' type_list_attr: 'x' } "
"attr { name: 'x' type: 'list(type)' } attr { name: 'n' type: 'int' "
"has_minimum: true minimum: 1 }"));
// number_attr
EXPECT_OK(TestProto(
TF_EXPECT_OK(TestProto(
"name: 'Arg' input_arg { name: 'a' type: DT_INT32 number_attr: 'n' } "
"attr { name: 'n' type: 'int' has_minimum: true minimum: 0 }"));

View File

@ -182,7 +182,7 @@ TEST_F(OpKernelTest, SuccessBothCpuAndGpu) {
TEST_F(OpKernelTest, CpuTypeRegistered) {
NodeDef ndef = CreateNodeDef("Test1", {DT_FLOAT, DT_INT32});
DeviceTypeVector devs;
ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(1, devs.size());
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[0]);
}
@ -193,7 +193,7 @@ TEST_F(OpKernelTest, CpuAndGpuTypeRegistered) {
// only on CPU.
NodeDef ndef = CreateNodeDef("Test3", {DT_INT8, DT_INT8});
DeviceTypeVector devs;
ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(1, devs.size());
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[0]);
}
@ -202,7 +202,7 @@ TEST_F(OpKernelTest, CpuAndGpuTypeRegistered) {
// only on GPU.
NodeDef ndef = CreateNodeDef("Test3", {DT_FLOAT, DT_FLOAT});
DeviceTypeVector devs;
ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(1, devs.size());
EXPECT_EQ(DeviceType(DEVICE_GPU), devs[0]);
}
@ -210,7 +210,7 @@ TEST_F(OpKernelTest, CpuAndGpuTypeRegistered) {
// Try a node def of an op that is only registered for other types.
NodeDef ndef = CreateNodeDef("Test3", {DT_STRING, DT_STRING});
DeviceTypeVector devs;
ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(0, devs.size());
}
@ -218,7 +218,7 @@ TEST_F(OpKernelTest, CpuAndGpuTypeRegistered) {
// Try a node def of an op that is registered for both.
NodeDef ndef = CreateNodeDef("Test4", {DT_FLOAT});
DeviceTypeVector devs;
ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
TF_ASSERT_OK(SupportedDeviceTypesForNode(DeviceTypes(), ndef, &devs));
EXPECT_EQ(2, devs.size());
EXPECT_EQ(DeviceType(DEVICE_GPU), devs[0]);
EXPECT_EQ(DeviceType(DEVICE_CPU), devs[1]);
@ -295,7 +295,7 @@ TEST_F(OpKernelTest, SaveTempFalse) {
OpKernelContext* ctx = new OpKernelContext(&params);
Tensor t;
EXPECT_OK(ctx->allocate_temp(DT_FLOAT, TensorShape(), &t));
TF_EXPECT_OK(ctx->allocate_temp(DT_FLOAT, TensorShape(), &t));
TensorReferenceVector referenced_tensors;
ctx->retrieve_accessed_tensors(&referenced_tensors);
@ -319,7 +319,7 @@ TEST_F(OpKernelTest, SaveTempTrue) {
OpKernelContext* ctx = new OpKernelContext(&params);
Tensor t;
EXPECT_OK(ctx->allocate_temp(DT_FLOAT, TensorShape(), &t));
TF_EXPECT_OK(ctx->allocate_temp(DT_FLOAT, TensorShape(), &t));
TensorReferenceVector referenced_tensors;
ctx->retrieve_accessed_tensors(&referenced_tensors);
@ -377,7 +377,7 @@ class OpKernelBuilderTest : public ::testing::Test {
// Test SupportedDeviceTypesForNode()
DeviceTypeVector devices;
EXPECT_OK(SupportedDeviceTypesForNode(DeviceTypes(), def, &devices));
TF_EXPECT_OK(SupportedDeviceTypesForNode(DeviceTypes(), def, &devices));
bool found = false;
for (DeviceType dt : devices) {
if (dt == device_type) {
@ -411,7 +411,7 @@ class OpKernelBuilderTest : public ::testing::Test {
// Test SupportedDeviceTypesForNode().
DeviceTypeVector devices;
if (errors::IsNotFound(status)) {
EXPECT_OK(SupportedDeviceTypesForNode(DeviceTypes(), def, &devices));
TF_EXPECT_OK(SupportedDeviceTypesForNode(DeviceTypes(), def, &devices));
for (DeviceType dt : devices) {
EXPECT_NE(dt, device_type);
}
@ -762,20 +762,20 @@ REGISTER_KERNEL_BUILDER(Name("HostMemoryTest")
TEST(MemoryTypesForNode, Simple) {
NodeDef node_def;
ASSERT_OK(NodeDefBuilder("test", "HostMemoryTest")
.Input(FakeInput())
.Input(FakeInput(DT_BOOL))
.Input(FakeInput(3))
.Finalize(&node_def));
TF_ASSERT_OK(NodeDefBuilder("test", "HostMemoryTest")
.Input(FakeInput())
.Input(FakeInput(DT_BOOL))
.Input(FakeInput(3))
.Finalize(&node_def));
MemoryTypeVector input, output;
EXPECT_OK(MemoryTypesForNode(OpRegistry::Global(), DEVICE_CPU, node_def,
&input, &output));
TF_EXPECT_OK(MemoryTypesForNode(OpRegistry::Global(), DEVICE_CPU, node_def,
&input, &output));
EXPECT_EQ(MemoryTypeVector(5, DEVICE_MEMORY), input);
EXPECT_EQ(MemoryTypeVector(3, DEVICE_MEMORY), output);
EXPECT_OK(MemoryTypesForNode(OpRegistry::Global(), DEVICE_GPU, node_def,
&input, &output));
TF_EXPECT_OK(MemoryTypesForNode(OpRegistry::Global(), DEVICE_GPU, node_def,
&input, &output));
EXPECT_EQ(MemoryTypeVector({HOST_MEMORY, DEVICE_MEMORY, HOST_MEMORY,
HOST_MEMORY, HOST_MEMORY}),
input);

View File

@ -85,12 +85,12 @@ TEST_F(OpSegmentTest, Basic) {
for (int i = 0; i < 10; ++i) {
// Register in session A.
auto* ndef = &float_nodedefs_[i];
EXPECT_OK(opseg.FindOrCreate("A", ndef->name(), &op, GetFn(ndef)));
TF_EXPECT_OK(opseg.FindOrCreate("A", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_FLOAT);
// Register in session B.
ndef = &int32_nodedefs_[i];
EXPECT_OK(opseg.FindOrCreate("B", ndef->name(), &op, GetFn(ndef)));
TF_EXPECT_OK(opseg.FindOrCreate("B", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_INT32);
}
@ -99,11 +99,13 @@ TEST_F(OpSegmentTest, Basic) {
};
for (int i = 0; i < 10; ++i) {
// Lookup op in session A.
EXPECT_OK(opseg.FindOrCreate("A", strings::StrCat("op", i), &op, reterr));
TF_EXPECT_OK(
opseg.FindOrCreate("A", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, float_nodedefs_[i], DT_FLOAT);
// Lookup op in session B.
EXPECT_OK(opseg.FindOrCreate("B", strings::StrCat("op", i), &op, reterr));
TF_EXPECT_OK(
opseg.FindOrCreate("B", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, int32_nodedefs_[i], DT_INT32);
}
@ -140,7 +142,7 @@ TEST_F(OpSegmentTest, AddRemoveHolds) {
// Thread1 register the op and wants to ensure it alive.
opseg.AddHold("foo");
EXPECT_OK(opseg.FindOrCreate("foo", ndef.name(), &op, GetFn(&ndef)));
TF_EXPECT_OK(opseg.FindOrCreate("foo", ndef.name(), &op, GetFn(&ndef)));
// Thread2 starts some execution needs "op" to be alive.
opseg.AddHold("foo");

View File

@ -46,7 +46,7 @@ TEST(RendezvousTest, Key) {
"var0;"
"0:0");
Rendezvous::ParsedKey parsed;
EXPECT_OK(Rendezvous::ParseKey(key, &parsed));
TF_EXPECT_OK(Rendezvous::ParseKey(key, &parsed));
EXPECT_EQ(parsed.src_device, "/job:mnist/replica:1/task:2/CPU:0");
EXPECT_EQ(parsed.src_incarnation, 7890);
EXPECT_EQ(parsed.src.type, "CPU");
@ -98,11 +98,11 @@ string V(const Tensor& tensor) {
TEST_F(LocalRendezvousTest, SendRecv) {
Rendezvous::Args args;
ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
TF_ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
EXPECT_TRUE(errors::IsAborted(rendez_->Send("foo", args, V("hello"), false)));
Tensor val(DT_STRING);
bool is_dead = false;
ASSERT_OK(rendez_->Recv("foo", args, &val, &is_dead));
TF_ASSERT_OK(rendez_->Recv("foo", args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
@ -110,12 +110,12 @@ TEST_F(LocalRendezvousTest, RecvSend) {
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
TF_ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv("foo", args, &val, &is_dead));
TF_ASSERT_OK(rendez_->Recv("foo", args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
@ -124,16 +124,16 @@ TEST_F(LocalRendezvousTest, DuplicateWaiterRecv) {
Tensor t(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv("foo", args, &t, &is_dead));
ASSERT_OK(rendez_->Send("bar", args, t, is_dead));
TF_ASSERT_OK(rendez_->Recv("foo", args, &t, &is_dead));
TF_ASSERT_OK(rendez_->Send("bar", args, t, is_dead));
});
Env::Default()->SleepForMicroseconds(1000000);
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
EXPECT_TRUE(errors::IsAborted(rendez_->Recv("foo", args, &val, &val_dead)));
ASSERT_OK(rendez_->Send("foo", args, V("secret msg"), val_dead));
ASSERT_OK(rendez_->Recv("bar", args, &val, &val_dead));
TF_ASSERT_OK(rendez_->Send("foo", args, V("secret msg"), val_dead));
TF_ASSERT_OK(rendez_->Recv("bar", args, &val, &val_dead));
EXPECT_EQ("secret msg", V(val));
}
@ -142,15 +142,15 @@ TEST_F(LocalRendezvousTest, DuplicateSerialRecv) {
Tensor t(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv("foo", args, &t, &is_dead));
ASSERT_OK(rendez_->Send("bar", args, t, is_dead));
TF_ASSERT_OK(rendez_->Recv("foo", args, &t, &is_dead));
TF_ASSERT_OK(rendez_->Send("bar", args, t, is_dead));
});
Env::Default()->SleepForMicroseconds(1000000);
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Send("foo", args, V("secret msg"), val_dead));
ASSERT_OK(rendez_->Recv("bar", args, &val, &val_dead));
TF_ASSERT_OK(rendez_->Send("foo", args, V("secret msg"), val_dead));
TF_ASSERT_OK(rendez_->Recv("bar", args, &val, &val_dead));
EXPECT_EQ("secret msg", V(val));
EXPECT_TRUE(errors::IsAborted(rendez_->Recv("foo", args, &val, &val_dead)));
}
@ -174,8 +174,8 @@ TEST_F(LocalRendezvousTest, RandomSendRecv) {
random::SimplePhilox rnd(&philox);
Env::Default()->SleepForMicroseconds(1000 + rnd.Uniform(10000));
Rendezvous::Args args;
ASSERT_OK(rendez_->Send(strings::StrCat(i), args, V(strings::StrCat(i)),
false));
TF_ASSERT_OK(rendez_->Send(strings::StrCat(i), args,
V(strings::StrCat(i)), false));
});
SchedClosure([this, &state, i]() {
random::PhiloxRandom philox(testing::RandomSeed() + N + i, 17);
@ -184,7 +184,7 @@ TEST_F(LocalRendezvousTest, RandomSendRecv) {
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv(strings::StrCat(i), args, &val, &val_dead));
TF_ASSERT_OK(rendez_->Recv(strings::StrCat(i), args, &val, &val_dead));
EXPECT_EQ(strings::StrCat(i), V(val));
bool done = false;
{
@ -255,7 +255,7 @@ TEST_F(LocalRendezvousTest, TransferDummyDeviceContext) {
Rendezvous::Args args;
args.device_context = new DummyDeviceContext(123);
ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
TF_ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
Notification n;
Rendezvous::Args args1;

View File

@ -156,7 +156,7 @@ TEST(TensorSliceTest, SliceTensorShape) {
TensorSlice a = TensorSlice::ParseOrDie("1,1:-:4,1:2,6");
TensorShape x({2, 4, 5, 8});
TensorShape y;
EXPECT_OK(a.SliceTensorShape(x, &y));
TF_EXPECT_OK(a.SliceTensorShape(x, &y));
EXPECT_EQ("[1,4,1,6]", y.DebugString());
}

View File

@ -83,7 +83,7 @@ TEST(AlgorithmTest, ReversePostOrder) {
BinaryOp("TestMul", w2, {input, 1}, b.opts().WithName("t3"));
Graph g(OpRegistry::Global());
ASSERT_OK(b.ToGraph(&g));
TF_ASSERT_OK(b.ToGraph(&g));
std::vector<Node*> order;
// Test reverse post order:

View File

@ -35,13 +35,13 @@ TEST(GraphDefBuilderTest, Version) {
// Check version when we convert to a Graph
Graph graph(OpRegistry::Global());
EXPECT_OK(builder.ToGraph(&graph));
TF_EXPECT_OK(builder.ToGraph(&graph));
ASSERT_EQ(graph.versions().producer(), TF_GRAPH_DEF_VERSION);
ASSERT_EQ(graph.versions().min_consumer(), TF_GRAPH_DEF_VERSION_MIN_CONSUMER);
// Check version when we convert to a GraphDef
GraphDef graph_def;
EXPECT_OK(builder.ToGraphDef(&graph_def));
TF_EXPECT_OK(builder.ToGraphDef(&graph_def));
ASSERT_EQ(graph_def.versions().producer(), TF_GRAPH_DEF_VERSION);
ASSERT_EQ(graph_def.versions().min_consumer(),
TF_GRAPH_DEF_VERSION_MIN_CONSUMER);

View File

@ -31,18 +31,18 @@ TEST(NodeBuilderTest, Simple) {
RequireDefaultOps();
Graph graph(OpRegistry::Global());
Node* source_node;
EXPECT_OK(NodeBuilder("source_op", "Source")
.Attr("out_types", {DT_INT32, DT_STRING})
.Finalize(&graph, &source_node));
TF_EXPECT_OK(NodeBuilder("source_op", "Source")
.Attr("out_types", {DT_INT32, DT_STRING})
.Finalize(&graph, &source_node));
ASSERT_TRUE(source_node != nullptr);
// Try connecting to each of source_node's outputs.
EXPECT_OK(NodeBuilder("sink1", "Sink")
.Input(source_node)
.Finalize(&graph, nullptr));
EXPECT_OK(NodeBuilder("sink2", "Sink")
.Input(source_node, 1)
.Finalize(&graph, nullptr));
TF_EXPECT_OK(NodeBuilder("sink1", "Sink")
.Input(source_node)
.Finalize(&graph, nullptr));
TF_EXPECT_OK(NodeBuilder("sink2", "Sink")
.Input(source_node, 1)
.Finalize(&graph, nullptr));
// Generate an error if the index is out of range.
EXPECT_FALSE(NodeBuilder("sink3", "Sink")

View File

@ -42,7 +42,7 @@ TEST(ValidateGraphDefTest, TestValidGraph) {
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
ASSERT_OK(graph::ValidateGraphDef(graph_def, OpRegistry::Global()));
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedDefaultAttr) {
@ -62,10 +62,10 @@ TEST(ValidateGraphDefTest, GraphWithUnspecifiedDefaultAttr) {
EXPECT_TRUE(StringPiece(s.ToString()).contains("NodeDef missing attr"));
// Add the defaults.
ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, OpRegistry::Global(), 0));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, OpRegistry::Global(), 0));
// Validation should succeed.
ASSERT_OK(graph::ValidateGraphDef(graph_def, OpRegistry::Global()));
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedRequiredAttr) {
@ -85,7 +85,7 @@ TEST(ValidateGraphDefTest, GraphWithUnspecifiedRequiredAttr) {
EXPECT_TRUE(StringPiece(s.ToString()).contains("NodeDef missing attr"));
// Add the defaults.
ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, OpRegistry::Global(), 0));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, OpRegistry::Global(), 0));
// Validation should still fail.
s = graph::ValidateGraphDef(graph_def, OpRegistry::Global());

View File

@ -37,14 +37,14 @@ class AdjustContrastOpTest : public OpsTestBase {
TEST_F(AdjustContrastOpTest, Simple_1113) {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("adjust_constrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("adjust_constrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 1, 1, 3}), {-1, 2, 3});
AddInputFromArray<float>(TensorShape({}), {1.0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 3}));
test::FillValues<float>(&expected, {-1, 2, 3});
@ -53,15 +53,15 @@ TEST_F(AdjustContrastOpTest, Simple_1113) {
TEST_F(AdjustContrastOpTest, Simple_1223) {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("adjust_constrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("adjust_constrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2, 2, 3}),
{1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12});
AddInputFromArray<float>(TensorShape({}), {0.2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 3}));
test::FillValues<float>(&expected, {2.2, 6.2, 10.2, 2.4, 6.4, 10.4, 2.6, 6.6,
@ -70,11 +70,11 @@ TEST_F(AdjustContrastOpTest, Simple_1223) {
}
TEST_F(AdjustContrastOpTest, Big_99x99x3) {
EXPECT_OK(NodeDefBuilder("adjust_constrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("adjust_constrast_op", "AdjustContrastv2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
std::vector<float> values;
for (int i = 0; i < 99 * 99 * 3; ++i) {
@ -83,7 +83,7 @@ TEST_F(AdjustContrastOpTest, Big_99x99x3) {
AddInputFromArray<float>(TensorShape({1, 99, 99, 3}), values);
AddInputFromArray<float>(TensorShape({}), {0.2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
}
} // namespace tensorflow

View File

@ -40,12 +40,12 @@ class CastOpTest : public OpsTestBase {
protected:
void MakeOp(DataType src, DataType dst) {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("cast_op", "Cast")
.Input(FakeInput(src))
.Attr("SrcT", src)
.Attr("DstT", dst)
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("cast_op", "Cast")
.Input(FakeInput(src))
.Attr("SrcT", src)
.Attr("DstT", dst)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
template <typename IN, typename OUT>
@ -54,7 +54,7 @@ class CastOpTest : public OpsTestBase {
DataType out_type = DataTypeToEnum<OUT>::v();
MakeOp(in_type, out_type);
AddInputFromArray<IN>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), out_type, TensorShape({1, 2, 2, 1}));
test::FillValues<OUT>(&expected, {1, 2, 3, 4});
test::ExpectTensorEqual<OUT>(expected, *GetOutput(0));

View File

@ -33,17 +33,17 @@ class RGBToHSVOpTest : public OpsTestBase {
protected:
RGBToHSVOpTest() {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("rgb_to_hsv_op", "RGBToHSV")
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("rgb_to_hsv_op", "RGBToHSV")
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(RGBToHSVOpTest, CheckBlack) {
// Black pixel should map to hsv = [0,0,0]
AddInputFromArray<float>(TensorShape({3}), {0, 0, 0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0.0, 0.0, 0.0});
@ -53,7 +53,7 @@ TEST_F(RGBToHSVOpTest, CheckBlack) {
TEST_F(RGBToHSVOpTest, CheckGray) {
// Gray pixel should have hue = saturation = 0.0, value = r/255
AddInputFromArray<float>(TensorShape({3}), {.5, .5, .5});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0.0, 0.0, .5});
@ -63,7 +63,7 @@ TEST_F(RGBToHSVOpTest, CheckGray) {
TEST_F(RGBToHSVOpTest, CheckWhite) {
// Gray pixel should have hue = saturation = 0.0, value = 1.0
AddInputFromArray<float>(TensorShape({3}), {1, 1, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0.0, 0.0, 1.0});
@ -73,7 +73,7 @@ TEST_F(RGBToHSVOpTest, CheckWhite) {
TEST_F(RGBToHSVOpTest, CheckRedMax) {
// Test case where red channel dominates
AddInputFromArray<float>(TensorShape({3}), {.8, .4, .2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
float expected_h = 1. / 6. * .2 / .6;
float expected_s = .6 / .8;
@ -87,7 +87,7 @@ TEST_F(RGBToHSVOpTest, CheckRedMax) {
TEST_F(RGBToHSVOpTest, CheckGreenMax) {
// Test case where green channel dominates
AddInputFromArray<float>(TensorShape({3}), {.2, .8, .4});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
float expected_h = 1. / 6. * (2.0 + (.2 / .6));
float expected_s = .6 / .8;
@ -101,7 +101,7 @@ TEST_F(RGBToHSVOpTest, CheckGreenMax) {
TEST_F(RGBToHSVOpTest, CheckBlueMax) {
// Test case where blue channel dominates
AddInputFromArray<float>(TensorShape({3}), {.4, .2, .8});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
float expected_h = 1. / 6. * (4.0 + (.2 / .6));
float expected_s = .6 / .8;
@ -114,7 +114,7 @@ TEST_F(RGBToHSVOpTest, CheckBlueMax) {
TEST_F(RGBToHSVOpTest, CheckNegativeDifference) {
AddInputFromArray<float>(TensorShape({3}), {0, .1, .2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
float expected_h = 1. / 6. * (4.0 + (-.1 / .2));
float expected_s = .2 / .2;
@ -129,17 +129,17 @@ class HSVToRGBOpTest : public OpsTestBase {
protected:
HSVToRGBOpTest() {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("hsv_to_rgb_op", "HSVToRGB")
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("hsv_to_rgb_op", "HSVToRGB")
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(HSVToRGBOpTest, CheckBlack) {
// Black pixel should map to rgb = [0,0,0]
AddInputFromArray<float>(TensorShape({3}), {0.0, 0.0, 0.0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0, 0, 0});
@ -149,7 +149,7 @@ TEST_F(HSVToRGBOpTest, CheckBlack) {
TEST_F(HSVToRGBOpTest, CheckGray) {
// Gray pixel should have hue = saturation = 0.0, value = r/255
AddInputFromArray<float>(TensorShape({3}), {0.0, 0.0, .5});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {.5, .5, .5});
@ -159,7 +159,7 @@ TEST_F(HSVToRGBOpTest, CheckGray) {
TEST_F(HSVToRGBOpTest, CheckWhite) {
// Gray pixel should have hue = saturation = 0.0, value = 1.0
AddInputFromArray<float>(TensorShape({3}), {0.0, 0.0, 1.0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {1, 1, 1});
@ -174,7 +174,7 @@ TEST_F(HSVToRGBOpTest, CheckRedMax) {
AddInputFromArray<float>(TensorShape({3}),
{expected_h, expected_s, expected_v});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {.8, .4, .2});
@ -189,7 +189,7 @@ TEST_F(HSVToRGBOpTest, CheckGreenMax) {
AddInputFromArray<float>(TensorShape({3}),
{expected_h, expected_s, expected_v});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {.2, .8, .4});
@ -204,7 +204,7 @@ TEST_F(HSVToRGBOpTest, CheckBlueMax) {
AddInputFromArray<float>(TensorShape({3}),
{expected_h, expected_s, expected_v});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {.4, .2, .8});

View File

@ -30,11 +30,11 @@ class SwitchOpTest : public OpsTestBase {
protected:
void Initialize(DataType dt) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("op", "Switch")
.Input(FakeInput(dt))
.Input(FakeInput())
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("op", "Switch")
.Input(FakeInput(dt))
.Input(FakeInput())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -42,7 +42,7 @@ TEST_F(SwitchOpTest, Int32Success_6_s0) {
Initialize(DT_INT32);
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<bool>(TensorShape({}), {false});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
@ -53,7 +53,7 @@ TEST_F(SwitchOpTest, Int32Success_6_s1) {
Initialize(DT_INT32);
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<bool>(TensorShape({}), {true});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(1));
@ -64,7 +64,7 @@ TEST_F(SwitchOpTest, Int32Success_2_3_s0) {
Initialize(DT_INT32);
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<bool>(TensorShape({}), {false});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
@ -75,7 +75,7 @@ TEST_F(SwitchOpTest, StringSuccess_s1) {
Initialize(DT_STRING);
AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
AddInputFromArray<bool>(TensorShape({}), {true});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<string>(expected, *GetOutput(1));

View File

@ -36,12 +36,12 @@ class DynamicPartitionOpTest : public OpsTestBase {
protected:
void MakeOp() {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "DynamicPartition")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("num_partitions", 4)
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "DynamicPartition")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("num_partitions", 4)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -53,7 +53,7 @@ TEST_F(DynamicPartitionOpTest, Simple_OneD) {
// Feed and run
AddInputFromArray<float>(TensorShape({6}), {0, 13, 2, 39, 4, 17});
AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 3, 2, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output sizes
{ // Output 0
@ -86,7 +86,7 @@ TEST_F(DynamicPartitionOpTest, Simple_TwoD) {
TensorShape({6, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17});
AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 3, 2, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output sizes
{ // Output 0
@ -117,7 +117,7 @@ TEST_F(DynamicPartitionOpTest, SomeOutputsEmpty) {
// Feed and run
AddInputFromArray<float>(TensorShape({6}), {0, 13, 2, 39, 4, 17});
AddInputFromArray<int32>(TensorShape({6}), {0, 0, 2, 2, 0, 2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
TensorShape empty_one_dim;
empty_one_dim.AddDim(0);

View File

@ -38,11 +38,11 @@ class DynamicStitchOpTest : public OpsTestBase {
protected:
void MakeOp(int n, DataType dt) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "DynamicStitch")
.Input(FakeInput(n, DT_INT32))
.Input(FakeInput(n, dt))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "DynamicStitch")
.Input(FakeInput(n, DT_INT32))
.Input(FakeInput(n, dt))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -54,7 +54,7 @@ TEST_F(DynamicStitchOpTest, Simple_OneD) {
AddInputFromArray<int32>(TensorShape({5}), {1, 6, 2, 3, 5});
AddInputFromArray<float>(TensorShape({3}), {0, 40, 70});
AddInputFromArray<float>(TensorShape({5}), {10, 60, 20, 30, 50});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output.
Tensor expected(allocator(), DT_FLOAT, TensorShape({8}));
@ -72,7 +72,7 @@ TEST_F(DynamicStitchOpTest, Simple_TwoD) {
AddInputFromArray<float>(TensorShape({3, 2}), {0, 1, 40, 41, 70, 71});
AddInputFromArray<float>(TensorShape({2, 2}), {10, 11, 60, 61});
AddInputFromArray<float>(TensorShape({3, 2}), {20, 21, 30, 31, 50, 51});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output.
Tensor expected(allocator(), DT_FLOAT, TensorShape({8, 2}));

View File

@ -40,11 +40,11 @@ class GatherOpTest : public OpsTestBase {
protected:
void MakeOp(DataType index_type) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "Gather")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(index_type))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "Gather")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(index_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -54,7 +54,7 @@ TEST_F(GatherOpTest, ScalarIndices) {
// Feed and run
AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({}), {3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output.
Tensor expected(allocator(), DT_FLOAT, TensorShape({}));
@ -69,7 +69,7 @@ TEST_F(GatherOpTest, Simple_TwoD32) {
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14});
AddInputFromArray<int32>(TensorShape({4}), {0, 4, 0, 2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output.
Tensor expected(allocator(), DT_FLOAT, TensorShape({4, 3}));
@ -84,7 +84,7 @@ TEST_F(GatherOpTest, Simple_TwoD64) {
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14});
AddInputFromArray<int64>(TensorShape({4}), {0, 4, 0, 2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output.
Tensor expected(allocator(), DT_FLOAT, TensorShape({4, 3}));
@ -98,7 +98,7 @@ TEST_F(GatherOpTest, HighRank) {
// Feed and run
AddInputFromArray<float>(TensorShape({4}), {0, 1, 2, 3});
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 0, 2, 3, 0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));

View File

@ -39,33 +39,33 @@ class IdentityOpTest : public OpsTestBase {
};
TEST_F(IdentityOpTest, Int32Success_6) {
ASSERT_OK(Init(DT_INT32));
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, Int32Success_2_3) {
ASSERT_OK(Init(DT_INT32));
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, StringSuccess) {
ASSERT_OK(Init(DT_STRING));
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<string>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<string>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<string>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, RefInputError) { ASSERT_OK(Init(DT_INT32_REF)); }
TEST_F(IdentityOpTest, RefInputError) { TF_ASSERT_OK(Init(DT_INT32_REF)); }
} // namespace
} // namespace tensorflow

View File

@ -43,56 +43,56 @@ class PrintingGraphTest : public OpsTestBase {
};
TEST_F(PrintingGraphTest, Int32Success_6) {
ASSERT_OK(Init(DT_INT32, DT_INT32));
TF_ASSERT_OK(Init(DT_INT32, DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, Int32Success_Summarize6) {
ASSERT_OK(Init(DT_INT32, DT_INT32, "", -1, 6));
TF_ASSERT_OK(Init(DT_INT32, DT_INT32, "", -1, 6));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, StringSuccess) {
ASSERT_OK(Init(DT_INT32, DT_STRING));
TF_ASSERT_OK(Init(DT_INT32, DT_STRING));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<string>(TensorShape({}), {"foo"});
AddInputFromArray<string>(TensorShape({}), {"bar"});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, MsgSuccess) {
ASSERT_OK(Init(DT_INT32, DT_STRING, "Message: "));
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "Message: "));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<string>(TensorShape({}), {"foo"});
AddInputFromArray<string>(TensorShape({}), {"bar"});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, FirstNSuccess) {
ASSERT_OK(Init(DT_INT32, DT_STRING, "", 3));
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "", 3));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<string>(TensorShape({}), {"foo"});
AddInputFromArray<string>(TensorShape({}), {"bar"});
// run 4 times but we only print 3 as intended
for (int i = 0; i < 4; i++) ASSERT_OK(RunOpKernel());
for (int i = 0; i < 4; i++) TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));

View File

@ -94,17 +94,17 @@ class LRNFloatTest : public OpsTestBase {
};
TEST_F(LRNFloatTest, Depth96) {
ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
.Input(FakeInput())
.Attr("depth_radius", 5)
.Attr("bias", 1.0f)
.Attr("alpha", 0.1f)
.Attr("beta", 2.0f)
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
.Input(FakeInput())
.Attr("depth_radius", 5)
.Attr("bias", 1.0f)
.Attr("alpha", 0.1f)
.Attr("beta", 2.0f)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInput<float>(TensorShape({1, 1, 1, 96}),
[this](int i) -> float { return i + 1; });
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
auto actual = GetOutput(0)->tensor<float, 4>();
// Output for Node 0 with Value 1:
@ -130,17 +130,17 @@ TEST_F(LRNFloatTest, Depth96) {
}
TEST_F(LRNFloatTest, Depth16) {
ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
.Input(FakeInput())
.Attr("depth_radius", 5)
.Attr("bias", 1.0f)
.Attr("alpha", 0.1f)
.Attr("beta", 2.0f)
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN")
.Input(FakeInput())
.Attr("depth_radius", 5)
.Attr("bias", 1.0f)
.Attr("alpha", 0.1f)
.Attr("beta", 2.0f)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInput<float>(TensorShape({1, 1, 1, 16}),
[this](int i) -> float { return i + 1; });
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
auto actual = GetOutput(0)->tensor<float, 4>();
// Output for Node 0 with Value 1:
@ -173,17 +173,17 @@ static double RndGaussian(random::SimplePhilox* rnd) {
#define TCASE(NAME, DEPTH, BATCH, DEPTH_RADIUS, BIAS, ALPHA, BETA) \
TEST_F(LRNFloatTest, NAME) { \
ASSERT_OK(NodeDefBuilder("lrn_op", "LRN") \
.Input(FakeInput()) \
.Attr("depth_radius", (DEPTH_RADIUS)) \
.Attr("bias", (BIAS)) \
.Attr("alpha", ((ALPHA) / 10)) \
.Attr("beta", (BETA)) \
.Finalize(node_def())); \
ASSERT_OK(InitOp()); \
TF_ASSERT_OK(NodeDefBuilder("lrn_op", "LRN") \
.Input(FakeInput()) \
.Attr("depth_radius", (DEPTH_RADIUS)) \
.Attr("bias", (BIAS)) \
.Attr("alpha", ((ALPHA) / 10)) \
.Attr("beta", (BETA)) \
.Finalize(node_def())); \
TF_ASSERT_OK(InitOp()); \
AddInput<float>(TensorShape({BATCH, 1, 1, DEPTH}), \
[this](int i) -> float { return RndGaussian(&rand_); }); \
ASSERT_OK(RunOpKernel()); \
TF_ASSERT_OK(RunOpKernel()); \
EXPECT_TRUE(Compare()); \
}

View File

@ -33,19 +33,19 @@ class RandomCropOpTest : public OpsTestBase {
protected:
RandomCropOpTest() {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("random_crop_op", "RandomCrop")
.Input(FakeInput(DT_UINT8))
.Input(FakeInput(DT_INT64))
.Attr("T", DT_UINT8)
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("random_crop_op", "RandomCrop")
.Input(FakeInput(DT_UINT8))
.Input(FakeInput(DT_INT64))
.Attr("T", DT_UINT8)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(RandomCropOpTest, Basic) {
AddInputFromArray<uint8>(TensorShape({1, 2, 1}), {2, 2});
AddInputFromArray<int64>(TensorShape({2}), {1, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_UINT8, TensorShape({1, 1, 1}));
test::FillValues<uint8>(&expected, {2});
@ -55,7 +55,7 @@ TEST_F(RandomCropOpTest, Basic) {
TEST_F(RandomCropOpTest, SameSizeOneChannel) {
AddInputFromArray<uint8>(TensorShape({2, 1, 1}), {1, 2});
AddInputFromArray<int64>(TensorShape({2}), {2, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_UINT8, TensorShape({2, 1, 1}));
test::FillValues<uint8>(&expected, {1, 2});
@ -65,7 +65,7 @@ TEST_F(RandomCropOpTest, SameSizeOneChannel) {
TEST_F(RandomCropOpTest, SameSizeMultiChannel) {
AddInputFromArray<uint8>(TensorShape({2, 1, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int64>(TensorShape({2}), {2, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_UINT8, TensorShape({2, 1, 3}));
test::FillValues<uint8>(&expected, {1, 2, 3, 4, 5, 6});

View File

@ -33,12 +33,12 @@ class ResizeBilinearOpTest : public OpsTestBase {
protected:
ResizeBilinearOpTest() {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", false)
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", false)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
@ -46,12 +46,12 @@ class ResizeBilinearOpAlignCornersTest : public OpsTestBase {
protected:
ResizeBilinearOpAlignCornersTest() {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", true)
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("resize_bilinear_op", "ResizeBilinear")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
@ -61,7 +61,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear2x2To1x1) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// When scaling down, we have to arbitrarily pick a pixel from the
// original input. In this case, we choose the top/left most pixel.
@ -76,7 +76,7 @@ TEST_F(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To1x1) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// When scaling down, we have to arbitrarily pick a pixel from the
// original input. In this case, we choose the top/left most pixel.
@ -91,7 +91,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear2x2To3x3) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
@ -111,7 +111,7 @@ TEST_F(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners2x2To3x3) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
@ -136,7 +136,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
@ -157,7 +157,7 @@ TEST_F(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
@ -178,7 +178,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear3x3To4x4) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
// clang-format off
@ -202,7 +202,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear4x4To3x3) {
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
@ -226,7 +226,7 @@ TEST_F(ResizeBilinearOpAlignCornersTest, TestBilinearAlignCorners4x4To3x3) {
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
@ -248,7 +248,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear2x2To3x3Batch2) {
// repeated twice
AddInputFromArray<float>(TensorShape({2, 2, 2, 1}), {1, 2, 3, 4, 1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 1}));
// clang-format off
@ -264,7 +264,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear2x2x2To3x3x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 2}),
{1, -1, 2, -2, 3, -3, 4, -4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 2}));
// clang-format off
@ -290,7 +290,7 @@ TEST_F(ResizeBilinearOpTest, TestBilinear2x2To4x4) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
// clang-format off

View File

@ -36,11 +36,11 @@ class ResizeNearestNeighborOpTest : public OpsTestBase {
protected:
ResizeNearestNeighborOpTest() {
RequireDefaultOps();
EXPECT_OK(NodeDefBuilder("resize_nn", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
EXPECT_OK(InitOp());
TF_EXPECT_OK(NodeDefBuilder("resize_nn", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
@ -50,7 +50,7 @@ TEST_F(ResizeNearestNeighborOpTest, TestNearest2x2To1x1) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
@ -67,7 +67,7 @@ TEST_F(ResizeNearestNeighborOpTest, TestNearest2x2To3x3) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
@ -87,7 +87,7 @@ TEST_F(ResizeNearestNeighborOpTest, TestNearest2x2To2x5) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {2, 5});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 5, 1}));
@ -106,7 +106,7 @@ TEST_F(ResizeNearestNeighborOpTest, TestNearest2x2To5x2) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {5, 2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 5, 2, 1}));
@ -128,7 +128,7 @@ TEST_F(ResizeNearestNeighborOpTest, TestNearest2x2To4x4) {
// 3, 4
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
@ -152,7 +152,7 @@ TEST_F(ResizeNearestNeighborOpTest, TestNearest2x2x2x2To2x3x3x2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 2}),
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 2}));

View File

@ -42,12 +42,12 @@ class RestoreOpTest : public OpsTestBase {
// Makes an operation to restore two tensors
void MakeRestoreOp(DataType dt) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "Restore")
.Input(FakeInput())
.Input(FakeInput())
.Attr("dt", dt)
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "Restore")
.Input(FakeInput())
.Input(FakeInput())
.Attr("dt", dt)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -71,13 +71,13 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
// Initialize an operation
NodeDef save;
ASSERT_OK(NodeDefBuilder("myop", "Save")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE,
DT_QINT8, DT_QINT32, DT_UINT8, DT_INT8,
DT_INT16, DT_STRING, DT_COMPLEX64}))
.Finalize(&save));
TF_ASSERT_OK(NodeDefBuilder("myop", "Save")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput({DT_BOOL, DT_INT32, DT_FLOAT, DT_DOUBLE,
DT_QINT8, DT_QINT32, DT_UINT8, DT_INT8,
DT_INT16, DT_STRING, DT_COMPLEX64}))
.Finalize(&save));
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
@ -88,7 +88,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(),
cpu_allocator(), save,
TF_GRAPH_DEF_VERSION, &status));
EXPECT_OK(status);
TF_EXPECT_OK(status);
// Run it
@ -170,7 +170,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
OpKernelContext ctx(&params);
op->Compute(&ctx);
EXPECT_OK(ctx.status());
TF_EXPECT_OK(ctx.status());
}
// Now we restore
@ -182,7 +182,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
[&filename](int x) -> string { return filename; });
AddInput<string>(TensorShape({}),
[&](int x) -> string { return tensor_names[0]; });
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({2});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -194,7 +194,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_INT32);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[1];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({10});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -206,7 +206,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_FLOAT);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[2];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({2, 4});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -218,7 +218,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_DOUBLE);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[3];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({2, 4});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -230,7 +230,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_QINT8);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[4];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({3, 2});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -242,7 +242,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_QINT32);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[5];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({2, 3});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -255,7 +255,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_UINT8);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[6];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({11});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -267,7 +267,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_INT8);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[7];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({7});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -279,7 +279,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_INT16);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[8];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({7});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -291,7 +291,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_INT64);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[9];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({9});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -303,7 +303,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_STRING);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[10];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({2});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -314,7 +314,7 @@ TEST_F(RestoreOpTest, RestoreSimple) {
{
MakeRestoreOp(DT_COMPLEX64);
(*mutable_input(1).tensor).scalar<string>()() = tensor_names[11];
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
TensorShape expected({2, 3});
EXPECT_TRUE(output->shape().IsSameSize(expected));
@ -328,13 +328,13 @@ class RestoreSliceOpTest : public OpsTestBase {
protected:
void MakeRestoreSliceOp(DataType dt) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "RestoreSlice")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Attr("dt", dt)
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "RestoreSlice")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Attr("dt", dt)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -346,11 +346,11 @@ TEST_F(RestoreSliceOpTest, RestoreInt) {
{
// Initialize an operation
NodeDef save;
ASSERT_OK(NodeDefBuilder("save", "Save")
.Input(FakeInput(DT_STRING))
.Input(FakeInput(DT_STRING))
.Input(FakeInput({DT_INT32}))
.Finalize(&save));
TF_ASSERT_OK(NodeDefBuilder("save", "Save")
.Input(FakeInput(DT_STRING))
.Input(FakeInput(DT_STRING))
.Input(FakeInput({DT_INT32}))
.Finalize(&save));
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
@ -361,7 +361,7 @@ TEST_F(RestoreSliceOpTest, RestoreInt) {
std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(),
cpu_allocator(), save,
TF_GRAPH_DEF_VERSION, &status));
EXPECT_OK(status);
TF_EXPECT_OK(status);
// Run it
@ -394,7 +394,7 @@ TEST_F(RestoreSliceOpTest, RestoreInt) {
OpKernelContext ctx(&params);
op->Compute(&ctx);
EXPECT_OK(ctx.status());
TF_EXPECT_OK(ctx.status());
}
// Now we restore
@ -411,7 +411,7 @@ TEST_F(RestoreSliceOpTest, RestoreInt) {
return shape_and_slice;
});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check that we have an integer tensor
Tensor* output = GetOutput(0);

View File

@ -39,12 +39,12 @@ class ReverseOpTest : public OpsTestBase {
protected:
void MakeOp(DataType data_type) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "Reverse")
.Input(FakeInput(data_type))
.Input(FakeInput())
.Attr("T", data_type)
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "Reverse")
.Input(FakeInput(data_type))
.Input(FakeInput())
.Attr("T", data_type)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -52,7 +52,7 @@ TEST_F(ReverseOpTest, Reverse_0) {
MakeOp(DT_FLOAT);
AddInputFromArray<float>(TensorShape({}), {3});
AddInputFromArray<bool>(TensorShape({}), {true});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor* output = GetOutput(0);
Tensor expected(allocator(), DT_FLOAT, TensorShape({}));
@ -71,7 +71,7 @@ TEST_F(ReverseOpTest, Reverse_234) {
15, 16, 17, 18, 19, 20, 21, 22, 23});
AddInputFromArray<bool>(TensorShape({3}), {true, false, true});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor* params_tensor = GetOutput(0);
@ -96,7 +96,7 @@ TEST_F(ReverseOpTest, Reverse_1234) {
15, 16, 17, 18, 19, 20, 21, 22, 23});
AddInputFromArray<bool>(TensorShape({4}), {true, true, false, true});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor* params_tensor = GetOutput(0);

View File

@ -39,7 +39,7 @@ class SaveOpTest : public OpsTestBase {
protected:
void MakeOp() {
RequireDefaultOps();
ASSERT_OK(
TF_ASSERT_OK(
NodeDefBuilder("myop", "Save")
.Input(FakeInput())
.Input(FakeInput())
@ -47,7 +47,7 @@ class SaveOpTest : public OpsTestBase {
DT_QINT32, DT_UINT8, DT_INT8, DT_INT16, DT_INT64,
DT_STRING, DT_COMPLEX64}))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(InitOp());
}
};
@ -111,12 +111,12 @@ TEST_F(SaveOpTest, Simple) {
return complex64(100 + x, 200 + x);
});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check that the checkpoint file is properly written
checkpoint::TensorSliceReader reader(filename,
checkpoint::OpenTableTensorSliceReader);
EXPECT_OK(reader.status());
TF_EXPECT_OK(reader.status());
// We expect to find all saved tensors
{
@ -344,14 +344,14 @@ class SaveSlicesOpTest : public OpsTestBase {
protected:
void MakeOp() {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "SaveSlices")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput(
{DT_INT32, DT_FLOAT, DT_DOUBLE, DT_QINT8, DT_QINT32}))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "SaveSlices")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput(
{DT_INT32, DT_FLOAT, DT_DOUBLE, DT_QINT8, DT_QINT32}))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -407,12 +407,12 @@ TEST_F(SaveSlicesOpTest, Slices) {
return *reinterpret_cast<qint32*>(&x) * qint8(2);
});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check that the checkpoint file is properly written
checkpoint::TensorSliceReader reader(filename,
checkpoint::OpenTableTensorSliceReader);
EXPECT_OK(reader.status());
TF_EXPECT_OK(reader.status());
// We expect to find all saved tensors
{
@ -504,13 +504,13 @@ class SaveOpSlices2Test : public OpsTestBase {
protected:
void MakeOp() {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "SaveSlices")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput({DT_INT32, DT_INT32, DT_FLOAT}))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "SaveSlices")
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput())
.Input(FakeInput({DT_INT32, DT_INT32, DT_FLOAT}))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -551,12 +551,12 @@ TEST_F(SaveOpSlices2Test, TwoSlices) {
AddInput<float>(TensorShape({2, 4}),
[](int x) -> float { return static_cast<float>(x) / 10; });
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check that the checkpoint file is properly written
checkpoint::TensorSliceReader reader(filename,
checkpoint::OpenTableTensorSliceReader);
EXPECT_OK(reader.status());
TF_EXPECT_OK(reader.status());
{
// Reload the two slices of "four_by_sixteen" into that tensor.

View File

@ -40,12 +40,12 @@ class ScatterUpdateOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_ref_type, DataType index_type) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "ScatterUpdate")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterUpdate")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -54,7 +54,7 @@ TEST_F(ScatterUpdateOpTest, Simple_StringType) {
AddInputFromArray<string>(TensorShape({1}), {"Brain"});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<string>(TensorShape({1}), {"TensorFlow"});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_STRING, TensorShape({1}));
@ -67,7 +67,7 @@ TEST_F(ScatterUpdateOpTest, Simple_BoolType) {
AddInputFromArray<bool>(TensorShape({1}), {false});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<bool>(TensorShape({1}), {true});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_BOOL, TensorShape({1}));
@ -84,7 +84,7 @@ TEST_F(ScatterUpdateOpTest, Simple_TwoD32) {
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor params_tensor = *mutable_input(0).tensor;
@ -103,7 +103,7 @@ TEST_F(ScatterUpdateOpTest, Simple_Two64) {
AddInputFromArray<int64>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor params_tensor = *mutable_input(0).tensor;
@ -120,7 +120,7 @@ TEST_F(ScatterUpdateOpTest, Simple_ZeroD) {
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {101});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor params_tensor = *mutable_input(0).tensor;
@ -136,7 +136,7 @@ TEST_F(ScatterUpdateOpTest, Simple_OneD) {
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3}), {100, 101, 102});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor params_tensor = *mutable_input(0).tensor;
@ -152,7 +152,7 @@ TEST_F(ScatterUpdateOpTest, HigherRank) {
AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({2, 3}), {0, 4, 2, 1, 3, 6});
AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the new state of the input
Tensor params_tensor = *mutable_input(0).tensor;
@ -229,11 +229,11 @@ class ScatterUpdateBM : public ScatterUpdateOpTest {
public:
virtual void TestBody() {}
void MakeBenchmarkOp(const char* op, DataType index_type) {
ASSERT_OK(NodeDefBuilder("myop", op)
.Input(FakeInput(DT_FLOAT_REF))
.Input(FakeInput(index_type))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_ASSERT_OK(NodeDefBuilder("myop", op)
.Input(FakeInput(DT_FLOAT_REF))
.Input(FakeInput(index_type))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
}
};

View File

@ -42,13 +42,13 @@ class SparseToDenseTest : public OpsTestBase {
void SetUp() override { RequireDefaultOps(); }
void MakeOp(int dim, DataType index_type, DataType value_type) {
ASSERT_OK(NodeDefBuilder("sparsetodense", "SparseToDense")
.Input(FakeInput(index_type))
.Input(FakeInput(index_type))
.Input(FakeInput(value_type))
.Input(FakeInput(value_type))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("sparsetodense", "SparseToDense")
.Input(FakeInput(index_type))
.Input(FakeInput(index_type))
.Input(FakeInput(value_type))
.Input(FakeInput(value_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -64,7 +64,7 @@ TEST_F(SparseToDenseTest, OneD_OneValue) {
// default_value
AddInputFromArray<float>(TensorShape({}), {-2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {5});
test::FillValues<float>(&expected, {-2, 2, -2, 2, 2});
@ -83,7 +83,7 @@ TEST_F(SparseToDenseTest, OneD_OneValue_int64_double) {
// default_value
AddInputFromArray<double>(TensorShape({}), {-2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, {5});
test::FillValues<double>(&expected, {-2, 2, -2, 2, 2});
@ -102,7 +102,7 @@ TEST_F(SparseToDenseTest, OneD_MultValues) {
// default_value
AddInputFromArray<float>({}, {-2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {5});
test::FillValues<float>(&expected, {-2, 3, -2, 4, 5});
@ -121,7 +121,7 @@ TEST_F(SparseToDenseTest, TwoD_OneValue) {
// default_value
AddInputFromArray<float>(TensorShape({}), {-2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4});
expected.flat<float>().setConstant(-2);
@ -143,7 +143,7 @@ TEST_F(SparseToDenseTest, TwoD_MultValues) {
// default_value
AddInputFromArray<float>(TensorShape({}), {-2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4});
expected.flat<float>().setConstant(-2);
@ -165,7 +165,7 @@ TEST_F(SparseToDenseTest, ThreeD_OneValue) {
// default_value
AddInputFromArray<float>(TensorShape({}), {-2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4, 2});
expected.flat<float>().setConstant(-2);
@ -187,7 +187,7 @@ TEST_F(SparseToDenseTest, ThreeD_MultValues) {
// default_value
AddInputFromArray<float>(TensorShape({}), {-2});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4, 2});
expected.flat<float>().setConstant(-2);
@ -264,7 +264,7 @@ static void BM_SparseToDense(int iters, const int bm_arg) {
for (int i = 0; i < iters; ++i) {
delete sparse_context->release_output(0).tensor;
op->Compute(sparse_context.get());
ASSERT_OK(sparse_context->status());
TF_ASSERT_OK(sparse_context->status());
}
tensorflow::testing::StopTiming();

View File

@ -51,12 +51,12 @@ class SummaryImageOpTest : public OpsTestBase {
protected:
void MakeOp(int max_images) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "ImageSummary")
.Input(FakeInput())
.Input(FakeInput())
.Attr("max_images", max_images)
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "ImageSummary")
.Input(FakeInput())
.Input(FakeInput())
.Attr("max_images", max_images)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
void CheckAndRemoveEncodedImages(Summary* summary) {
@ -83,7 +83,7 @@ TEST_F(SummaryImageOpTest, ThreeGrayImagesOutOfFive4dInput) {
AddInputFromArray<string>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({5, 2, 1, 1}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);
@ -106,7 +106,7 @@ TEST_F(SummaryImageOpTest, OneGrayImage4dInput) {
AddInputFromArray<string>(TensorShape({}), {"tag"});
AddInputFromArray<float>(TensorShape({5 /*batch*/, 2, 1, 1 /*depth*/}),
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);
@ -138,7 +138,7 @@ TEST_F(SummaryImageOpTest, OneColorImage4dInput) {
/* r4, c0, RGB */ 1.0, 1.0, 0.0,
/* r4, c1, RGB */ 1.0, 0.0, 1.0,
});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);

View File

@ -49,11 +49,11 @@ class SummaryScalarOpTest : public OpsTestBase {
protected:
void MakeOp(DataType dt) {
RequireDefaultOps();
ASSERT_OK(NodeDefBuilder("myop", "ScalarSummary")
.Input(FakeInput())
.Input(FakeInput(dt))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "ScalarSummary")
.Input(FakeInput())
.Input(FakeInput(dt))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -63,7 +63,7 @@ TEST_F(SummaryScalarOpTest, SimpleFloat) {
// Feed and run
AddInputFromArray<string>(TensorShape({3}), {"tag1", "tag2", "tag3"});
AddInputFromArray<float>(TensorShape({3}), {1.0, -0.73, 10000.0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);
@ -83,7 +83,7 @@ TEST_F(SummaryScalarOpTest, SimpleDouble) {
// Feed and run
AddInputFromArray<string>(TensorShape({3}), {"tag1", "tag2", "tag3"});
AddInputFromArray<double>(TensorShape({3}), {1.0, -0.73, 10000.0});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);
@ -137,11 +137,11 @@ TEST_F(SummaryScalarOpTest, Error_WrongDimsValues) {
class SummaryHistoOpTest : public OpsTestBase {
protected:
void MakeOp(DataType dt) {
ASSERT_OK(NodeDefBuilder("myop", "HistogramSummary")
.Input(FakeInput())
.Input(FakeInput(dt))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "HistogramSummary")
.Input(FakeInput())
.Input(FakeInput(dt))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -151,7 +151,7 @@ TEST_F(SummaryHistoOpTest, SimpleFloat) {
// Feed and run
AddInputFromArray<string>(TensorShape({}), {"taghisto"});
AddInputFromArray<float>(TensorShape({3, 2}), {0.1, -0.7, 4.1, 4., 5., 4.});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);
@ -179,7 +179,7 @@ TEST_F(SummaryHistoOpTest, SimpleDouble) {
// Feed and run
AddInputFromArray<string>(TensorShape({}), {"taghisto"});
AddInputFromArray<double>(TensorShape({3, 2}), {0.1, -0.7, 4.1, 4., 5., 4.});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);
@ -227,10 +227,10 @@ TEST_F(SummaryHistoOpTest, Error_TooManyTagValues) {
class SummaryMergeOpTest : public OpsTestBase {
protected:
void MakeOp(int num_inputs) {
ASSERT_OK(NodeDefBuilder("myop", "MergeSummary")
.Input(FakeInput(num_inputs))
.Finalize(node_def()));
ASSERT_OK(InitOp());
TF_ASSERT_OK(NodeDefBuilder("myop", "MergeSummary")
.Input(FakeInput(num_inputs))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
@ -253,7 +253,7 @@ TEST_F(SummaryMergeOpTest, Simple) {
AddInputFromArray<string>(
TensorShape({3}),
{s1.SerializeAsString(), s2.SerializeAsString(), s3.SerializeAsString()});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);
@ -287,7 +287,7 @@ TEST_F(SummaryMergeOpTest, Simple_MultipleInputs) {
AddInputFromArray<string>(TensorShape({}), {s1.SerializeAsString()});
AddInputFromArray<string>(TensorShape({}), {s2.SerializeAsString()});
AddInputFromArray<string>(TensorShape({}), {s3.SerializeAsString()});
ASSERT_OK(RunOpKernel());
TF_ASSERT_OK(RunOpKernel());
// Check the output size.
Tensor* out_tensor = GetOutput(0);

View File

@ -23,8 +23,8 @@ namespace tensorflow {
TEST(Status, OK) {
EXPECT_EQ(Status::OK().code(), error::OK);
EXPECT_EQ(Status::OK().error_message(), "");
EXPECT_OK(Status::OK());
ASSERT_OK(Status::OK());
TF_EXPECT_OK(Status::OK());
TF_ASSERT_OK(Status::OK());
EXPECT_EQ(Status::OK(), Status());
Status s;
EXPECT_TRUE(s.ok());

View File

@ -19,8 +19,14 @@ limitations under the License.
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
// Macros for testing the results of functions that return util::Status.
// Macros for testing the results of functions that return tensorflow::Status.
#define TF_EXPECT_OK(statement) \
EXPECT_EQ(::tensorflow::Status::OK(), (statement))
#define TF_ASSERT_OK(statement) \
ASSERT_EQ(::tensorflow::Status::OK(), (statement))
// These are deprecated and will be removed, since they conflict with
// macros for related projects (e.g., protobuf).
#define EXPECT_OK(statement) EXPECT_EQ(::tensorflow::Status::OK(), (statement))
#define ASSERT_OK(statement) ASSERT_EQ(::tensorflow::Status::OK(), (statement))

View File

@ -46,14 +46,14 @@ TEST(GetMatchingFiles, Simple) {
EXPECT_EQ(Match(env, "thereisnosuchfile*"), "");
// Populate a few files
EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-00"), ""));
EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-0a"), ""));
EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-01"), ""));
EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-aaa"), ""));
TF_EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-00"), ""));
TF_EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-0a"), ""));
TF_EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-01"), ""));
TF_EXPECT_OK(WriteStringToFile(Env::Default(),
JoinPath(testing::TmpDir(), "match-aaa"), ""));
EXPECT_EQ(Match(env, "match-*"), "match-00,match-01,match-0a,match-aaa");
EXPECT_EQ(Match(env, "match-0[0-9]"), "match-00,match-01");

View File

@ -115,7 +115,7 @@ class RecordioTest : public ::testing::Test {
void Write(const string& msg) {
ASSERT_TRUE(!reading_) << "Write() after starting to read";
ASSERT_OK(writer_->WriteRecord(StringPiece(msg)));
TF_ASSERT_OK(writer_->WriteRecord(StringPiece(msg)));
}
size_t WrittenBytes() const { return dest_.contents_.size(); }

View File

@ -300,11 +300,11 @@ static void MergeDevNamesHelperImpl(const string& name_a, const string& name_b,
const string& expected_merge_name,
bool allow_soft_placement) {
DeviceNameUtils::ParsedName target_a = Name(name_a);
EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_a, Name(name_b),
allow_soft_placement));
TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_a, Name(name_b),
allow_soft_placement));
DeviceNameUtils::ParsedName target_b = Name(name_b);
EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_b, Name(name_a),
allow_soft_placement));
TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_b, Name(name_a),
allow_soft_placement));
EXPECT_EQ(target_a, target_b);
EXPECT_EQ(target_a, Name(expected_merge_name));
EXPECT_EQ(target_b, Name(expected_merge_name));

View File

@ -107,7 +107,7 @@ void SimpleFloatHelper(TensorSliceWriter::CreateBuilderFunction create_function,
// Now we need to read the tensor slices
const string filepattern = strings::StrCat(fname_base, "_*");
TensorSliceReader reader(filepattern, open_function);
EXPECT_OK(reader.status());
TF_EXPECT_OK(reader.status());
EXPECT_EQ(2, reader.num_files());
// We query some of the tensors
@ -231,7 +231,7 @@ void SimpleIntXHelper(TensorSliceWriter::CreateBuilderFunction create_function,
// Now we need to read the tensor slices
const string filepattern = strings::StrCat(fname_base, "_*");
TensorSliceReader reader(filepattern, open_function);
EXPECT_OK(reader.status());
TF_EXPECT_OK(reader.status());
EXPECT_EQ(2, reader.num_files());
// We query some of the tensors