Fix code that ignores tensorflow::Status.

Add a new tensorflow::Status::IgnoreError() method to mark call sites where a Status has been intentionally ignored.
Change: 147402405
Authored by: Peter Hawkins, 2017-02-13 15:34:58 -08:00; committed by: TensorFlower Gardener
parent d065a5d984
commit bc225bfaa5
106 changed files with 459 additions and 361 deletions

View File

@ -715,7 +715,7 @@ TF_Buffer* TF_GetAllOpList() {
*(op_list.add_op()) = op; *(op_list.add_op()) = op;
} }
TF_Buffer* ret = TF_NewBuffer(); TF_Buffer* ret = TF_NewBuffer();
MessageToBuffer(op_list, ret); TF_CHECK_OK(MessageToBuffer(op_list, ret));
return ret; return ret;
} }
@ -1166,7 +1166,7 @@ static TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc,
// TODO(b/28152992): Enable returning the result of this // TODO(b/28152992): Enable returning the result of this
// code-path once we have converted all python shape functions // code-path once we have converted all python shape functions
// to call their C++ versions. // to call their C++ versions.
desc->graph->refiner.AddNode(ret); desc->graph->refiner.AddNode(ret).IgnoreError();
// Add the node to the name-to-node mapping. // Add the node to the name-to-node mapping.
desc->graph->name_map[ret->name()] = ret; desc->graph->name_map[ret->name()] = ret;

View File

@ -920,7 +920,7 @@ void WriteCCOps(const OpList& ops, const string& dot_h_fname,
// Load the override map. // Load the override map.
OpGenOverrideMap override_map; OpGenOverrideMap override_map;
if (!overrides_fnames.empty()) { if (!overrides_fnames.empty()) {
override_map.LoadFileList(env, overrides_fnames); TF_CHECK_OK(override_map.LoadFileList(env, overrides_fnames));
} }
// Write the initial boilerplate to the .h and .cc files. // Write the initial boilerplate to the .h and .cc files.

View File

@ -36,7 +36,7 @@ struct SavedModelBundle {
/// resource leaks, we explicitly call Close on Sessions that we create. /// resource leaks, we explicitly call Close on Sessions that we create.
~SavedModelBundle() { ~SavedModelBundle() {
if (session) { if (session) {
session->Close(); session->Close().IgnoreError();
} }
} }

View File

@ -31,8 +31,8 @@ Coordinator::Coordinator(const std::vector<error::Code>& clean_stop_errors)
} }
Coordinator::~Coordinator() { Coordinator::~Coordinator() {
RequestStop(); RequestStop().IgnoreError();
Join(); Join().IgnoreError();
} }
Status Coordinator::RegisterRunner(std::unique_ptr<RunnerInterface> runner) { Status Coordinator::RegisterRunner(std::unique_ptr<RunnerInterface> runner) {

View File

@ -46,7 +46,7 @@ TEST(CoordinatorTest, TestStopAndWaitOnStop) {
Env::Default()->SleepForMicroseconds(10000000); Env::Default()->SleepForMicroseconds(10000000);
EXPECT_EQ(stopped, false); EXPECT_EQ(stopped, false);
coord.RequestStop(); TF_EXPECT_OK(coord.RequestStop());
done.WaitForNotification(); done.WaitForNotification();
EXPECT_EQ(stopped, true); EXPECT_EQ(stopped, true);
EXPECT_EQ(coord.ShouldStop(), true); EXPECT_EQ(coord.ShouldStop(), true);
@ -98,7 +98,7 @@ class MockQueueRunner : public RunnerInterface {
(*counter)++; (*counter)++;
Env::Default()->SleepForMicroseconds(100000); Env::Default()->SleepForMicroseconds(100000);
} }
coord_->RequestStop(); coord_->RequestStop().IgnoreError();
} }
void SetStatusThread(const Status& status, BlockingCounter* counter) { void SetStatusThread(const Status& status, BlockingCounter* counter) {
Env::Default()->SleepForMicroseconds(100000); Env::Default()->SleepForMicroseconds(100000);
@ -118,16 +118,16 @@ TEST(CoordinatorTest, TestRealStop) {
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord)); std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartCounting(&counter, 100); qr1->StartCounting(&counter, 100);
coord.RegisterRunner(std::move(qr1)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord)); std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartCounting(&counter, 100); qr2->StartCounting(&counter, 100);
coord.RegisterRunner(std::move(qr2)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
// Wait until the counting has started // Wait until the counting has started
while (counter.load() == 0) while (counter.load() == 0)
; ;
coord.RequestStop(); TF_EXPECT_OK(coord.RequestStop());
int temp_counter = counter.load(); int temp_counter = counter.load();
Env::Default()->SleepForMicroseconds(10000000); Env::Default()->SleepForMicroseconds(10000000);
@ -142,7 +142,7 @@ TEST(CoordinatorTest, TestRequestStop) {
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
qr.reset(new MockQueueRunner(&coord)); qr.reset(new MockQueueRunner(&coord));
qr->StartCounting(&counter, 10); qr->StartCounting(&counter, 10);
coord.RegisterRunner(std::move(qr)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
} }
coord.WaitForStop(); coord.WaitForStop();
@ -156,12 +156,12 @@ TEST(CoordinatorTest, TestJoin) {
int join_counter = 0; int join_counter = 0;
std::unique_ptr<MockQueueRunner> qr1( std::unique_ptr<MockQueueRunner> qr1(
new MockQueueRunner(&coord, &join_counter)); new MockQueueRunner(&coord, &join_counter));
coord.RegisterRunner(std::move(qr1)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2( std::unique_ptr<MockQueueRunner> qr2(
new MockQueueRunner(&coord, &join_counter)); new MockQueueRunner(&coord, &join_counter));
coord.RegisterRunner(std::move(qr2)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
coord.RequestStop(); TF_EXPECT_OK(coord.RequestStop());
TF_EXPECT_OK(coord.Join()); TF_EXPECT_OK(coord.Join());
EXPECT_EQ(join_counter, 2); EXPECT_EQ(join_counter, 2);
} }
@ -172,25 +172,25 @@ TEST(CoordinatorTest, StatusReporting) {
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord)); std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartSettingStatus(Status(Code::CANCELLED, ""), &counter); qr1->StartSettingStatus(Status(Code::CANCELLED, ""), &counter);
coord.RegisterRunner(std::move(qr1)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord)); std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartSettingStatus(Status(Code::INVALID_ARGUMENT, ""), &counter); qr2->StartSettingStatus(Status(Code::INVALID_ARGUMENT, ""), &counter);
coord.RegisterRunner(std::move(qr2)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
std::unique_ptr<MockQueueRunner> qr3(new MockQueueRunner(&coord)); std::unique_ptr<MockQueueRunner> qr3(new MockQueueRunner(&coord));
qr3->StartSettingStatus(Status(Code::OUT_OF_RANGE, ""), &counter); qr3->StartSettingStatus(Status(Code::OUT_OF_RANGE, ""), &counter);
coord.RegisterRunner(std::move(qr3)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr3)));
counter.Wait(); counter.Wait();
coord.RequestStop(); TF_EXPECT_OK(coord.RequestStop());
EXPECT_EQ(coord.Join().code(), Code::INVALID_ARGUMENT); EXPECT_EQ(coord.Join().code(), Code::INVALID_ARGUMENT);
} }
TEST(CoordinatorTest, JoinWithoutStop) { TEST(CoordinatorTest, JoinWithoutStop) {
Coordinator coord; Coordinator coord;
std::unique_ptr<MockQueueRunner> qr(new MockQueueRunner(&coord)); std::unique_ptr<MockQueueRunner> qr(new MockQueueRunner(&coord));
coord.RegisterRunner(std::move(qr)); TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
EXPECT_EQ(coord.Join().code(), Code::FAILED_PRECONDITION); EXPECT_EQ(coord.Join().code(), Code::FAILED_PRECONDITION);
} }
@ -198,7 +198,7 @@ TEST(CoordinatorTest, JoinWithoutStop) {
TEST(CoordinatorTest, AllRunnersStopped) { TEST(CoordinatorTest, AllRunnersStopped) {
Coordinator coord; Coordinator coord;
MockQueueRunner* qr = new MockQueueRunner(&coord); MockQueueRunner* qr = new MockQueueRunner(&coord);
coord.RegisterRunner(std::unique_ptr<RunnerInterface>(qr)); TF_ASSERT_OK(coord.RegisterRunner(std::unique_ptr<RunnerInterface>(qr)));
EXPECT_FALSE(coord.AllRunnersStopped()); EXPECT_FALSE(coord.AllRunnersStopped());
qr->Stop(); qr->Stop();

View File

@ -77,7 +77,7 @@ Status QueueRunner::Init(const QueueRunnerDef& queue_runner_def) {
QueueRunner::~QueueRunner() { QueueRunner::~QueueRunner() {
// Cannot run Stop() here because the session might already be closed or // Cannot run Stop() here because the session might already be closed or
// destroyed. // destroyed.
Join(); Join().IgnoreError();
} }
Status QueueRunner::Start(Session* sess) { return Start(sess, 0); } Status QueueRunner::Start(Session* sess) { return Start(sess, 0); }
@ -175,7 +175,7 @@ void QueueRunner::Run(Session* sess, const string& enqueue_op) {
} else if (!status.ok()) { } else if (!status.ok()) {
UpdateStatus(status); UpdateStatus(status);
if (coord_) { if (coord_) {
coord_->RequestStop(); coord_->RequestStop().IgnoreError();
} }
} }
} }

View File

@ -293,7 +293,7 @@ TEST(QueueRunnerTest, StartTimeout) {
// This will timeout since queue0 is not fed and queue1 is fetching data from // This will timeout since queue0 is not fed and queue1 is fetching data from
// queue0. // queue0.
EXPECT_EQ(qr->Start(session.get(), 1).code(), Code::DEADLINE_EXCEEDED); EXPECT_EQ(qr->Start(session.get(), 1).code(), Code::DEADLINE_EXCEEDED);
session->Close(); TF_EXPECT_OK(session->Close());
} }
TEST(QueueRunnerTest, TestCoordinatorStop) { TEST(QueueRunnerTest, TestCoordinatorStop) {
@ -317,8 +317,8 @@ TEST(QueueRunnerTest, TestCoordinatorStop) {
TF_EXPECT_OK(QueueRunner::New(queue_runner1, &coord, &qr1)); TF_EXPECT_OK(QueueRunner::New(queue_runner1, &coord, &qr1));
TF_CHECK_OK(qr1->Start(session.get())); TF_CHECK_OK(qr1->Start(session.get()));
coord.RegisterRunner(std::move(qr0)); TF_EXPECT_OK(coord.RegisterRunner(std::move(qr0)));
coord.RegisterRunner(std::move(qr1)); TF_EXPECT_OK(coord.RegisterRunner(std::move(qr1)));
std::vector<Tensor> dq; std::vector<Tensor> dq;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq)); TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq));
@ -340,7 +340,7 @@ TEST(QueueRunnerTest, CallbackCalledOnError) {
bool error_caught = false; bool error_caught = false;
qr->AddErrorCallback([&error_caught](const Status&) { error_caught = true; }); qr->AddErrorCallback([&error_caught](const Status&) { error_caught = true; });
TF_EXPECT_OK(qr->Start(session.get())); TF_EXPECT_OK(qr->Start(session.get()));
qr->Join(); EXPECT_FALSE(qr->Join().ok());
EXPECT_TRUE(error_caught); EXPECT_TRUE(error_caught);
} }

View File

@ -168,7 +168,7 @@ TEST(EncapsulateSubgraphsTest, NoFunctions) {
GraphDef graphdef_in; GraphDef graphdef_in;
FunctionDefLibrary library_in; FunctionDefLibrary library_in;
builder.ToGraphDef(&graphdef_in); TF_EXPECT_OK(builder.ToGraphDef(&graphdef_in));
*library_in.add_function() = test::function::XTimesTwo(); *library_in.add_function() = test::function::XTimesTwo();
GraphDef graphdef_out = graphdef_in; GraphDef graphdef_out = graphdef_in;
@ -195,7 +195,7 @@ TEST(EncapsulateSubgraphsTest, OneFunction) {
Node* d = Binary(b, c, b1.opts().WithName("c").WithControlInput(c).WithAttr( Node* d = Binary(b, c, b1.opts().WithName("c").WithControlInput(c).WithAttr(
"_encapsulate", "F1")); "_encapsulate", "F1"));
Binary(a, d, b1.opts().WithName("E")); Binary(a, d, b1.opts().WithName("E"));
b1.ToGraphDef(&graphdef); TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
} }
TF_EXPECT_OK(Encapsulate(&graphdef, &library)); TF_EXPECT_OK(Encapsulate(&graphdef, &library));
@ -224,7 +224,7 @@ TEST(EncapsulateSubgraphsTest, OneFunction) {
Node* call = b2.opts().FinalizeBuilder(&node_builder); Node* call = b2.opts().FinalizeBuilder(&node_builder);
Binary(a, call, b2.opts().WithName("E")); Binary(a, call, b2.opts().WithName("E"));
b2.ToGraphDef(&graphdef_expected); TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
} }
// If there are no marked nodes, funcification should be a no-op. // If there are no marked nodes, funcification should be a no-op.
@ -251,7 +251,7 @@ TEST(EncapsulateSubgraphsTest, TwoFunctions) {
Binary(b, c, b1.opts().WithName("D").WithControlInput(control).WithAttr( Binary(b, c, b1.opts().WithName("D").WithControlInput(control).WithAttr(
"_encapsulate", "F2")); "_encapsulate", "F2"));
Binary(a, d, b1.opts().WithName("E")); Binary(a, d, b1.opts().WithName("E"));
b1.ToGraphDef(&graphdef); TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
} }
TF_EXPECT_OK(Encapsulate(&graphdef, &library)); TF_EXPECT_OK(Encapsulate(&graphdef, &library));
@ -290,7 +290,7 @@ TEST(EncapsulateSubgraphsTest, TwoFunctions) {
Node* call2 = b2.opts().FinalizeBuilder(&nb2); Node* call2 = b2.opts().FinalizeBuilder(&nb2);
Binary(a, call2, b2.opts().WithName("E")); Binary(a, call2, b2.opts().WithName("E"));
b2.ToGraphDef(&graphdef_expected); TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
} }
// If there are no marked nodes, funcification should be a no-op. // If there are no marked nodes, funcification should be a no-op.

View File

@ -126,8 +126,8 @@ Status GraphToFunctionDef(const Graph& graph, const string& name,
if (node->type_string() == kArgOp) { if (node->type_string() == kArgOp) {
int index; int index;
DataType type; DataType type;
GetNodeAttr(node->def(), "T", &type); TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "T", &type));
GetNodeAttr(node->def(), "index", &index); TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "index", &index));
while (fdef->signature().input_arg_size() <= index) { while (fdef->signature().input_arg_size() <= index) {
fdef->mutable_signature()->add_input_arg(); fdef->mutable_signature()->add_input_arg();
} }
@ -143,8 +143,8 @@ Status GraphToFunctionDef(const Graph& graph, const string& name,
if (node->type_string() == kRetValOp) { if (node->type_string() == kRetValOp) {
int index; int index;
DataType type; DataType type;
GetNodeAttr(node->def(), "T", &type); TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "T", &type));
GetNodeAttr(node->def(), "index", &index); TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "index", &index));
while (fdef->signature().output_arg_size() <= index) { while (fdef->signature().output_arg_size() <= index) {
fdef->mutable_signature()->add_output_arg(); fdef->mutable_signature()->add_output_arg();
} }

View File

@ -54,7 +54,7 @@ TEST(GraphToFunctionDefTest, Basics) {
auto h = ops::_Retval(root.WithOpName("H"), g, 0); auto h = ops::_Retval(root.WithOpName("H"), g, 0);
GraphDef graph_def; GraphDef graph_def;
root.ToGraphDef(&graph_def); TF_EXPECT_OK(root.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphConstructorOptions options; GraphConstructorOptions options;

View File

@ -77,7 +77,7 @@ TEST(XlaCompilationTest, Chains) {
ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D")); ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D"));
Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E")); Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E"));
ops::UnaryOp("Relu", e, builder.opts().WithName("F")); ops::UnaryOp("Relu", e, builder.opts().WithName("F"));
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph); MarkForCompilation(&graph);
@ -102,7 +102,7 @@ TEST(XlaCompilationTest, UncompilableCycles) {
Node* b = Node* b =
ops::UnaryOp("UncompilableUnary", a, builder.opts().WithName("B")); ops::UnaryOp("UncompilableUnary", a, builder.opts().WithName("B"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C")); ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C"));
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph); MarkForCompilation(&graph);
@ -122,7 +122,7 @@ TEST(XlaCompilationTest, CompilableCycles) {
.WithAttr("value", Tensor())); .WithAttr("value", Tensor()));
Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B")); Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C")); ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C"));
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph); MarkForCompilation(&graph);
@ -145,7 +145,7 @@ TEST(XlaCompilationTest, UnsupportedTypes) {
.WithAttr("value", Tensor(DT_COMPLEX64, TensorShape()))); .WithAttr("value", Tensor(DT_COMPLEX64, TensorShape())));
Node* b = ops::UnaryOp("Neg", a, builder.opts().WithName("B")); Node* b = ops::UnaryOp("Neg", a, builder.opts().WithName("B"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C")); ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C"));
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph); MarkForCompilation(&graph);
@ -174,7 +174,7 @@ TEST(XlaCompilationTest, ConcatWithConstArg) {
concat_builder.Input(dim).Input({a, a}).Attr("N", 2); concat_builder.Input(dim).Input({a, a}).Attr("N", 2);
builder.opts().FinalizeBuilder(&concat_builder); builder.opts().FinalizeBuilder(&concat_builder);
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph); MarkForCompilation(&graph);
@ -201,7 +201,7 @@ TEST(XlaCompilationTest, FunctionCalls) {
Node* b = ops::BinaryOp("CompilableFn", a, a, builder.opts().WithName("B")); Node* b = ops::BinaryOp("CompilableFn", a, a, builder.opts().WithName("B"));
Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C")); Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
ops::UnaryOp("UncompilableFn", c, builder.opts().WithName("D")); ops::UnaryOp("UncompilableFn", c, builder.opts().WithName("D"));
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph, &flib_def); MarkForCompilation(&graph, &flib_def);
@ -231,8 +231,8 @@ TEST(XlaCompilationTest, MetadataOpsDontStartClusters) {
Node* b = ops::UnaryOp("Shape", a, builder.opts().WithName("B")); Node* b = ops::UnaryOp("Shape", a, builder.opts().WithName("B"));
Node* c = ops::UnaryOp("Rank", b, builder.opts().WithName("C")); Node* c = ops::UnaryOp("Rank", b, builder.opts().WithName("C"));
Node* d = ops::UnaryOp("Size", c, builder.opts().WithName("D")); Node* d = ops::UnaryOp("Size", c, builder.opts().WithName("D"));
ops::UnaryOp("Shape", d, builder.opts().WithName("C")); ops::UnaryOp("Shape", d, builder.opts().WithName("E"));
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph); MarkForCompilation(&graph);
auto clusters = GetClusters(*graph); auto clusters = GetClusters(*graph);
@ -318,7 +318,7 @@ TEST(XlaCompilationTest, SymbolicGradients) {
d_builder.Input({c, c}); d_builder.Input({c, c});
builder.opts().FinalizeBuilder(&d_builder); builder.opts().FinalizeBuilder(&d_builder);
builder.ToGraph(graph.get()); TF_EXPECT_OK(builder.ToGraph(graph.get()));
} }
MarkForCompilation(&graph); MarkForCompilation(&graph);
@ -344,7 +344,7 @@ TEST(XlaCompilationTest, Loops) {
auto d = ops::Add(root.WithOpName("D"), c, exit); auto d = ops::Add(root.WithOpName("D"), c, exit);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
root.ToGraph(graph.get()); TF_EXPECT_OK(root.ToGraph(graph.get()));
MarkForCompilation(&graph); MarkForCompilation(&graph);
auto clusters = GetClusters(*graph); auto clusters = GetClusters(*graph);

View File

@ -43,7 +43,7 @@ class RetvalOp : public XlaOpKernel {
if (frame) { if (frame) {
// If 'frame' is non-null, this is an inner function call inside a JIT // If 'frame' is non-null, this is an inner function call inside a JIT
// compilation. // compilation.
frame->SetRetval(index_, input); OP_REQUIRES_OK(ctx, frame->SetRetval(index_, input));
} else { } else {
xla::ComputationDataHandle input = ctx->Input(0); xla::ComputationDataHandle input = ctx->Input(0);
const TensorShape input_shape = ctx->InputShape(0); const TensorShape input_shape = ctx->InputShape(0);
@ -58,7 +58,7 @@ class RetvalOp : public XlaOpKernel {
if (input_shape.num_elements() == 0 || is_constant.ValueOrDie()) { if (input_shape.num_elements() == 0 || is_constant.ValueOrDie()) {
xla::Literal literal; xla::Literal literal;
OP_REQUIRES_OK(ctx, ctx->ConstantInput(0, &literal)); OP_REQUIRES_OK(ctx, ctx->ConstantInput(0, &literal));
tc.AddConstRetval(index_, dtype_, literal); OP_REQUIRES_OK(ctx, tc.AddConstRetval(index_, dtype_, literal));
} else { } else {
tc.AddRetval(index_, input); tc.AddRetval(index_, input);
} }

View File

@ -276,20 +276,19 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
TF_RETURN_IF_ERROR(root->Accept(&fused_emitter)); TF_RETURN_IF_ERROR(root->Accept(&fused_emitter));
Shape input_shape = root->operand(0)->shape(); Shape input_shape = root->operand(0)->shape();
// EmitRedutionToVector requires the input shape to have a layout, but // EmitReductionToVector requires the input shape to have a layout, but
// fused instructions don't have one. So we determine its layout from // fused instructions don't have one. So we determine its layout from
// the fusion's operands. The choice of the layout only affects // the fusion's operands. The choice of the layout only affects
// performance but not correctness. // performance but not correctness.
auto choose_input_layout = []( auto choose_input_layout = [](
tensorflow::gtl::ArraySlice<const HloInstruction*> operands, tensorflow::gtl::ArraySlice<const HloInstruction*> operands,
Shape* input_shape) { Shape* input_shape) -> Status {
// Prefer the layout of an operand whose shape is compatible with // Prefer the layout of an operand whose shape is compatible with
// input_shape. // input_shape.
for (const HloInstruction* operand : operands) { for (const HloInstruction* operand : operands) {
if (ShapeUtil::Compatible(*input_shape, operand->shape())) { if (ShapeUtil::Compatible(*input_shape, operand->shape())) {
LayoutUtil::CopyLayoutBetweenShapes(operand->shape(), return LayoutUtil::CopyLayoutBetweenShapes(operand->shape(),
input_shape); input_shape);
return;
} }
} }
// If no operand has a compatible shape, prefer an operand that has // If no operand has a compatible shape, prefer an operand that has
@ -300,19 +299,20 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
// Do not use CopyLayoutBetweenShapes because input_shape and // Do not use CopyLayoutBetweenShapes because input_shape and
// operand->shape() may be incompatible. // operand->shape() may be incompatible.
*input_shape->mutable_layout() = operand->shape().layout(); *input_shape->mutable_layout() = operand->shape().layout();
return; return Status::OK();
} }
} }
// When all the above fails, which is rare, set the default layout. // When all the above fails, which is rare, set the default layout.
LayoutUtil::SetToDefaultLayout(input_shape); LayoutUtil::SetToDefaultLayout(input_shape);
return Status::OK();
}; };
choose_input_layout(fusion->operands(), &input_shape); TF_RETURN_IF_ERROR(
choose_input_layout(fusion->operands(), &input_shape));
return EmitReductionToVector( return EmitReductionToVector(
root, input_shape, fused_emitter.GetGenerator(root->operand(0)), root, input_shape, fused_emitter.GetGenerator(root->operand(0)),
fused_emitter.GetGenerator(root->operand(1)), root->dimensions(), fused_emitter.GetGenerator(root->operand(1)), root->dimensions(),
root->to_apply()); root->to_apply());
break;
} }
default: default:
LOG(FATAL) << "Bad opcode for input fusion: " LOG(FATAL) << "Bad opcode for input fusion: "

View File

@ -232,7 +232,7 @@ REGISTER_OP("CudnnRNNParamsToCanonical")
ShapeHandle unused; ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &unused));
int num_params; int num_params;
c->GetAttr("num_params", &num_params); TF_RETURN_IF_ERROR(c->GetAttr("num_params", &num_params));
// Set shape for weight matrices // Set shape for weight matrices
for (int i = 0; i < num_params; i++) { for (int i = 0; i < num_params; i++) {
c->set_output(i, c->set_output(i,

View File

@ -53,7 +53,7 @@ class FileDeleter {
explicit FileDeleter(const string& filename) : filename_(filename) {} explicit FileDeleter(const string& filename) : filename_(filename) {}
~FileDeleter() { ~FileDeleter() {
Env& env = *Env::Default(); Env& env = *Env::Default();
env.DeleteFile(filename_); env.DeleteFile(filename_).IgnoreError();
} }
private: private:

View File

@ -596,7 +596,7 @@ class SliceHelper {
CHECK(aligned.shape().IsSameSize(t.shape())); CHECK(aligned.shape().IsSameSize(t.shape()));
CHECK_EQ(aligned.dtype(), t.dtype()); CHECK_EQ(aligned.dtype(), t.dtype());
} else { // allocate a new temporary tensor } else { // allocate a new temporary tensor
ctx_->allocate_temp(t.dtype(), t.shape(), &aligned); TF_CHECK_OK(ctx_->allocate_temp(t.dtype(), t.shape(), &aligned));
pool_.emplace(name, std::make_pair(aligned, true)); pool_.emplace(name, std::make_pair(aligned, true));
} }
functor::TensorCopyUnaligned<Device, T>()(device_, t.unaligned_flat<T>(), functor::TensorCopyUnaligned<Device, T>()(device_, t.unaligned_flat<T>(),

View File

@ -319,7 +319,7 @@ Status BuildTensorNameToDtypeMap(
// Converts SessionBundle signatures to SavedModel signature-defs. // Converts SessionBundle signatures to SavedModel signature-defs.
Status ConvertSignaturesToSignatureDefs(MetaGraphDef* meta_graph_def) { Status ConvertSignaturesToSignatureDefs(MetaGraphDef* meta_graph_def) {
Signatures signatures; Signatures signatures;
GetSignatures(*meta_graph_def, &signatures); GetSignatures(*meta_graph_def, &signatures).IgnoreError();
// Build a map of tensor-names to the corresponding tensor-info with `name` // Build a map of tensor-names to the corresponding tensor-info with `name`
// and `dtype` fields. // and `dtype` fields.

View File

@ -144,7 +144,7 @@ TEST(BundleShimTest, AddOutputToSignatureDef) {
TEST(BundleShimTest, DefaultSignatureMissing) { TEST(BundleShimTest, DefaultSignatureMissing) {
MetaGraphDef meta_graph_def; MetaGraphDef meta_graph_def;
// Signatures signatures; // Signatures signatures;
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(0, meta_graph_def.signature_def_size()); EXPECT_EQ(0, meta_graph_def.signature_def_size());
} }
@ -158,7 +158,7 @@ TEST(BundleShimTest, DefaultSignatureEmpty) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(0, meta_graph_def.signature_def_size()); EXPECT_EQ(0, meta_graph_def.signature_def_size());
} }
@ -174,7 +174,7 @@ TEST(BundleShimTest, DefaultSignatureRegression) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(1, meta_graph_def.signature_def_size()); EXPECT_EQ(1, meta_graph_def.signature_def_size());
const auto actual_signature_def = const auto actual_signature_def =
meta_graph_def.signature_def().find(kDefaultServingSignatureDefKey); meta_graph_def.signature_def().find(kDefaultServingSignatureDefKey);
@ -202,7 +202,7 @@ TEST(BundleShimTest, DefaultSignatureClassification) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(1, meta_graph_def.signature_def_size()); EXPECT_EQ(1, meta_graph_def.signature_def_size());
const auto actual_signature_def = const auto actual_signature_def =
meta_graph_def.signature_def().find(kDefaultServingSignatureDefKey); meta_graph_def.signature_def().find(kDefaultServingSignatureDefKey);
@ -237,7 +237,7 @@ TEST(BundleShimTest, DefaultSignatureGeneric) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(0, meta_graph_def.signature_def_size()); EXPECT_EQ(0, meta_graph_def.signature_def_size());
} }
@ -261,7 +261,7 @@ TEST(BundleShimTest, NamedRegressionSignatures) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
ASSERT_EQ(2, meta_graph_def.signature_def_size()); ASSERT_EQ(2, meta_graph_def.signature_def_size());
ValidateSignatureDef(meta_graph_def, "foo", ValidateSignatureDef(meta_graph_def, "foo",
@ -315,7 +315,7 @@ TEST(BundleShimTest, NamedClassificationSignatures) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
ASSERT_EQ(2, meta_graph_def.signature_def_size()); ASSERT_EQ(2, meta_graph_def.signature_def_size());
ValidateSignatureDef(meta_graph_def, "foo", ValidateSignatureDef(meta_graph_def, "foo",
@ -374,7 +374,7 @@ TEST(BundleShimTest, NamedSignatureGenericInputsAndOutputs) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(1, meta_graph_def.signature_def_size()); EXPECT_EQ(1, meta_graph_def.signature_def_size());
const auto actual_signature_def = const auto actual_signature_def =
meta_graph_def.signature_def().find(kDefaultServingSignatureDefKey); meta_graph_def.signature_def().find(kDefaultServingSignatureDefKey);
@ -413,7 +413,7 @@ TEST(BundleShimTest, NamedSignatureGenericNoInputsOrOutputs) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(0, meta_graph_def.signature_def_size()); EXPECT_EQ(0, meta_graph_def.signature_def_size());
} }
@ -434,7 +434,7 @@ TEST(BundleShimTest, NamedSignatureGenericOnlyInput) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(0, meta_graph_def.signature_def_size()); EXPECT_EQ(0, meta_graph_def.signature_def_size());
} }
@ -473,7 +473,7 @@ TEST(BundleShimTest, DefaultAndNamedSignatureWithPredict) {
.mutable_any_list() .mutable_any_list()
->add_value() ->add_value()
->PackFrom(signatures); ->PackFrom(signatures);
ConvertSignaturesToSignatureDefs(&meta_graph_def); TF_EXPECT_OK(ConvertSignaturesToSignatureDefs(&meta_graph_def));
EXPECT_EQ(2, meta_graph_def.signature_def_size()); EXPECT_EQ(2, meta_graph_def.signature_def_size());
// Verify that the default regression signature is converted to a // Verify that the default regression signature is converted to a

View File

@ -50,7 +50,7 @@ struct SessionBundle {
// resource leaks, we explicitly call Close on Sessions that we create. // resource leaks, we explicitly call Close on Sessions that we create.
~SessionBundle() { ~SessionBundle() {
if (session) { if (session) {
session->Close(); session->Close().IgnoreError();
} }
} }

View File

@ -99,7 +99,7 @@ Status DeviceFactory::AddDevices(const SessionOptions& options,
"CPU Factory not registered. Did you link in threadpool_device?"); "CPU Factory not registered. Did you link in threadpool_device?");
} }
size_t init_size = devices->size(); size_t init_size = devices->size();
cpu_factory->CreateDevices(options, name_prefix, devices); TF_RETURN_IF_ERROR(cpu_factory->CreateDevices(options, name_prefix, devices));
if (devices->size() == init_size) { if (devices->size() == init_size) {
return errors::NotFound("No CPU devices are available in this process"); return errors::NotFound("No CPU devices are available in this process");
} }
@ -126,7 +126,7 @@ Device* DeviceFactory::NewDevice(const string& type,
SessionOptions opt = options; SessionOptions opt = options;
(*opt.config.mutable_device_count())[type] = 1; (*opt.config.mutable_device_count())[type] = 1;
std::vector<Device*> devices; std::vector<Device*> devices;
device_factory->CreateDevices(opt, name_prefix, &devices); TF_CHECK_OK(device_factory->CreateDevices(opt, name_prefix, &devices));
CHECK_EQ(devices.size(), size_t{1}); CHECK_EQ(devices.size(), size_t{1});
return devices[0]; return devices[0];
} }

View File

@ -271,7 +271,7 @@ DirectSession::DirectSession(const SessionOptions& options,
} }
DirectSession::~DirectSession() { DirectSession::~DirectSession() {
if (!closed_) Close(); if (!closed_) Close().IgnoreError();
for (auto& it : partial_runs_) { for (auto& it : partial_runs_) {
it.second.reset(nullptr); it.second.reset(nullptr);
} }

View File

@ -762,7 +762,7 @@ TEST(DirectSessionTest, TimeoutSession) {
// Verifies that the error code is DEADLINE_EXCEEDED. // Verifies that the error code is DEADLINE_EXCEEDED.
Status s = session->Run({}, {}, {"fifo_queue_Dequeue"}, nullptr); Status s = session->Run({}, {}, {"fifo_queue_Dequeue"}, nullptr);
ASSERT_EQ(error::DEADLINE_EXCEEDED, s.code()); ASSERT_EQ(error::DEADLINE_EXCEEDED, s.code());
session->Close(); TF_ASSERT_OK(session->Close());
// Creates a session with no operation_timeout_in_ms. // Creates a session with no operation_timeout_in_ms.
session.reset(CreateSession()); session.reset(CreateSession());
@ -774,7 +774,7 @@ TEST(DirectSessionTest, TimeoutSession) {
Status s2 = session->Run(run_options, {}, {}, {"fifo_queue_Dequeue"}, nullptr, Status s2 = session->Run(run_options, {}, {}, {"fifo_queue_Dequeue"}, nullptr,
nullptr); nullptr);
ASSERT_EQ(error::DEADLINE_EXCEEDED, s2.code()); ASSERT_EQ(error::DEADLINE_EXCEEDED, s2.code());
session->Close(); TF_ASSERT_OK(session->Close());
} }
// Accesses the cancellation manager for the step after the step has been // Accesses the cancellation manager for the step after the step has been
@ -827,7 +827,7 @@ TEST(DirectSessionTest, TestTimeoutCleanShutdown) {
// Verify that the op ran to completion. // Verify that the op ran to completion.
ASSERT_TRUE(CancellationMgrPollingOp::notification.HasBeenNotified()); ASSERT_TRUE(CancellationMgrPollingOp::notification.HasBeenNotified());
session->Close(); TF_ASSERT_OK(session->Close());
} }
class BlockingOpState { class BlockingOpState {
@ -1058,7 +1058,7 @@ TEST(DirectSessionTest, TestDirectSessionRunClose) {
outputs.clear(); outputs.clear();
// Close the session. // Close the session.
session->Close(); TF_ASSERT_OK(session->Close());
// Run the read on the variable to get an error. // Run the read on the variable to get an error.
Status s = session->Run({} /* inputs */, {}, Status s = session->Run({} /* inputs */, {},
@ -1105,7 +1105,7 @@ TEST(DirectSessionTest, TestDirectSessionPRunClose) {
value_22.scalar<float>()() = 22.0; value_22.scalar<float>()() = 22.0;
// Close the session. // Close the session.
session->Close(); TF_ASSERT_OK(session->Close());
// Feed first_const, fetch first_identity // Feed first_const, fetch first_identity
s = session->PRun(handle, {{first_const->name(), value_11}}, s = session->PRun(handle, {{first_const->name(), value_11}},
@ -1142,7 +1142,7 @@ TEST(DirectSessionTest, TestDirectSessionReset) {
outputs.clear(); outputs.clear();
// Reset the containers. // Reset the containers.
Reset(options, {}); TF_EXPECT_OK(Reset(options, {}));
// Run the read on the variable to get an error. // Run the read on the variable to get an error.
// TODO(suharshs): This test only works because we close the Session in Reset. // TODO(suharshs): This test only works because we close the Session in Reset.

View File

@ -557,7 +557,7 @@ Status ExecutorImpl::Initialize() {
// Build the information about frames in this subgraph. // Build the information about frames in this subgraph.
ControlFlowInfo cf_info; ControlFlowInfo cf_info;
BuildControlFlowInfo(graph_, &cf_info); TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph_, &cf_info));
// Cache this value so we make this virtual function call once, rather // Cache this value so we make this virtual function call once, rather
// that O(# steps * # nodes per step) times. // that O(# steps * # nodes per step) times.
@ -1747,7 +1747,8 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
if (item.num_outputs == 0 && impl_->params_.node_outputs_cb != nullptr) { if (item.num_outputs == 0 && impl_->params_.node_outputs_cb != nullptr) {
// If the node has no output, invoke the callback with output slot set to // If the node has no output, invoke the callback with output slot set to
// -1, signifying that this is a no-output node. // -1, signifying that this is a no-output node.
impl_->params_.node_outputs_cb(item.node->name(), -1, nullptr, false, ctx); s.Update(impl_->params_.node_outputs_cb(item.node->name(), -1, nullptr,
false, ctx));
} }
for (int i = 0; i < item.num_outputs; ++i) { for (int i = 0; i < item.num_outputs; ++i) {
@ -1792,8 +1793,8 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
// Experimental: debugger (tfdb) access to intermediate node outputs. // Experimental: debugger (tfdb) access to intermediate node outputs.
if (impl_->params_.node_outputs_cb != nullptr) { if (impl_->params_.node_outputs_cb != nullptr) {
impl_->params_.node_outputs_cb(item.node->name(), i, out->ref, true, s.Update(impl_->params_.node_outputs_cb(item.node->name(), i,
ctx); out->ref, true, ctx));
} }
} else { } else {
// NOTE that std::move is used here, so val.tensor goes to // NOTE that std::move is used here, so val.tensor goes to
@ -1809,8 +1810,8 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
// Experimental: debugger access to intermediate node outputs. // Experimental: debugger access to intermediate node outputs.
if (impl_->params_.node_outputs_cb != nullptr) { if (impl_->params_.node_outputs_cb != nullptr) {
impl_->params_.node_outputs_cb(item.node->name(), i, out->val.get(), s.Update(impl_->params_.node_outputs_cb(
false, ctx); item.node->name(), i, out->val.get(), false, ctx));
} }
} }
} else { } else {

View File

@ -36,7 +36,7 @@ class GpuStreamUtilTest : public OpsTestBase {
TEST_F(GpuStreamUtilTest, BogusOpts) { TEST_F(GpuStreamUtilTest, BogusOpts) {
auto root = Scope::NewRootScope().ExitOnError(); auto root = Scope::NewRootScope().ExitOnError();
Graph g(OpRegistry::Global()); Graph g(OpRegistry::Global());
root.ToGraph(&g); TF_ASSERT_OK(root.ToGraph(&g));
std::unordered_map<int, int> node_to_stream_id; std::unordered_map<int, int> node_to_stream_id;
gpu_stream_util::AssignStreamsOpts opts; gpu_stream_util::AssignStreamsOpts opts;
Status status; Status status;
@ -56,7 +56,7 @@ TEST_F(GpuStreamUtilTest, BogusOpts) {
TEST_F(GpuStreamUtilTest, EmptyGraph) { TEST_F(GpuStreamUtilTest, EmptyGraph) {
auto root = Scope::NewRootScope().ExitOnError(); auto root = Scope::NewRootScope().ExitOnError();
Graph g(OpRegistry::Global()); Graph g(OpRegistry::Global());
root.ToGraph(&g); TF_ASSERT_OK(root.ToGraph(&g));
std::unordered_map<int, int> node_to_stream_id; std::unordered_map<int, int> node_to_stream_id;
gpu_stream_util::AssignStreamsOpts opts; gpu_stream_util::AssignStreamsOpts opts;
TF_ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id)); TF_ASSERT_OK(gpu_stream_util::AssignStreams(&g, opts, &node_to_stream_id));

View File

@ -144,13 +144,13 @@ void Benchmark::RunWithArgs(
for (const auto& p : in) { for (const auto& p : in) {
Rendezvous::ParsedKey parsed; Rendezvous::ParsedKey parsed;
TF_CHECK_OK(Rendezvous::ParseKey(p.first, &parsed)); TF_CHECK_OK(Rendezvous::ParseKey(p.first, &parsed));
rendez_->Send(parsed, Rendezvous::Args(), p.second, false); TF_CHECK_OK(rendez_->Send(parsed, Rendezvous::Args(), p.second, false));
} }
TF_CHECK_OK(exec_->Run(args)); TF_CHECK_OK(exec_->Run(args));
for (const string& key : out) { for (const string& key : out) {
Rendezvous::ParsedKey parsed; Rendezvous::ParsedKey parsed;
TF_CHECK_OK(Rendezvous::ParseKey(key, &parsed)); TF_CHECK_OK(Rendezvous::ParseKey(key, &parsed));
rendez_->Recv(parsed, Rendezvous::Args(), &unused, &is_dead); TF_CHECK_OK(rendez_->Recv(parsed, Rendezvous::Args(), &unused, &is_dead));
} }
} }
TF_CHECK_OK(device_->Sync()); TF_CHECK_OK(device_->Sync());
@ -161,13 +161,13 @@ void Benchmark::RunWithArgs(
for (const auto& p : in) { for (const auto& p : in) {
Rendezvous::ParsedKey parsed; Rendezvous::ParsedKey parsed;
TF_CHECK_OK(Rendezvous::ParseKey(p.first, &parsed)); TF_CHECK_OK(Rendezvous::ParseKey(p.first, &parsed));
rendez_->Send(parsed, Rendezvous::Args(), p.second, false); TF_CHECK_OK(rendez_->Send(parsed, Rendezvous::Args(), p.second, false));
} }
TF_CHECK_OK(exec_->Run(args)); TF_CHECK_OK(exec_->Run(args));
for (const string& key : out) { for (const string& key : out) {
Rendezvous::ParsedKey parsed; Rendezvous::ParsedKey parsed;
TF_CHECK_OK(Rendezvous::ParseKey(key, &parsed)); TF_CHECK_OK(Rendezvous::ParseKey(key, &parsed));
rendez_->Recv(parsed, Rendezvous::Args(), &unused, &is_dead); TF_CHECK_OK(rendez_->Recv(parsed, Rendezvous::Args(), &unused, &is_dead));
} }
} }

View File

@ -46,8 +46,7 @@ TEST(OptimizationRegistry, OptimizationPass) {
class UpdateFuncLibPass : public GraphOptimizationPass { class UpdateFuncLibPass : public GraphOptimizationPass {
public: public:
Status Run(const GraphOptimizationPassOptions& options) { Status Run(const GraphOptimizationPassOptions& options) {
options.flib_def->AddFunctionDef(test::function::WXPlusB()); return options.flib_def->AddFunctionDef(test::function::WXPlusB());
return Status::OK();
} }
}; };

View File

@ -37,7 +37,7 @@ DebuggerState::DebuggerState(const DebugOptions& debug_options)
DebuggerState::~DebuggerState() { DebuggerState::~DebuggerState() {
for (const string& debug_url : debug_urls_) { for (const string& debug_url : debug_urls_) {
DebugIO::CloseDebugURL(debug_url); DebugIO::CloseDebugURL(debug_url).IgnoreError();
} }
} }
@ -177,8 +177,8 @@ Status DebugNodeInserter::InsertNodes(
const DataType src_dt = src_node->output_type(src_output_slot); const DataType src_dt = src_node->output_type(src_output_slot);
MemoryType memory_type; MemoryType memory_type;
MemoryTypeForOutput(device_type, graph, src_node, src_output_slot, TF_RETURN_IF_ERROR(MemoryTypeForOutput(device_type, graph, src_node,
&memory_type); src_output_slot, &memory_type));
// Create the copy node for the watched tensor. // Create the copy node for the watched tensor.
Node* copy_node; Node* copy_node;

View File

@ -55,8 +55,8 @@ class GrpcDebugTest : public ::testing::Test {
// Clean up server dump directory. // Clean up server dump directory.
int64 undeleted_files = -1; int64 undeleted_files = -1;
int64 undeleted_dirs = -1; int64 undeleted_dirs = -1;
Env::Default()->DeleteRecursively(server_client_pair->dump_root, TF_CHECK_OK(Env::Default()->DeleteRecursively(
&undeleted_files, &undeleted_dirs); server_client_pair->dump_root, &undeleted_files, &undeleted_dirs));
ASSERT_EQ(0, undeleted_files); ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs); ASSERT_EQ(0, undeleted_dirs);
@ -85,7 +85,7 @@ TEST_F(GrpcDebugTest, AttemptToSendToNonexistentGrpcAddress) {
"Channel at the following gRPC stream URL is not ready: " "Channel at the following gRPC stream URL is not ready: "
"grpc://0.0.0.0:0")); "grpc://0.0.0.0:0"));
DebugIO::CloseDebugURL(kInvalidGrpcUrl); TF_ASSERT_OK(DebugIO::CloseDebugURL(kInvalidGrpcUrl));
} }
TEST_F(GrpcDebugTest, SendSingleDebugTensorViaGrpcTest) { TEST_F(GrpcDebugTest, SendSingleDebugTensorViaGrpcTest) {
@ -97,7 +97,8 @@ TEST_F(GrpcDebugTest, SendSingleDebugTensorViaGrpcTest) {
// Verify that the expected dump file exists. // Verify that the expected dump file exists.
std::vector<string> dump_files; std::vector<string> dump_files;
Env::Default()->GetChildren(server_client_pair->dump_root, &dump_files); TF_EXPECT_OK(
Env::Default()->GetChildren(server_client_pair->dump_root, &dump_files));
ASSERT_EQ(1, dump_files.size()); ASSERT_EQ(1, dump_files.size());
ASSERT_EQ(0, dump_files[0].find("prep_node_0_DebugIdentity_")); ASSERT_EQ(0, dump_files[0].find("prep_node_0_DebugIdentity_"));
@ -179,7 +180,8 @@ TEST_F(GrpcDebugTest, SendMultipleDebugTensorsSynchronizedViaGrpcTest) {
// Load the dump files generated by the server upon receiving the tensors // Load the dump files generated by the server upon receiving the tensors
// via the Event stream. // via the Event stream.
std::vector<string> dump_files; std::vector<string> dump_files;
Env::Default()->GetChildren(server_client_pair->dump_root, &dump_files); TF_EXPECT_OK(
Env::Default()->GetChildren(server_client_pair->dump_root, &dump_files));
// One prep tensor plus kSends concurrent tensors are expected. // One prep tensor plus kSends concurrent tensors are expected.
ASSERT_EQ(1 + kSends, dump_files.size()); ASSERT_EQ(1 + kSends, dump_files.size());

View File

@ -51,7 +51,8 @@ namespace test {
string dump_path; string dump_path;
DebugFileIO::DumpTensorToDir(node_name, output_slot, debug_op, tensor, DebugFileIO::DumpTensorToDir(node_name, output_slot, debug_op, tensor,
event.wall_time(), dump_root, &dump_path); event.wall_time(), dump_root, &dump_path)
.IgnoreError();
} }
return ::grpc::Status::OK; return ::grpc::Status::OK;

View File

@ -264,7 +264,8 @@ Status DebugIO::PublishGraph(const Graph& graph,
status.Update( status.Update(
DebugFileIO::DumpEventProtoToFile(event, dump_root_dir, file_name)); DebugFileIO::DumpEventProtoToFile(event, dump_root_dir, file_name));
} else if (debug_url.find(kGrpcURLScheme) == 0) { } else if (debug_url.find(kGrpcURLScheme) == 0) {
DebugGrpcIO::SendEventProtoThroughGrpcStream(event, debug_url); status.Update(
DebugGrpcIO::SendEventProtoThroughGrpcStream(event, debug_url));
} }
} }
@ -331,7 +332,7 @@ Status DebugFileIO::DumpEventProtoToFile(const Event& event_proto,
std::unique_ptr<WritableFile> f = nullptr; std::unique_ptr<WritableFile> f = nullptr;
TF_CHECK_OK(env->NewWritableFile(file_path, &f)); TF_CHECK_OK(env->NewWritableFile(file_path, &f));
f->Append(event_str); f->Append(event_str).IgnoreError();
TF_CHECK_OK(f->Close()); TF_CHECK_OK(f->Close());
return Status::OK(); return Status::OK();
@ -372,7 +373,7 @@ Status DebugFileIO::RecursiveCreateDir(Env* env, const string& dir) {
" because the path exists as a file ")); " because the path exists as a file "));
} }
env->CreateDir(dir); env->CreateDir(dir).IgnoreError();
// Guard against potential race in creating directories by doing a check // Guard against potential race in creating directories by doing a check
// after the CreateDir call. // after the CreateDir call.
if (env->FileExists(dir).ok() && env->IsDirectory(dir).ok()) { if (env->FileExists(dir).ok() && env->IsDirectory(dir).ok()) {

View File

@ -150,9 +150,9 @@ TEST_F(DebugIOUtilsTest, DumpTensorToFileCannotCreateDirectory) {
std::unique_ptr<WritableFile> file; std::unique_ptr<WritableFile> file;
ASSERT_TRUE(env_->NewWritableFile(txt_file_name, &file).ok()); ASSERT_TRUE(env_->NewWritableFile(txt_file_name, &file).ok());
file->Append("text in baz"); TF_EXPECT_OK(file->Append("text in baz"));
file->Flush(); TF_EXPECT_OK(file->Flush());
file->Close(); TF_ASSERT_OK(file->Close());
// Verify that the path exists and that it is a file, not a directory. // Verify that the path exists and that it is a file, not a directory.
ASSERT_TRUE(env_->FileExists(txt_file_name).ok()); ASSERT_TRUE(env_->FileExists(txt_file_name).ok());

View File

@ -439,7 +439,8 @@ void GraphMgr::BuildCostModel(Item* item, StepStatsCollector* collector,
if (cost_graph != nullptr) { if (cost_graph != nullptr) {
for (const auto& unit : item->units) { for (const auto& unit : item->units) {
cost_model_manager_.AddToCostGraphDef(unit.graph, cost_graph); cost_model_manager_.AddToCostGraphDef(unit.graph, cost_graph)
.IgnoreError();
} }
} }
} }

View File

@ -101,7 +101,7 @@ void Master::GC() {
<< "Note that if you are starting multiple replicas " << "Note that if you are starting multiple replicas "
<< "on a staggered delay, session_gc_seconds may need " << "on a staggered delay, session_gc_seconds may need "
<< "to be raised."; << "to be raised.";
sess->Close(); sess->Close().IgnoreError();
sess->Unref(); sess->Unref();
}); });
} }
@ -297,7 +297,7 @@ void Master::CreateSession(const CreateSessionRequest* req,
const_cast<CreateSessionRequest*>(req)->mutable_graph_def(); const_cast<CreateSessionRequest*>(req)->mutable_graph_def();
Status create_status = session->Create(gdef); Status create_status = session->Create(gdef);
if (!create_status.ok()) { if (!create_status.ok()) {
session->Close(); session->Close().IgnoreError();
session->Unref(); session->Unref();
done(create_status); done(create_status);
return; return;

View File

@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/core/distributed_runtime/message_wrappers.h" #include "tensorflow/core/distributed_runtime/message_wrappers.h"
#include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test.h"
namespace tensorflow { namespace tensorflow {
@ -51,9 +52,9 @@ static void CheckRunStepRequest(const RunStepRequestWrapper& request) {
EXPECT_EQ("feed_a:0", request.feed_name(0)); EXPECT_EQ("feed_a:0", request.feed_name(0));
EXPECT_EQ("feed_b:0", request.feed_name(1)); EXPECT_EQ("feed_b:0", request.feed_name(1));
Tensor val; Tensor val;
request.FeedValue(0, &val); TF_EXPECT_OK(request.FeedValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val); test::ExpectTensorEqual<int32>(TensorA(), val);
request.FeedValue(1, &val); TF_EXPECT_OK(request.FeedValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val); test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_EQ(2, request.num_fetches()); EXPECT_EQ(2, request.num_fetches());
@ -70,8 +71,10 @@ static void BuildRunGraphRequest(
run_graph_request->set_graph_handle("graph_handle"); run_graph_request->set_graph_handle("graph_handle");
run_graph_request->set_step_id(13); run_graph_request->set_step_id(13);
run_graph_request->mutable_exec_opts()->set_record_timeline(true); run_graph_request->mutable_exec_opts()->set_record_timeline(true);
run_graph_request->AddSendFromRunStepRequest(run_step_request, 0, "send_0"); TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 0,
run_graph_request->AddSendFromRunStepRequest(run_step_request, 1, "send_1"); "send_0"));
TF_EXPECT_OK(run_graph_request->AddSendFromRunStepRequest(run_step_request, 1,
"send_1"));
run_graph_request->add_recv_key("recv_2"); run_graph_request->add_recv_key("recv_2");
run_graph_request->add_recv_key("recv_3"); run_graph_request->add_recv_key("recv_3");
run_graph_request->set_is_partial(true); run_graph_request->set_is_partial(true);
@ -84,9 +87,9 @@ static void CheckRunGraphRequest(const RunGraphRequestWrapper& request) {
EXPECT_TRUE(request.exec_opts().record_timeline()); EXPECT_TRUE(request.exec_opts().record_timeline());
EXPECT_EQ(2, request.num_sends()); EXPECT_EQ(2, request.num_sends());
Tensor val; Tensor val;
request.SendValue(0, &val); TF_EXPECT_OK(request.SendValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val); test::ExpectTensorEqual<int32>(TensorA(), val);
request.SendValue(1, &val); TF_EXPECT_OK(request.SendValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val); test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_TRUE(request.is_partial()); EXPECT_TRUE(request.is_partial());
EXPECT_FALSE(request.is_last_partial_run()); EXPECT_FALSE(request.is_last_partial_run());
@ -106,9 +109,9 @@ static void CheckRunGraphResponse(MutableRunGraphResponseWrapper* response) {
EXPECT_EQ("recv_2", response->recv_key(0)); EXPECT_EQ("recv_2", response->recv_key(0));
EXPECT_EQ("recv_3", response->recv_key(1)); EXPECT_EQ("recv_3", response->recv_key(1));
Tensor val; Tensor val;
response->RecvValue(0, &val); TF_EXPECT_OK(response->RecvValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val); test::ExpectTensorEqual<int32>(TensorA(), val);
response->RecvValue(1, &val); TF_EXPECT_OK(response->RecvValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val); test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_EQ(1, response->mutable_step_stats()->dev_stats_size()); EXPECT_EQ(1, response->mutable_step_stats()->dev_stats_size());
EXPECT_EQ("/cpu:0", response->mutable_step_stats()->dev_stats(0).device()); EXPECT_EQ("/cpu:0", response->mutable_step_stats()->dev_stats(0).device());
@ -119,10 +122,10 @@ static void CheckRunGraphResponse(MutableRunGraphResponseWrapper* response) {
static void BuildRunStepResponse( static void BuildRunStepResponse(
MutableRunGraphResponseWrapper* run_graph_response, MutableRunGraphResponseWrapper* run_graph_response,
MutableRunStepResponseWrapper* run_step_response) { MutableRunStepResponseWrapper* run_step_response) {
run_step_response->AddTensorFromRunGraphResponse("fetch_x:0", TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
run_graph_response, 0); "fetch_x:0", run_graph_response, 0));
run_step_response->AddTensorFromRunGraphResponse("fetch_y:0", TF_EXPECT_OK(run_step_response->AddTensorFromRunGraphResponse(
run_graph_response, 1); "fetch_y:0", run_graph_response, 1));
*run_step_response->mutable_metadata()->mutable_step_stats() = *run_step_response->mutable_metadata()->mutable_step_stats() =
*run_graph_response->mutable_step_stats(); *run_graph_response->mutable_step_stats();
} }
@ -133,9 +136,9 @@ static void CheckRunStepResponse(
EXPECT_EQ("fetch_x:0", response.tensor_name(0)); EXPECT_EQ("fetch_x:0", response.tensor_name(0));
EXPECT_EQ("fetch_y:0", response.tensor_name(1)); EXPECT_EQ("fetch_y:0", response.tensor_name(1));
Tensor val; Tensor val;
response.TensorValue(0, &val); TF_EXPECT_OK(response.TensorValue(0, &val));
test::ExpectTensorEqual<int32>(TensorA(), val); test::ExpectTensorEqual<int32>(TensorA(), val);
response.TensorValue(1, &val); TF_EXPECT_OK(response.TensorValue(1, &val));
test::ExpectTensorEqual<int32>(TensorB(), val); test::ExpectTensorEqual<int32>(TensorB(), val);
EXPECT_EQ(1, response.metadata().step_stats().dev_stats_size()); EXPECT_EQ(1, response.metadata().step_stats().dev_stats_size());
EXPECT_EQ("/cpu:0", response.metadata().step_stats().dev_stats(0).device()); EXPECT_EQ("/cpu:0", response.metadata().step_stats().dev_stats(0).device());

View File

@ -18,6 +18,7 @@ limitations under the License.
#include <string> #include <string>
#include <vector> #include <vector>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/device_name_utils.h" #include "tensorflow/core/util/device_name_utils.h"
@ -56,7 +57,8 @@ TEST(GrpcChannelTest, IsSameAddressSpace) {
TEST(GrpcChannelTest, HostPorts) { TEST(GrpcChannelTest, HostPorts) {
GrpcChannelSpec spec; GrpcChannelSpec spec;
spec.AddHostPortsJob("mnist", {"a:1", "b:2", "c:3", "d:4", "e:5", "f:6"}); TF_EXPECT_OK(spec.AddHostPortsJob(
"mnist", {"a:1", "b:2", "c:3", "d:4", "e:5", "f:6"}));
std::unique_ptr<GrpcChannelCache> cc( std::unique_ptr<GrpcChannelCache> cc(
NewGrpcChannelCache(spec, NewHostPortGrpcChannel)); NewGrpcChannelCache(spec, NewHostPortGrpcChannel));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target")); EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target"));
@ -96,7 +98,8 @@ TEST(GrpcChannelTest, HostPorts) {
TEST(GrpcChannelTest, SparseHostPorts) { TEST(GrpcChannelTest, SparseHostPorts) {
GrpcChannelSpec spec; GrpcChannelSpec spec;
spec.AddHostPortsJob("mnist", {{0, "a:1"}, {3, "d:4"}, {4, "e:5"}}); TF_EXPECT_OK(
spec.AddHostPortsJob("mnist", {{0, "a:1"}, {3, "d:4"}, {4, "e:5"}}));
std::unique_ptr<GrpcChannelCache> cc( std::unique_ptr<GrpcChannelCache> cc(
NewGrpcChannelCache(spec, NewHostPortGrpcChannel)); NewGrpcChannelCache(spec, NewHostPortGrpcChannel));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target")); EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target"));

View File

@ -190,7 +190,7 @@ Status GrpcServer::Init() {
host_port = task.second; host_port = task.second;
} }
} }
channel_spec.AddHostPortsJob(job.name(), host_ports); TF_RETURN_IF_ERROR(channel_spec.AddHostPortsJob(job.name(), host_ports));
} }
std::unique_ptr<GrpcChannelCache> channel_cache(NewGrpcChannelCache( std::unique_ptr<GrpcChannelCache> channel_cache(NewGrpcChannelCache(

View File

@ -120,7 +120,7 @@ int main(int argc, char* argv[]) {
return -1; return -1;
} }
std::unique_ptr<tensorflow::ServerInterface> server; std::unique_ptr<tensorflow::ServerInterface> server;
tensorflow::NewServer(server_def, &server); TF_QCHECK_OK(tensorflow::NewServer(server_def, &server));
server->Start(); TF_QCHECK_OK(server->Start());
server->Join(); TF_QCHECK_OK(server->Join());
} }

View File

@ -111,8 +111,8 @@ int main(int argc, char* argv[]) {
LOG(ERROR) << "Could not create server: " << s.error_message(); LOG(ERROR) << "Could not create server: " << s.error_message();
return -1; return -1;
} }
svr->Start(); TF_QCHECK_OK(svr->Start());
svr->Join(); TF_QCHECK_OK(svr->Join());
// NOTE(mrry): Unreachable code. // NOTE(mrry): Unreachable code.
return 0; return 0;

View File

@ -84,8 +84,8 @@ void MakeGRPCCluster(const SessionOptions& options, int n,
std::unique_ptr<ServerInterface> svr; std::unique_ptr<ServerInterface> svr;
TF_CHECK_OK(NewServer(server, &svr)); TF_CHECK_OK(NewServer(server, &svr));
svr->Start(); TF_CHECK_OK(svr->Start());
svr->Join(); TF_CHECK_OK(svr->Join());
}); });
} }

View File

@ -172,7 +172,7 @@ void Worker::DoRunGraph(CallOptions* opts, RunGraphRequestWrapper* request,
cost_graph, cm, in, [this, step_id, response, cm, out, token, collector, cost_graph, cm, in, [this, step_id, response, cm, out, token, collector,
opts, done](Status s) { opts, done](Status s) {
if (s.ok()) { if (s.ok()) {
env_->graph_mgr->RecvOutputs(step_id, out); s = env_->graph_mgr->RecvOutputs(step_id, out);
} }
opts->ClearCancelCallback(); opts->ClearCancelCallback();
{ {

View File

@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/core/example/example.pb.h" #include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test.h"
@ -44,7 +45,7 @@ class ExtractExampleParserConfigurationTest : public ::testing::Test {
ReadFileToStringOrDie(Env::Default(), filename, &proto_string); ReadFileToStringOrDie(Env::Default(), filename, &proto_string);
protobuf::TextFormat::ParseFromString(proto_string, &graph_def_); protobuf::TextFormat::ParseFromString(proto_string, &graph_def_);
session_.reset(CreateSession()); session_.reset(CreateSession());
session_->Create(graph_def_); TF_CHECK_OK(session_->Create(graph_def_));
} }
NodeDef* parse_example_node() { NodeDef* parse_example_node() {
@ -194,8 +195,8 @@ class ExampleParserConfigurationProtoToFeatureVectorsTest
TEST_F(ExampleParserConfigurationProtoToFeatureVectorsTest, Basic) { TEST_F(ExampleParserConfigurationProtoToFeatureVectorsTest, Basic) {
std::vector<FixedLenFeature> fixed_len_features; std::vector<FixedLenFeature> fixed_len_features;
std::vector<VarLenFeature> var_len_features; std::vector<VarLenFeature> var_len_features;
ExampleParserConfigurationProtoToFeatureVectors( TF_ASSERT_OK(ExampleParserConfigurationProtoToFeatureVectors(
config_proto_, &fixed_len_features, &var_len_features); config_proto_, &fixed_len_features, &var_len_features));
ASSERT_EQ(1, fixed_len_features.size()); ASSERT_EQ(1, fixed_len_features.size());
ASSERT_EQ(1, var_len_features.size()); ASSERT_EQ(1, var_len_features.size());

View File

@ -155,7 +155,8 @@ TEST_F(NodeDefBuilderTest, Simple) {
{ // Finalize() twice. { // Finalize() twice.
NodeDefBuilder& builder = Builder(); NodeDefBuilder& builder = Builder();
builder.Input(FakeInput()).Finalize(nullptr); // First call to Finalize() // First call to Finalize()
TF_EXPECT_OK(builder.Input(FakeInput()).Finalize(nullptr));
// ExpectSuccess() also calls Finalize(). // ExpectSuccess() also calls Finalize().
ExpectSuccess(builder, {DT_INT32}, {DT_FLOAT}, R"proto( ExpectSuccess(builder, {DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple" input: "a" )proto"); op: "Simple" input: "a" )proto");

View File

@ -738,7 +738,7 @@ Status FindKernelRegistration(DeviceType device_type, const NodeDef& node_def,
*reg = nullptr; *reg = nullptr;
*was_attr_mismatch = false; *was_attr_mismatch = false;
string label; // Label defaults to empty if not found in NodeDef. string label; // Label defaults to empty if not found in NodeDef.
GetNodeAttr(node_def, "_kernel", &label); GetNodeAttr(node_def, "_kernel", &label).IgnoreError();
const string key = Key(node_def.op(), device_type, label); const string key = Key(node_def.op(), device_type, label);
auto regs = GlobalKernelRegistryTyped()->equal_range(key); auto regs = GlobalKernelRegistryTyped()->equal_range(key);
for (auto iter = regs.first; iter != regs.second; ++iter) { for (auto iter = regs.first; iter != regs.second; ++iter) {

View File

@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test.h"
namespace tensorflow { namespace tensorflow {
@ -47,10 +48,11 @@ TEST(OpRegistrationTest, TestDuplicate) {
Status s = registry->ProcessRegistrations(); Status s = registry->ProcessRegistrations();
EXPECT_TRUE(s.ok()); EXPECT_TRUE(s.ok());
registry->SetWatcher([](const Status& s, const OpDef& op_def) -> Status { TF_EXPECT_OK(
EXPECT_TRUE(errors::IsAlreadyExists(s)); registry->SetWatcher([](const Status& s, const OpDef& op_def) -> Status {
return Status::OK(); EXPECT_TRUE(errors::IsAlreadyExists(s));
}); return Status::OK();
}));
Register("Foo", registry.get()); Register("Foo", registry.get());
s = registry->ProcessRegistrations(); s = registry->ProcessRegistrations();
EXPECT_TRUE(s.ok()); EXPECT_TRUE(s.ok());

View File

@ -92,7 +92,8 @@ Status InferenceContext::set_output(StringPiece output_name,
const int start = result->second.first; const int start = result->second.first;
const int size = result->second.second - start; const int size = result->second.second - start;
if (size != shapes.size()) { if (size != shapes.size()) {
errors::InvalidArgument("Must have exactly ", shapes.size(), " shapes."); return errors::InvalidArgument("Must have exactly ", shapes.size(),
" shapes.");
} }
for (int i = 0; i < size; ++i) { for (int i = 0; i < size; ++i) {
outputs_[i + start] = shapes[i]; outputs_[i + start] = shapes[i];

View File

@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h" #include "tensorflow/core/public/version.h"
@ -51,9 +52,9 @@ class EqualGraphDefTest : public ::testing::Test {
bool Match() { bool Match() {
GraphDef expected; GraphDef expected;
e_.ToGraphDef(&expected); TF_EXPECT_OK(e_.ToGraphDef(&expected));
GraphDef actual; GraphDef actual;
a_.ToGraphDef(&actual); TF_EXPECT_OK(a_.ToGraphDef(&actual));
return EqualGraphDef(actual, expected, &diff_); return EqualGraphDef(actual, expected, &diff_);
} }

View File

@ -25,6 +25,7 @@ limitations under the License.
#include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/logging.h"
@ -1430,7 +1431,8 @@ TEST_F(GraphConstructorTest, ImportGraphDef_ControlDepsErrors) {
TEST_F(GraphConstructorTest, ImportGraphDef_ErrorsDoNoChangeTheGraph) { TEST_F(GraphConstructorTest, ImportGraphDef_ErrorsDoNoChangeTheGraph) {
GraphDef def; GraphDef def;
NodeDefBuilder("scope/A", "TestParams").Finalize(def.add_node()); TF_EXPECT_OK(
NodeDefBuilder("scope/A", "TestParams").Finalize(def.add_node()));
ImportGraphDefOptions opts; ImportGraphDefOptions opts;
const string& source = graph_.FindNodeId(Graph::kSourceId)->name(); const string& source = graph_.FindNodeId(Graph::kSourceId)->name();
const string& sink = graph_.FindNodeId(Graph::kSinkId)->name(); const string& sink = graph_.FindNodeId(Graph::kSinkId)->name();

View File

@ -170,20 +170,20 @@ class GraphPartitionTest : public ::testing::Test {
"/job:a/replica:0/task:0/cpu:1")) {} "/job:a/replica:0/task:0/cpu:1")) {}
const GraphDef& ToGraphDef() { const GraphDef& ToGraphDef() {
in_.ToGraphDef(&in_graph_def_); TF_EXPECT_OK(in_.ToGraphDef(&in_graph_def_));
return in_graph_def_; return in_graph_def_;
} }
void ExpectMatchA() { void ExpectMatchA() {
GraphDef graph_def; GraphDef graph_def;
scope_a_.ToGraphDef(&graph_def); TF_EXPECT_OK(scope_a_.ToGraphDef(&graph_def));
string a = "/job:a/replica:0/task:0/cpu:0"; string a = "/job:a/replica:0/task:0/cpu:0";
TF_EXPECT_GRAPH_EQ(graph_def, partitions_[a]); TF_EXPECT_GRAPH_EQ(graph_def, partitions_[a]);
} }
void ExpectMatchB() { void ExpectMatchB() {
GraphDef graph_def; GraphDef graph_def;
scope_b_.ToGraphDef(&graph_def); TF_EXPECT_OK(scope_b_.ToGraphDef(&graph_def));
string b = "/job:a/replica:0/task:0/cpu:1"; string b = "/job:a/replica:0/task:0/cpu:1";
TF_EXPECT_GRAPH_EQ(graph_def, partitions_[b]); TF_EXPECT_GRAPH_EQ(graph_def, partitions_[b]);
} }

View File

@ -29,10 +29,10 @@ static Graph* BM_AdjustContrast(int batches, int width, int height) {
factor.flat<float>().setConstant(1.2); factor.flat<float>().setConstant(1.2);
Node* ret; Node* ret;
NodeBuilder(g->NewName("n"), "AdjustContrastv2") TF_CHECK_OK(NodeBuilder(g->NewName("n"), "AdjustContrastv2")
.Input(test::graph::Constant(g, in)) .Input(test::graph::Constant(g, in))
.Input(test::graph::Constant(g, factor)) .Input(test::graph::Constant(g, factor))
.Finalize(g, &ret); .Finalize(g, &ret));
return g; return g;
} }

View File

@ -387,7 +387,7 @@ TEST_F(BigQueryTableAccessorTest, SwitchingPartitionsTest) {
partition.set_start_index(3); partition.set_start_index(3);
partition.set_end_index(-1); partition.set_end_index(-1);
accessor_->SetPartition(partition); TF_EXPECT_OK(accessor_->SetPartition(partition));
TF_EXPECT_OK(accessor_->ReadRow(&row_id, &example)); TF_EXPECT_OK(accessor_->ReadRow(&row_id, &example));
EXPECT_EQ(3, row_id); EXPECT_EQ(3, row_id);
EXPECT_TRUE(accessor_->Done()); EXPECT_TRUE(accessor_->Done());
@ -396,7 +396,7 @@ TEST_F(BigQueryTableAccessorTest, SwitchingPartitionsTest) {
partition.set_start_index(0); partition.set_start_index(0);
partition.set_end_index(1); partition.set_end_index(1);
accessor_->SetPartition(partition); TF_EXPECT_OK(accessor_->SetPartition(partition));
TF_EXPECT_OK(accessor_->ReadRow(&row_id, &example)); TF_EXPECT_OK(accessor_->ReadRow(&row_id, &example));
EXPECT_EQ(0, row_id); EXPECT_EQ(0, row_id);
EXPECT_FALSE(accessor_->Done()); EXPECT_FALSE(accessor_->Done());

View File

@ -105,7 +105,7 @@ TEST_F(AbortOpTest, pass_error_msg) {
.Attr("error_msg", "abort_op_test") .Attr("error_msg", "abort_op_test")
.Finalize(node_def())); .Finalize(node_def()));
TF_ASSERT_OK(InitOp()); TF_ASSERT_OK(InitOp());
EXPECT_EXIT(RunOpKernel(), KilledBySignal(SIGABRT), EXPECT_EXIT(RunOpKernel().IgnoreError(), KilledBySignal(SIGABRT),
"Abort_op intentional failure; abort_op_test"); "Abort_op intentional failure; abort_op_test");
} }
@ -113,7 +113,7 @@ TEST_F(AbortOpTest, pass_error_msg) {
TEST_F(AbortOpTest, default_msg) { TEST_F(AbortOpTest, default_msg) {
TF_ASSERT_OK(NodeDefBuilder("abort_op", "Abort").Finalize(node_def())); TF_ASSERT_OK(NodeDefBuilder("abort_op", "Abort").Finalize(node_def()));
TF_ASSERT_OK(InitOp()); TF_ASSERT_OK(InitOp());
EXPECT_EXIT(RunOpKernel(), KilledBySignal(SIGABRT), EXPECT_EXIT(RunOpKernel().IgnoreError(), KilledBySignal(SIGABRT),
"Abort_op intentional failure; "); "Abort_op intentional failure; ");
} }
@ -123,7 +123,7 @@ TEST_F(AbortOpTest, exit_normally) {
.Attr("exit_without_error", true) .Attr("exit_without_error", true)
.Finalize(node_def())); .Finalize(node_def()));
TF_ASSERT_OK(InitOp()); TF_ASSERT_OK(InitOp());
EXPECT_EXIT(RunOpKernel(), ::testing::ExitedWithCode(0), ""); EXPECT_EXIT(RunOpKernel().IgnoreError(), ::testing::ExitedWithCode(0), "");
} }
} // namespace } // namespace

View File

@ -85,7 +85,8 @@ Status Conv2DBackpropComputeDimensions(
return errors::InvalidArgument(label, ": filter must be 4-dimensional"); return errors::InvalidArgument(label, ": filter must be 4-dimensional");
} }
if (out_backprop_shape.dims() != 4) { if (out_backprop_shape.dims() != 4) {
errors::InvalidArgument(label, ": out_backprop must be 4-dimensional"); return errors::InvalidArgument(label,
": out_backprop must be 4-dimensional");
} }
dims->batch_size = GetTensorDim(input_shape, data_format, 'N'); dims->batch_size = GetTensorDim(input_shape, data_format, 'N');
if (dims->batch_size != GetTensorDim(out_backprop_shape, data_format, 'N')) { if (dims->batch_size != GetTensorDim(out_backprop_shape, data_format, 'N')) {

View File

@ -62,7 +62,7 @@ class DecodeCSVOp : public OpKernel {
for (int i = 0; i < static_cast<int>(out_type_.size()); ++i) { for (int i = 0; i < static_cast<int>(out_type_.size()); ++i) {
Tensor* out = nullptr; Tensor* out = nullptr;
output.allocate(i, records->shape(), &out); OP_REQUIRES_OK(ctx, output.allocate(i, records->shape(), &out));
} }
for (int64 i = 0; i < records_size; ++i) { for (int64 i = 0; i < records_size; ++i) {

View File

@ -353,7 +353,7 @@ class SingleSequenceExampleParserOp : public OpKernel {
for (const int dim : attrs_.context_dense_shapes[d].dim_sizes()) for (const int dim : attrs_.context_dense_shapes[d].dim_sizes())
out_shape.AddDim(dim); out_shape.AddDim(dim);
Tensor* out = nullptr; Tensor* out = nullptr;
context_dense_values.allocate(d, out_shape, &out); OP_REQUIRES_OK(ctx, context_dense_values.allocate(d, out_shape, &out));
} }
for (int d = 0; d < attrs_.num_context_dense; ++d) { for (int d = 0; d < attrs_.num_context_dense; ++d) {
@ -411,9 +411,11 @@ class SingleSequenceExampleParserOp : public OpKernel {
TensorShape indices_shape({num_elements, 1}); TensorShape indices_shape({num_elements, 1});
Tensor* sp_indices_d = nullptr; Tensor* sp_indices_d = nullptr;
Tensor* sp_shape_d = nullptr; Tensor* sp_shape_d = nullptr;
context_sparse_indices.allocate(d, indices_shape, &sp_indices_d); OP_REQUIRES_OK(ctx, context_sparse_indices.allocate(d, indices_shape,
&sp_indices_d));
context_sparse_values.set(d, feature_values); context_sparse_values.set(d, feature_values);
context_sparse_shapes.allocate(d, TensorShape({1}), &sp_shape_d); OP_REQUIRES_OK(ctx, context_sparse_shapes.allocate(d, TensorShape({1}),
&sp_shape_d));
auto shape_t = sp_shape_d->vec<int64>(); auto shape_t = sp_shape_d->vec<int64>();
shape_t(0) = num_elements; shape_t(0) = num_elements;
auto indices_t = sp_indices_d->matrix<int64>(); auto indices_t = sp_indices_d->matrix<int64>();
@ -424,9 +426,12 @@ class SingleSequenceExampleParserOp : public OpKernel {
Tensor* sp_indices_d = nullptr; Tensor* sp_indices_d = nullptr;
Tensor* sp_values_d = nullptr; Tensor* sp_values_d = nullptr;
Tensor* sp_shape_d = nullptr; Tensor* sp_shape_d = nullptr;
context_sparse_indices.allocate(d, indices_shape, &sp_indices_d); OP_REQUIRES_OK(ctx, context_sparse_indices.allocate(d, indices_shape,
context_sparse_values.allocate(d, values_shape, &sp_values_d); &sp_indices_d));
context_sparse_shapes.allocate(d, TensorShape({1}), &sp_shape_d); OP_REQUIRES_OK(
ctx, context_sparse_values.allocate(d, values_shape, &sp_values_d));
OP_REQUIRES_OK(ctx, context_sparse_shapes.allocate(d, TensorShape({1}),
&sp_shape_d));
auto shape_t = sp_shape_d->vec<int64>(); auto shape_t = sp_shape_d->vec<int64>();
shape_t(0) = 0; shape_t(0) = 0;
} }
@ -468,7 +473,8 @@ class SingleSequenceExampleParserOp : public OpKernel {
out_shape.AddDim(dim); out_shape.AddDim(dim);
} }
Tensor* out = nullptr; Tensor* out = nullptr;
feature_list_dense_values.allocate(d, out_shape, &out); OP_REQUIRES_OK(ctx,
feature_list_dense_values.allocate(d, out_shape, &out));
for (int64 t = 0; t < fl.feature_size(); ++t) { for (int64 t = 0; t < fl.feature_size(); ++t) {
const Feature& f = fl.feature(t); const Feature& f = fl.feature(t);
@ -530,9 +536,12 @@ class SingleSequenceExampleParserOp : public OpKernel {
Tensor* sp_indices_d = nullptr; Tensor* sp_indices_d = nullptr;
Tensor* sp_values_d = nullptr; Tensor* sp_values_d = nullptr;
Tensor* sp_shape_d = nullptr; Tensor* sp_shape_d = nullptr;
feature_list_sparse_indices.allocate(d, indices_shape, &sp_indices_d); OP_REQUIRES_OK(ctx, feature_list_sparse_indices.allocate(d, indices_shape,
feature_list_sparse_values.allocate(d, values_shape, &sp_values_d); &sp_indices_d));
feature_list_sparse_shapes.allocate(d, TensorShape({2}), &sp_shape_d); OP_REQUIRES_OK(ctx, feature_list_sparse_values.allocate(d, values_shape,
&sp_values_d));
OP_REQUIRES_OK(ctx, feature_list_sparse_shapes.allocate(
d, TensorShape({2}), &sp_shape_d));
auto shape_t = sp_shape_d->vec<int64>(); auto shape_t = sp_shape_d->vec<int64>();
shape_t(0) = feature_list_size; shape_t(0) = feature_list_size;
shape_t(1) = max_num_features; shape_t(1) = max_num_features;

View File

@ -229,7 +229,13 @@ void FIFOQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
// an optimized case where the queue 'knows' what attributes to // an optimized case where the queue 'knows' what attributes to
// use, and plumbs them through here. // use, and plumbs them through here.
Tensor element; Tensor element;
ctx->allocate_temp(component_dtypes_[i], ManyOutShape(i, 0), &element); Status status = ctx->allocate_temp(component_dtypes_[i],
ManyOutShape(i, 0), &element);
if (!status.ok()) {
ctx->SetStatus(status);
callback(Tuple());
return;
}
tuple.emplace_back(element); tuple.emplace_back(element);
} }
callback(tuple); callback(tuple);
@ -309,8 +315,10 @@ void FIFOQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
const TensorShape shape = const TensorShape shape =
ManyOutShape(i, attempt->elements_requested); ManyOutShape(i, attempt->elements_requested);
Tensor element; Tensor element;
attempt->context->allocate_temp(component_dtypes_[i], attempt->context->SetStatus(
shape, &element); attempt->context->allocate_temp(component_dtypes_[i],
shape, &element));
if (!attempt->context->status().ok()) return kComplete;
attempt->tuple.emplace_back(element); attempt->tuple.emplace_back(element);
} }
} }

View File

@ -95,7 +95,7 @@ GraphTransferUtils::GetTopNFloatResults(const float* const data,
CHECK(scope.ok()); CHECK(scope.ok());
GraphDef fusedGraphDef; GraphDef fusedGraphDef;
root.ToGraphDef(&fusedGraphDef); TF_CHECK_OK(root.ToGraphDef(&fusedGraphDef));
return fusedGraphDef; return fusedGraphDef;
} }

View File

@ -606,15 +606,15 @@ void GraphTransferer::RegisterNodeWithPaddingAndStrides(
CHECK_GT(node.def().attr().count(PADDING_ATTR_NAME), 0); CHECK_GT(node.def().attr().count(PADDING_ATTR_NAME), 0);
// TODO(satok): Use context->GetAttr(...) instead? // TODO(satok): Use context->GetAttr(...) instead?
Padding padding; Padding padding;
context->GetAttr(PADDING_ATTR_NAME, &padding); TF_CHECK_OK(context->GetAttr(PADDING_ATTR_NAME, &padding));
CHECK_GT(node.def().attr().count(STRIDES_ATTR_NAME), 0); CHECK_GT(node.def().attr().count(STRIDES_ATTR_NAME), 0);
std::vector<int32> strides; std::vector<int32> strides;
context->GetAttr(STRIDES_ATTR_NAME, &strides); TF_CHECK_OK(context->GetAttr(STRIDES_ATTR_NAME, &strides));
const int stride_id = RegisterConstantShape(strides); const int stride_id = RegisterConstantShape(strides);
std::vector<int> extra_inputs{stride_id}; std::vector<int> extra_inputs{stride_id};
if (node.def().attr().count(KSIZE_ATTR_NAME) > 0) { if (node.def().attr().count(KSIZE_ATTR_NAME) > 0) {
std::vector<int32> kernel_sizes; std::vector<int32> kernel_sizes;
context->GetAttr(KSIZE_ATTR_NAME, &kernel_sizes); TF_CHECK_OK(context->GetAttr(KSIZE_ATTR_NAME, &kernel_sizes));
const int ksize_id = RegisterConstantShape(kernel_sizes); const int ksize_id = RegisterConstantShape(kernel_sizes);
extra_inputs.insert(extra_inputs.begin(), ksize_id); extra_inputs.insert(extra_inputs.begin(), ksize_id);
} }

View File

@ -76,7 +76,8 @@ void PaddingFIFOQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
Tensor element; Tensor element;
// Here, ManyOutShape returns zeros for undetermined shapes, // Here, ManyOutShape returns zeros for undetermined shapes,
// which is exactly what we want to use. // which is exactly what we want to use.
ctx->allocate_temp(component_dtypes_[i], ManyOutShape(i, 0), &element); OP_REQUIRES_OK(ctx, ctx->allocate_temp(component_dtypes_[i],
ManyOutShape(i, 0), &element));
tuple.emplace_back(element); tuple.emplace_back(element);
} }
callback(tuple); callback(tuple);
@ -179,8 +180,9 @@ void PaddingFIFOQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
} }
Tensor element; Tensor element;
attempt->context->allocate_temp(component_dtypes_[i], shape, attempt->context->SetStatus(attempt->context->allocate_temp(
&element); component_dtypes_[i], shape, &element));
if (!attempt->context->status().ok()) return kComplete;
bool has_dynamic_shape = !partial_shape.IsFullyDefined(); bool has_dynamic_shape = !partial_shape.IsFullyDefined();
if (has_dynamic_shape) { if (has_dynamic_shape) {

View File

@ -272,7 +272,13 @@ void PriorityQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
// an optimized case where the queue 'knows' what attributes to // an optimized case where the queue 'knows' what attributes to
// use, and plumbs them through here. // use, and plumbs them through here.
Tensor element; Tensor element;
ctx->allocate_temp(component_dtypes_[i], ManyOutShape(i, 0), &element); Status status = ctx->allocate_temp(component_dtypes_[i],
ManyOutShape(i, 0), &element);
if (!status.ok()) {
ctx->SetStatus(status);
callback(Tuple());
return;
}
tuple.emplace_back(element); tuple.emplace_back(element);
} }
callback(tuple); callback(tuple);
@ -339,8 +345,9 @@ void PriorityQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
const TensorShape shape = const TensorShape shape =
ManyOutShape(i, attempt->elements_requested); ManyOutShape(i, attempt->elements_requested);
Tensor element; Tensor element;
attempt->context->allocate_temp(component_dtypes_[i], shape, attempt->context->SetStatus(attempt->context->allocate_temp(
&element); component_dtypes_[i], shape, &element));
if (!attempt->context->status().ok()) return kComplete;
attempt->tuple.emplace_back(element); attempt->tuple.emplace_back(element);
} }
} }

View File

@ -197,7 +197,7 @@ TEST_F(QuantizeAndDequantizeTest, Invalid_range_given) {
ops::QuantizeAndDequantize(root, {-3.5} /* input */); \ ops::QuantizeAndDequantize(root, {-3.5} /* input */); \
TF_CHECK_OK(root.status()); \ TF_CHECK_OK(root.status()); \
Graph* g = new Graph(OpRegistry::Global()); \ Graph* g = new Graph(OpRegistry::Global()); \
root.ToGraph(g); \ TF_CHECK_OK(root.ToGraph(g)); \
test::Benchmark(#DEVICE, g).Run(iters); \ test::Benchmark(#DEVICE, g).Run(iters); \
} \ } \
BENCHMARK(BM_SIMPLE_QUAN_DEQUAN_##DEVICE); BENCHMARK(BM_SIMPLE_QUAN_DEQUAN_##DEVICE);

View File

@ -308,7 +308,13 @@ void RandomShuffleQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
// an optimized case where the queue 'knows' what attributes to // an optimized case where the queue 'knows' what attributes to
// use, and plumbs them through here. // use, and plumbs them through here.
Tensor element; Tensor element;
ctx->allocate_temp(component_dtypes_[i], ManyOutShape(i, 0), &element); Status s = ctx->allocate_temp(component_dtypes_[i], ManyOutShape(i, 0),
&element);
if (!s.ok()) {
ctx->SetStatus(s);
callback(Tuple());
return;
}
tuple.emplace_back(element); tuple.emplace_back(element);
} }
callback(tuple); callback(tuple);
@ -387,8 +393,10 @@ void RandomShuffleQueue::TryDequeueMany(int num_elements, OpKernelContext* ctx,
const TensorShape shape = const TensorShape shape =
ManyOutShape(i, attempt->elements_requested); ManyOutShape(i, attempt->elements_requested);
Tensor element; Tensor element;
attempt->context->allocate_temp(component_dtypes_[i], attempt->context->SetStatus(
shape, &element); attempt->context->allocate_temp(component_dtypes_[i],
shape, &element));
if (!attempt->context->status().ok()) return kComplete;
attempt->tuple.emplace_back(element); attempt->tuple.emplace_back(element);
} }
} }

View File

@ -65,7 +65,7 @@ Status ReaderBase::RestoreState(const string& state) {
mutex_lock lock(mu_); mutex_lock lock(mu_);
Status status = RestoreStateLocked(state); Status status = RestoreStateLocked(state);
if (!status.ok()) { if (!status.ok()) {
ResetLocked(); ResetLocked().IgnoreError();
} }
return status; return status;
} }

View File

@ -27,7 +27,8 @@ class RemoteFusedGraphExecuteOp : public OpKernel {
explicit RemoteFusedGraphExecuteOp(OpKernelConstruction* const ctx) explicit RemoteFusedGraphExecuteOp(OpKernelConstruction* const ctx)
: OpKernel(ctx), graph_transferer_() { : OpKernel(ctx), graph_transferer_() {
string serialized_proto; string serialized_proto;
ctx->GetAttr("serialized_graph_transfer_info", &serialized_proto); OP_REQUIRES_OK(
ctx, ctx->GetAttr("serialized_graph_transfer_info", &serialized_proto));
graph_transferer_.SetSerializedGraphTransferInfo(serialized_proto); graph_transferer_.SetSerializedGraphTransferInfo(serialized_proto);
const GraphTransferInfo& gt_info = graph_transferer_.GetGraphTransferInfo(); const GraphTransferInfo& gt_info = graph_transferer_.GetGraphTransferInfo();
switch (gt_info.destination()) { switch (gt_info.destination()) {

View File

@ -681,7 +681,7 @@ static void BM_LargeTensorWrite(int iters, int num_elements) {
TF_CHECK_OK(root.status()); TF_CHECK_OK(root.status());
Graph* g = new Graph(OpRegistry::Global()); Graph* g = new Graph(OpRegistry::Global());
root.ToGraph(g); TF_CHECK_OK(root.ToGraph(g));
VLOG(1) << "Save op's output path: " << temp_filename; VLOG(1) << "Save op's output path: " << temp_filename;
VLOG(1) << "# nodes in Graph: " << g->num_nodes(); VLOG(1) << "# nodes in Graph: " << g->num_nodes();

View File

@ -126,9 +126,10 @@ class SaveV2 : public OpKernel {
shape_spec, ", tensor: ", shape_spec, ", tensor: ",
tensor.shape().DebugString())); tensor.shape().DebugString()));
writer.AddSlice(tensor_name, shape, slice, tensor); OP_REQUIRES_OK(context,
writer.AddSlice(tensor_name, shape, slice, tensor));
} else { } else {
writer.Add(tensor_name, tensor); OP_REQUIRES_OK(context, writer.Add(tensor_name, tensor));
} }
} }
OP_REQUIRES_OK(context, writer.Finish()); OP_REQUIRES_OK(context, writer.Finish());
@ -186,7 +187,8 @@ class MergeV2Checkpoints : public OpKernel {
public: public:
explicit MergeV2Checkpoints(OpKernelConstruction* context) explicit MergeV2Checkpoints(OpKernelConstruction* context)
: OpKernel(context) { : OpKernel(context) {
context->GetAttr("delete_old_dirs", &delete_old_dirs_); OP_REQUIRES_OK(context,
context->GetAttr("delete_old_dirs", &delete_old_dirs_));
} }
void Compute(OpKernelContext* context) override { void Compute(OpKernelContext* context) override {

View File

@ -156,7 +156,8 @@ class ScatterNdOp : public OpKernel {
errors::InvalidArgument("Shape must be a vector")); errors::InvalidArgument("Shape must be a vector"));
auto vec = shape_input.flat<Index>(); auto vec = shape_input.flat<Index>();
TensorShape shape; TensorShape shape;
TensorShapeUtils::MakeShape(vec.data(), vec.size(), &shape); OP_REQUIRES_OK(c,
TensorShapeUtils::MakeShape(vec.data(), vec.size(), &shape));
int64 slice_dim; int64 slice_dim;
Index num_updates; Index num_updates;

View File

@ -109,8 +109,8 @@ Status ModelWeights::Initialize(OpKernelContext* const context) {
for (int i = 0; i < sparse_weights_inputs.size(); ++i) { for (int i = 0; i < sparse_weights_inputs.size(); ++i) {
Tensor* delta_t; Tensor* delta_t;
sparse_weights_outputs.allocate(i, sparse_weights_inputs[i].shape(), TF_RETURN_IF_ERROR(sparse_weights_outputs.allocate(
&delta_t); i, sparse_weights_inputs[i].shape(), &delta_t));
// Convert the input vector to a row matrix in internal representation. // Convert the input vector to a row matrix in internal representation.
auto deltas = delta_t->shaped<float, 2>({1, delta_t->NumElements()}); auto deltas = delta_t->shaped<float, 2>({1, delta_t->NumElements()});
deltas.setZero(); deltas.setZero();
@ -127,7 +127,8 @@ Status ModelWeights::Initialize(OpKernelContext* const context) {
std::vector<FeatureWeightsDenseStorage>* const feature_weights) { std::vector<FeatureWeightsDenseStorage>* const feature_weights) {
for (int i = 0; i < weight_inputs.size(); ++i) { for (int i = 0; i < weight_inputs.size(); ++i) {
Tensor* delta_t; Tensor* delta_t;
weight_outputs->allocate(i, weight_inputs[i].shape(), &delta_t); TF_RETURN_IF_ERROR(
weight_outputs->allocate(i, weight_inputs[i].shape(), &delta_t));
// Convert the input vector to a row matrix in internal representation. // Convert the input vector to a row matrix in internal representation.
auto deltas = delta_t->shaped<float, 2>({1, delta_t->NumElements()}); auto deltas = delta_t->shaped<float, 2>({1, delta_t->NumElements()});
deltas.setZero(); deltas.setZero();
@ -136,12 +137,11 @@ Status ModelWeights::Initialize(OpKernelContext* const context) {
{1, weight_inputs[i].NumElements()}), {1, weight_inputs[i].NumElements()}),
deltas}); deltas});
} }
return Status::OK();
}; };
initialize_weights(dense_weights_inputs, &dense_weights_outputs, return initialize_weights(dense_weights_inputs, &dense_weights_outputs,
&dense_weights_); &dense_weights_);
return Status::OK();
} }
// Computes the example statistics for given example, and model. Defined here // Computes the example statistics for given example, and model. Defined here

View File

@ -142,7 +142,8 @@ void DoCompute(const ComputeOptions& options, OpKernelContext* const context) {
Tensor mutable_example_state_data_t(*example_state_data_t); Tensor mutable_example_state_data_t(*example_state_data_t);
auto example_state_data = mutable_example_state_data_t.matrix<float>(); auto example_state_data = mutable_example_state_data_t.matrix<float>();
context->set_output("out_example_state_data", mutable_example_state_data_t); OP_REQUIRES_OK(context, context->set_output("out_example_state_data",
mutable_example_state_data_t));
if (options.adaptative) { if (options.adaptative) {
OP_REQUIRES_OK(context, OP_REQUIRES_OK(context,

View File

@ -37,19 +37,19 @@ static Graph* ConstructSpaceToBatchGraph(
if (dtype == DT_FLOAT) { if (dtype == DT_FLOAT) {
Tensor input(DT_FLOAT, input_shape); Tensor input(DT_FLOAT, input_shape);
input.flat<float>().setRandom(); input.flat<float>().setRandom();
NodeBuilder(g->NewName("n"), op_name) TF_CHECK_OK(NodeBuilder(g->NewName("n"), op_name)
.Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, paddings_tensor)) .Input(test::graph::Constant(g, paddings_tensor))
.Attr("block_size", block_size) .Attr("block_size", block_size)
.Finalize(g, &ret); .Finalize(g, &ret));
} else if (dtype == DT_HALF) { } else if (dtype == DT_HALF) {
Tensor input(DT_HALF, input_shape); Tensor input(DT_HALF, input_shape);
input.flat<Eigen::half>().setRandom(); input.flat<Eigen::half>().setRandom();
NodeBuilder(g->NewName("n"), op_name) TF_CHECK_OK(NodeBuilder(g->NewName("n"), op_name)
.Input(test::graph::Constant(g, input)) .Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, paddings_tensor)) .Input(test::graph::Constant(g, paddings_tensor))
.Attr("block_size", block_size) .Attr("block_size", block_size)
.Finalize(g, &ret); .Finalize(g, &ret));
} }
return g; return g;
} }

View File

@ -84,7 +84,8 @@ class SummaryImageOp : public OpKernel {
return typename TTypes<uint8>::ConstMatrix( return typename TTypes<uint8>::ConstMatrix(
&values(i, 0, 0), Eigen::DSizes<Eigen::DenseIndex, 2>(hw, depth)); &values(i, 0, 0), Eigen::DSizes<Eigen::DenseIndex, 2>(hw, depth));
}; };
AddImages(base_tag, batch_size, w, h, depth, ith_image, &s); OP_REQUIRES_OK(
c, AddImages(base_tag, batch_size, w, h, depth, ith_image, &s));
} else if (tensor.dtype() == DT_HALF) { } else if (tensor.dtype() == DT_HALF) {
NormalizeAndAddImages<Eigen::half>(c, tensor, h, w, hw, depth, batch_size, NormalizeAndAddImages<Eigen::half>(c, tensor, h, w, hw, depth, batch_size,
base_tag, &s); base_tag, &s);
@ -121,7 +122,8 @@ class SummaryImageOp : public OpKernel {
NormalizeFloatImage<T>(hw, depth, values, bad_color, &image); NormalizeFloatImage<T>(hw, depth, values, bad_color, &image);
return image; return image;
}; };
AddImages(base_tag, batch_size, w, h, depth, ith_image, s); OP_REQUIRES_OK(c,
AddImages(base_tag, batch_size, w, h, depth, ith_image, s));
} }
// Add the sequence of images specified by ith_image to the summary. // Add the sequence of images specified by ith_image to the summary.

View File

@ -87,7 +87,8 @@ class TFRecordReaderOp : public ReaderOpKernel {
Env* env = context->env(); Env* env = context->env();
string compression_type; string compression_type;
context->GetAttr("compression_type", &compression_type); OP_REQUIRES_OK(context,
context->GetAttr("compression_type", &compression_type));
SetReaderFactory([this, compression_type, env]() { SetReaderFactory([this, compression_type, env]() {
return new TFRecordReader(name(), compression_type, env); return new TFRecordReader(name(), compression_type, env);

View File

@ -113,6 +113,10 @@ string Status::ToString() const {
} }
} }
void Status::IgnoreError() const {
// no-op
}
std::ostream& operator<<(std::ostream& os, const Status& x) { std::ostream& operator<<(std::ostream& os, const Status& x) {
os << x.ToString(); os << x.ToString();
return os; return os;

View File

@ -72,6 +72,11 @@ class Status {
/// printing. Returns the string `"OK"` for success. /// printing. Returns the string `"OK"` for success.
string ToString() const; string ToString() const;
// Ignores any errors. This method does nothing except potentially suppress
// complaints from any tools that are checking that errors are not dropped on
// the floor.
void IgnoreError() const;
private: private:
static const string& empty_string(); static const string& empty_string();
struct State { struct State {

View File

@ -163,7 +163,7 @@ Status BufferedInputStream::ReadLine(string* result) {
string BufferedInputStream::ReadLineAsString() { string BufferedInputStream::ReadLineAsString() {
string result; string result;
ReadLineHelper(&result, true); ReadLineHelper(&result, true).IgnoreError();
return result; return result;
} }

View File

@ -32,7 +32,7 @@ static std::vector<int> BufferSizes() {
TEST(BufferedInputStream, ReadLine_Empty) { TEST(BufferedInputStream, ReadLine_Empty) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, ""); TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -48,7 +48,8 @@ TEST(BufferedInputStream, ReadLine_Empty) {
TEST(BufferedInputStream, ReadLine1) { TEST(BufferedInputStream, ReadLine1) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, "line one\nline two\nline three\n"); TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -72,7 +73,7 @@ TEST(BufferedInputStream, ReadLine1) {
TEST(BufferedInputStream, ReadLine_NoTrailingNewLine) { TEST(BufferedInputStream, ReadLine_NoTrailingNewLine) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, "line one\nline two\nline three"); TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -96,7 +97,8 @@ TEST(BufferedInputStream, ReadLine_NoTrailingNewLine) {
TEST(BufferedInputStream, ReadLine_EmptyLines) { TEST(BufferedInputStream, ReadLine_EmptyLines) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"); TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -124,7 +126,8 @@ TEST(BufferedInputStream, ReadLine_EmptyLines) {
TEST(BufferedInputStream, ReadLine_CRLF) { TEST(BufferedInputStream, ReadLine_CRLF) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, "line one\r\n\r\n\r\nline two\r\nline three"); TF_ASSERT_OK(WriteStringToFile(env, fname,
"line one\r\n\r\n\r\nline two\r\nline three"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -152,7 +155,7 @@ TEST(BufferedInputStream, ReadLine_CRLF) {
TEST(BufferedInputStream, ReadNBytes) { TEST(BufferedInputStream, ReadNBytes) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffer_test"; string fname = testing::TmpDir() + "/buffer_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -189,7 +192,7 @@ TEST(BufferedInputStream, ReadNBytes) {
TEST(BufferedInputStream, SkipNBytes) { TEST(BufferedInputStream, SkipNBytes) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -226,7 +229,7 @@ TEST(BufferedInputStream, SkipNBytes) {
TEST(BufferedInputStream, ReadNBytesRandomAccessFile) { TEST(BufferedInputStream, ReadNBytesRandomAccessFile) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffer_test"; string fname = testing::TmpDir() + "/buffer_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -261,7 +264,7 @@ TEST(BufferedInputStream, ReadNBytesRandomAccessFile) {
TEST(BufferedInputStream, SkipNBytesRandomAccessFile) { TEST(BufferedInputStream, SkipNBytesRandomAccessFile) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -296,7 +299,7 @@ TEST(BufferedInputStream, SkipNBytesRandomAccessFile) {
TEST(BufferedInputStream, Seek) { TEST(BufferedInputStream, Seek) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/buffered_inputstream_test"; string fname = testing::TmpDir() + "/buffered_inputstream_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));

View File

@ -37,7 +37,7 @@ static std::vector<int> BufferSizes() {
TEST(InputBuffer, ReadLine_Empty) { TEST(InputBuffer, ReadLine_Empty) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, ""); TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
@ -51,7 +51,8 @@ TEST(InputBuffer, ReadLine_Empty) {
TEST(InputBuffer, ReadLine1) { TEST(InputBuffer, ReadLine1) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, "line one\nline two\nline three\n"); TF_CHECK_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
@ -73,7 +74,7 @@ TEST(InputBuffer, ReadLine1) {
TEST(InputBuffer, ReadLine_NoTrailingNewLine) { TEST(InputBuffer, ReadLine_NoTrailingNewLine) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, "line one\nline two\nline three"); TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
@ -95,7 +96,8 @@ TEST(InputBuffer, ReadLine_NoTrailingNewLine) {
TEST(InputBuffer, ReadLine_EmptyLines) { TEST(InputBuffer, ReadLine_EmptyLines) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"); TF_CHECK_OK(
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
@ -121,7 +123,8 @@ TEST(InputBuffer, ReadLine_EmptyLines) {
TEST(InputBuffer, ReadLine_CRLF) { TEST(InputBuffer, ReadLine_CRLF) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, "line one\r\n\r\n\r\nline two\r\nline three"); TF_ASSERT_OK(WriteStringToFile(env, fname,
"line one\r\n\r\n\r\nline two\r\nline three"));
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
@ -147,7 +150,7 @@ TEST(InputBuffer, ReadLine_CRLF) {
TEST(InputBuffer, ReadNBytes) { TEST(InputBuffer, ReadNBytes) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
// ReadNBytes(int64, string*). // ReadNBytes(int64, string*).
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
@ -220,7 +223,7 @@ TEST(InputBuffer, ReadNBytes) {
TEST(InputBuffer, SkipNBytes) { TEST(InputBuffer, SkipNBytes) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
@ -255,7 +258,7 @@ TEST(InputBuffer, SkipNBytes) {
TEST(InputBuffer, Seek) { TEST(InputBuffer, Seek) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/inputbuffer_test"; string fname = testing::TmpDir() + "/inputbuffer_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) { for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;

View File

@ -26,7 +26,7 @@ namespace {
TEST(RandomInputStream, ReadNBytes) { TEST(RandomInputStream, ReadNBytes) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/random_inputbuffer_test"; string fname = testing::TmpDir() + "/random_inputbuffer_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -55,7 +55,7 @@ TEST(RandomInputStream, ReadNBytes) {
TEST(RandomInputStream, SkipNBytes) { TEST(RandomInputStream, SkipNBytes) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/random_inputbuffer_test"; string fname = testing::TmpDir() + "/random_inputbuffer_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
@ -86,7 +86,7 @@ TEST(RandomInputStream, SkipNBytes) {
TEST(RandomInputStream, Seek) { TEST(RandomInputStream, Seek) {
Env* env = Env::Default(); Env* env = Env::Default();
string fname = testing::TmpDir() + "/random_inputbuffer_seek_test"; string fname = testing::TmpDir() + "/random_inputbuffer_seek_test";
WriteStringToFile(env, fname, "0123456789"); TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file; std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file)); TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));

View File

@ -45,8 +45,8 @@ TEST(RecordReaderWriterTest, TestBasics) {
io::RecordWriterOptions options; io::RecordWriterOptions options;
options.zlib_options.output_buffer_size = buf_size; options.zlib_options.output_buffer_size = buf_size;
io::RecordWriter writer(file.get(), options); io::RecordWriter writer(file.get(), options);
writer.WriteRecord("abc"); TF_EXPECT_OK(writer.WriteRecord("abc"));
writer.WriteRecord("defg"); TF_EXPECT_OK(writer.WriteRecord("defg"));
TF_CHECK_OK(writer.Flush()); TF_CHECK_OK(writer.Flush());
} }
@ -82,8 +82,8 @@ TEST(RecordReaderWriterTest, TestZlib) {
options.compression_type = io::RecordWriterOptions::ZLIB_COMPRESSION; options.compression_type = io::RecordWriterOptions::ZLIB_COMPRESSION;
options.zlib_options.output_buffer_size = buf_size; options.zlib_options.output_buffer_size = buf_size;
io::RecordWriter writer(file.get(), options); io::RecordWriter writer(file.get(), options);
writer.WriteRecord("abc"); TF_EXPECT_OK(writer.WriteRecord("abc"));
writer.WriteRecord("defg"); TF_EXPECT_OK(writer.WriteRecord("defg"));
TF_CHECK_OK(writer.Flush()); TF_CHECK_OK(writer.Flush());
} }

View File

@ -281,10 +281,10 @@ REGISTER_OP("FusedBatchNorm")
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &x)); TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &x));
bool is_training; bool is_training;
c->GetAttr("is_training", &is_training); TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training));
int number_inputs = (is_training) ? 3 : 5; int number_inputs = (is_training) ? 3 : 5;
string data_format; string data_format;
c->GetAttr("data_format", &data_format); TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format));
DimensionHandle channel_dim = DimensionHandle channel_dim =
(data_format == "NHWC") ? c->Dim(x, 3) : c->Dim(x, 1); (data_format == "NHWC") ? c->Dim(x, 3) : c->Dim(x, 1);
@ -360,8 +360,8 @@ REGISTER_OP("FusedBatchNormGrad")
bool is_training; bool is_training;
string data_format; string data_format;
c->GetAttr("is_training", &is_training); TF_RETURN_IF_ERROR(c->GetAttr("is_training", &is_training));
c->GetAttr("data_format", &data_format); TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format));
DimensionHandle channel_dim = (data_format == "NHWC") DimensionHandle channel_dim = (data_format == "NHWC")
? c->Dim(y_backprop, 3) ? c->Dim(y_backprop, 3)
: c->Dim(y_backprop, 1); : c->Dim(y_backprop, 1);

View File

@ -34,10 +34,10 @@ REGISTER_OP("VarHandleOp")
.SetShapeFn([](shape_inference::InferenceContext* c) { .SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Scalar()); c->set_output(0, c->Scalar());
DataType t; DataType t;
c->GetAttr("dtype", &t); TF_RETURN_IF_ERROR(c->GetAttr("dtype", &t));
c->set_output_handle_dtype(0, t); c->set_output_handle_dtype(0, t);
TensorShapeProto p; TensorShapeProto p;
c->GetAttr("shape", &p); TF_RETURN_IF_ERROR(c->GetAttr("shape", &p));
shape_inference::ShapeHandle s; shape_inference::ShapeHandle s;
TF_RETURN_IF_ERROR(c->MakeShapeFromShapeProto(p, &s)); TF_RETURN_IF_ERROR(c->MakeShapeFromShapeProto(p, &s));
c->set_output_handle_shape(0, s); c->set_output_handle_shape(0, s);
@ -60,7 +60,7 @@ REGISTER_OP("ReadVariableOp")
.SetShapeFn([](InferenceContext* c) { .SetShapeFn([](InferenceContext* c) {
DataType handle_dtype = c->input_handle_dtype(0); DataType handle_dtype = c->input_handle_dtype(0);
DataType value_dtype; DataType value_dtype;
c->GetAttr("dtype", &value_dtype); TF_RETURN_IF_ERROR(c->GetAttr("dtype", &value_dtype));
if (handle_dtype != value_dtype) { if (handle_dtype != value_dtype) {
return errors::InvalidArgument( return errors::InvalidArgument(
"Trying to read variable with wrong dtype. " "Trying to read variable with wrong dtype. "
@ -103,7 +103,7 @@ ignore_lookup_error: whether to ignore the error when the resource
Status CreateAssignShapeFn(InferenceContext* c) { Status CreateAssignShapeFn(InferenceContext* c) {
DataType handle_dtype = c->input_handle_dtype(0); DataType handle_dtype = c->input_handle_dtype(0);
DataType value_dtype; DataType value_dtype;
c->GetAttr("dtype", &value_dtype); TF_RETURN_IF_ERROR(c->GetAttr("dtype", &value_dtype));
if (handle_dtype != value_dtype) { if (handle_dtype != value_dtype) {
return errors::InvalidArgument( return errors::InvalidArgument(
"Trying to initialize handle for variable with wrong dtype. " "Trying to initialize handle for variable with wrong dtype. "

View File

@ -26,11 +26,12 @@ using shape_inference::InferenceContext;
static Status ApplySdcaOptimizerShapeFn(InferenceContext* c) { static Status ApplySdcaOptimizerShapeFn(InferenceContext* c) {
std::vector<ShapeHandle> sparse_handles; std::vector<ShapeHandle> sparse_handles;
if (c->input("sparse_weights", &sparse_handles).ok()) { if (c->input("sparse_weights", &sparse_handles).ok()) {
c->set_output("out_delta_sparse_weights", sparse_handles); TF_RETURN_IF_ERROR(
c->set_output("out_delta_sparse_weights", sparse_handles));
} }
std::vector<ShapeHandle> dense_handles; std::vector<ShapeHandle> dense_handles;
if (c->input("dense_weights", &dense_handles).ok()) { if (c->input("dense_weights", &dense_handles).ok()) {
c->set_output("out_delta_dense_weights", dense_handles); TF_RETURN_IF_ERROR(c->set_output("out_delta_dense_weights", dense_handles));
} }
return c->set_output( return c->set_output(
"out_example_state_data", "out_example_state_data",

View File

@ -336,7 +336,7 @@ class GcsWritableFile : public WritableFile {
std::ofstream::binary | std::ofstream::app); std::ofstream::binary | std::ofstream::app);
} }
~GcsWritableFile() { Close(); } ~GcsWritableFile() override { Close().IgnoreError(); }
Status Append(const StringPiece& data) override { Status Append(const StringPiece& data) override {
TF_RETURN_IF_ERROR(CheckWritable()); TF_RETURN_IF_ERROR(CheckWritable());
@ -767,8 +767,9 @@ Status GcsFileSystem::BucketExists(const string& bucket, bool* result) {
std::unique_ptr<HttpRequest> request(http_request_factory_->Create()); std::unique_ptr<HttpRequest> request(http_request_factory_->Create());
TF_RETURN_IF_ERROR(request->Init()); TF_RETURN_IF_ERROR(request->Init());
request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket)); TF_RETURN_IF_ERROR(
request->AddAuthBearerHeader(auth_token); request->SetUri(strings::StrCat(kGcsUriBase, "b/", bucket)));
TF_RETURN_IF_ERROR(request->AddAuthBearerHeader(auth_token));
const Status status = request->Send(); const Status status = request->Send();
switch (status.code()) { switch (status.code()) {
case errors::Code::OK: case errors::Code::OK:

View File

@ -447,8 +447,8 @@ TEST(HttpRequestTest, WrongSequenceOfCalls_TwoSends) {
HttpRequest http_request(&libcurl); HttpRequest http_request(&libcurl);
TF_EXPECT_OK(http_request.Init()); TF_EXPECT_OK(http_request.Init());
http_request.SetUri("http://www.google.com"); TF_EXPECT_OK(http_request.SetUri("http://www.google.com"));
http_request.Send(); TF_EXPECT_OK(http_request.Send());
auto s = http_request.Send(); auto s = http_request.Send();
ASSERT_TRUE(errors::IsFailedPrecondition(s)); ASSERT_TRUE(errors::IsFailedPrecondition(s));
EXPECT_TRUE(StringPiece(s.error_message()) EXPECT_TRUE(StringPiece(s.error_message())
@ -460,8 +460,8 @@ TEST(HttpRequestTest, WrongSequenceOfCalls_ReusingAfterSend) {
HttpRequest http_request(&libcurl); HttpRequest http_request(&libcurl);
TF_EXPECT_OK(http_request.Init()); TF_EXPECT_OK(http_request.Init());
http_request.SetUri("http://www.google.com"); TF_EXPECT_OK(http_request.SetUri("http://www.google.com"));
http_request.Send(); TF_EXPECT_OK(http_request.Send());
auto s = http_request.SetUri("http://mail.google.com"); auto s = http_request.SetUri("http://mail.google.com");
ASSERT_TRUE(errors::IsFailedPrecondition(s)); ASSERT_TRUE(errors::IsFailedPrecondition(s));
EXPECT_TRUE(StringPiece(s.error_message()) EXPECT_TRUE(StringPiece(s.error_message())
@ -473,7 +473,7 @@ TEST(HttpRequestTest, WrongSequenceOfCalls_SettingMethodTwice) {
HttpRequest http_request(&libcurl); HttpRequest http_request(&libcurl);
TF_EXPECT_OK(http_request.Init()); TF_EXPECT_OK(http_request.Init());
http_request.SetDeleteRequest(); TF_EXPECT_OK(http_request.SetDeleteRequest());
auto s = http_request.SetPostEmptyBody(); auto s = http_request.SetPostEmptyBody();
ASSERT_TRUE(errors::IsFailedPrecondition(s)); ASSERT_TRUE(errors::IsFailedPrecondition(s));
EXPECT_TRUE(StringPiece(s.error_message()) EXPECT_TRUE(StringPiece(s.error_message())

View File

@ -91,7 +91,7 @@ class RetryingWritableFile : public WritableFile {
~RetryingWritableFile() { ~RetryingWritableFile() {
// Makes sure the retrying version of Close() is called in the destructor. // Makes sure the retrying version of Close() is called in the destructor.
Close(); Close().IgnoreError();
} }
Status Append(const StringPiece& data) override { Status Append(const StringPiece& data) override {

View File

@ -32,7 +32,7 @@ namespace {
string CreateTestFile(Env* env, const string& filename, int length) { string CreateTestFile(Env* env, const string& filename, int length) {
string input(length, 0); string input(length, 0);
for (int i = 0; i < length; i++) input[i] = i; for (int i = 0; i < length; i++) input[i] = i;
WriteStringToFile(env, filename, input); TF_CHECK_OK(WriteStringToFile(env, filename, input));
return input; return input;
} }
@ -53,11 +53,12 @@ string BaseDir() { return io::JoinPath(testing::TmpDir(), "base_dir"); }
class DefaultEnvTest : public ::testing::Test { class DefaultEnvTest : public ::testing::Test {
protected: protected:
void SetUp() override { env_->CreateDir(BaseDir()); } void SetUp() override { TF_CHECK_OK(env_->CreateDir(BaseDir())); }
void TearDown() override { void TearDown() override {
int64 undeleted_files, undeleted_dirs; int64 undeleted_files, undeleted_dirs;
env_->DeleteRecursively(BaseDir(), &undeleted_files, &undeleted_dirs); TF_CHECK_OK(
env_->DeleteRecursively(BaseDir(), &undeleted_files, &undeleted_dirs));
} }
Env* env_ = Env::Default(); Env* env_ = Env::Default();

View File

@ -259,7 +259,7 @@ class HDFSWritableFile : public WritableFile {
~HDFSWritableFile() override { ~HDFSWritableFile() override {
if (file_ != nullptr) { if (file_ != nullptr) {
Close(); Close().IgnoreError();
} }
} }

View File

@ -97,7 +97,7 @@ void EventsWriter::WriteSerializedEvent(StringPiece event_str) {
} }
} }
num_outstanding_events_++; num_outstanding_events_++;
recordio_writer_->WriteRecord(event_str); recordio_writer_->WriteRecord(event_str).IgnoreError();
} }
// NOTE(touts); This is NOT the function called by the Python code. // NOTE(touts); This is NOT the function called by the Python code.

View File

@ -140,7 +140,7 @@ TEST(EventWriter, FailFlush) {
string filename = writer.FileName(); string filename = writer.FileName();
WriteFile(&writer); WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename)); TF_EXPECT_OK(env()->FileExists(filename));
env()->DeleteFile(filename); TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code()); EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code());
EXPECT_FALSE(writer.Flush()); EXPECT_FALSE(writer.Flush());
EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code()); EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code());
@ -152,7 +152,7 @@ TEST(EventWriter, FailClose) {
string filename = writer.FileName(); string filename = writer.FileName();
WriteFile(&writer); WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename)); TF_EXPECT_OK(env()->FileExists(filename));
env()->DeleteFile(filename); TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code()); EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code());
EXPECT_FALSE(writer.Close()); EXPECT_FALSE(writer.Close());
EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code()); EXPECT_EQ(errors::Code::NOT_FOUND, env()->FileExists(filename).code());
@ -187,7 +187,7 @@ TEST(EventWriter, NameClose) {
string filename = writer.FileName(); string filename = writer.FileName();
EXPECT_TRUE(writer.Close()); EXPECT_TRUE(writer.Close());
TF_EXPECT_OK(env()->FileExists(filename)); TF_EXPECT_OK(env()->FileExists(filename));
env()->DeleteFile(filename); TF_ASSERT_OK(env()->DeleteFile(filename));
} }
TEST(EventWriter, FileDeletionBeforeWriting) { TEST(EventWriter, FileDeletionBeforeWriting) {
@ -197,7 +197,7 @@ TEST(EventWriter, FileDeletionBeforeWriting) {
TF_EXPECT_OK(env()->FileExists(filename0)); TF_EXPECT_OK(env()->FileExists(filename0));
env()->SleepForMicroseconds( env()->SleepForMicroseconds(
2000000); // To make sure timestamp part of filename will differ. 2000000); // To make sure timestamp part of filename will differ.
env()->DeleteFile(filename0); TF_ASSERT_OK(env()->DeleteFile(filename0));
EXPECT_TRUE(writer.Init()); // Init should reopen file. EXPECT_TRUE(writer.Init()); // Init should reopen file.
WriteFile(&writer); WriteFile(&writer);
EXPECT_TRUE(writer.Flush()); EXPECT_TRUE(writer.Flush());

View File

@ -359,9 +359,9 @@ Status BatchExampleProtoToTensors(
for (size_t b = 0; b < examples.size(); ++b) { for (size_t b = 0; b < examples.size(); ++b) {
const Example& ex = *(examples[b]); const Example& ex = *(examples[b]);
const string& example_name = (has_names) ? names[b] : "<unknown>"; const string& example_name = (has_names) ? names[b] : "<unknown>";
SingleExampleProtoToTensors( TF_RETURN_IF_ERROR(SingleExampleProtoToTensors(
ex, example_name, b, fixed_len_features, var_len_features, ex, example_name, b, fixed_len_features, var_len_features,
&output_dense_values_tensor_ptrs, &sparse_values_tmp); &output_dense_values_tensor_ptrs, &sparse_values_tmp));
} }
for (size_t d = 0; d < var_len_features.size(); ++d) { for (size_t d = 0; d < var_len_features.size(); ++d) {
@ -370,8 +370,9 @@ Status BatchExampleProtoToTensors(
const std::vector<Tensor>& sparse_values_tensor = sparse_values_tmp[d]; const std::vector<Tensor>& sparse_values_tensor = sparse_values_tmp[d];
VarLenFeatureBatchShapes sparse_tensor_batch_shapes; VarLenFeatureBatchShapes sparse_tensor_batch_shapes;
GetSparseTensorShapes(feature_config, sparse_values_tensor, batch_size, TF_RETURN_IF_ERROR(GetSparseTensorShapes(feature_config,
&sparse_tensor_batch_shapes); sparse_values_tensor, batch_size,
&sparse_tensor_batch_shapes));
const TensorShape& indices_shape = sparse_tensor_batch_shapes.indices_shape; const TensorShape& indices_shape = sparse_tensor_batch_shapes.indices_shape;
const TensorShape& values_shape = sparse_tensor_batch_shapes.values_shape; const TensorShape& values_shape = sparse_tensor_batch_shapes.values_shape;

View File

@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/core/util/example_proto_helper.h" #include "tensorflow/core/util/example_proto_helper.h"
#include "tensorflow/core/example/example.pb.h" #include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test.h"
namespace tensorflow { namespace tensorflow {
@ -99,8 +100,9 @@ TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyTrivial) {
} }
std::vector<FixedLenFeature> empty_dense_vec; std::vector<FixedLenFeature> empty_dense_vec;
SingleExampleProtoToTensors(ex, "", 0, empty_dense_vec, sparse_vec_, TF_EXPECT_OK(SingleExampleProtoToTensors(ex, "", 0, empty_dense_vec,
&output_dense_values, &output_sparse_values_tmp); sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0]; const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0];
EXPECT_EQ(1, int64_tensor_vec.size()); EXPECT_EQ(1, int64_tensor_vec.size());
@ -124,8 +126,9 @@ TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyEmpty) {
} }
std::vector<FixedLenFeature> empty_dense_vec; std::vector<FixedLenFeature> empty_dense_vec;
SingleExampleProtoToTensors(empty, "", 0, empty_dense_vec, sparse_vec_, TF_EXPECT_OK(SingleExampleProtoToTensors(empty, "", 0, empty_dense_vec,
&output_dense_values, &output_sparse_values_tmp); sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
// Each feature will still have a tensor vector, however the tensor // Each feature will still have a tensor vector, however the tensor
// in the vector will be empty. // in the vector will be empty.
@ -167,8 +170,9 @@ TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyTrivial) {
std::vector<VarLenFeature> empty_sparse_vec; std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp; std::vector<std::vector<Tensor>> output_sparse_values_tmp;
SingleExampleProtoToTensors(ex, "", 0, dense_vec_, empty_sparse_vec, TF_EXPECT_OK(SingleExampleProtoToTensors(
&output_dense_values, &output_sparse_values_tmp); ex, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_TRUE(output_sparse_values_tmp.empty()); EXPECT_TRUE(output_sparse_values_tmp.empty());
EXPECT_EQ(1, int64_dense_output.matrix<int64>().size()); EXPECT_EQ(1, int64_dense_output.matrix<int64>().size());
@ -196,8 +200,9 @@ TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyDefaults) {
std::vector<VarLenFeature> empty_sparse_vec; std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp; std::vector<std::vector<Tensor>> output_sparse_values_tmp;
SingleExampleProtoToTensors(empty, "", 0, dense_vec_, empty_sparse_vec, TF_EXPECT_OK(SingleExampleProtoToTensors(
&output_dense_values, &output_sparse_values_tmp); empty, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_EQ(1, int64_dense_output.matrix<int64>().size()); EXPECT_EQ(1, int64_dense_output.matrix<int64>().size());
EXPECT_EQ(0, int64_dense_output.matrix<int64>()(0, 0)); EXPECT_EQ(0, int64_dense_output.matrix<int64>()(0, 0));

View File

@ -142,7 +142,7 @@ TEST(MemmappedFileSystemTest, ProxyToDefault) {
// Making sure to clean up after the test finishes. // Making sure to clean up after the test finishes.
const auto adh = [&memmapped_env, &filename](WritableFile* f) { const auto adh = [&memmapped_env, &filename](WritableFile* f) {
delete f; delete f;
memmapped_env.DeleteFile(filename); TF_CHECK_OK(memmapped_env.DeleteFile(filename));
}; };
std::unique_ptr<WritableFile, decltype(adh)> writable_file( std::unique_ptr<WritableFile, decltype(adh)> writable_file(
writable_file_temp.release(), adh); writable_file_temp.release(), adh);

View File

@ -233,7 +233,7 @@ bool IsFullSlice(const TensorSlice& slice_spec,
return true; return true;
} else { } else {
TensorShape sliced_shape; TensorShape sliced_shape;
slice_spec.SliceTensorShape(full_tensor_shape, &sliced_shape); slice_spec.SliceTensorShape(full_tensor_shape, &sliced_shape).IgnoreError();
return sliced_shape == full_tensor_shape; return sliced_shape == full_tensor_shape;
} }
} }
@ -348,7 +348,7 @@ Status BundleWriter::Finish() {
status_ = Env::Default()->RenameFile(tmp_data_path_, status_ = Env::Default()->RenameFile(tmp_data_path_,
DataFilename(prefix_, 0, 1)); DataFilename(prefix_, 0, 1));
} else { } else {
Env::Default()->DeleteFile(tmp_data_path_); Env::Default()->DeleteFile(tmp_data_path_).IgnoreError();
} }
} }
if (!status_.ok()) return status_; if (!status_.ok()) return status_;
@ -381,7 +381,7 @@ Status BundleWriter::Finish() {
} }
status_.Update(file->Close()); status_.Update(file->Close());
if (!status_.ok()) { if (!status_.ok()) {
Env::Default()->DeleteFile(tmp_metadata_path_); Env::Default()->DeleteFile(tmp_metadata_path_).IgnoreError();
return status_; return status_;
} else { } else {
status_ = status_ =
@ -515,7 +515,7 @@ Status MergeBundles(Env* env, gtl::ArraySlice<string> prefixes,
// Merges all metadata tables. // Merges all metadata tables.
// TODO(zhifengc): KeyValue sorter if it becomes too big. // TODO(zhifengc): KeyValue sorter if it becomes too big.
MergeState merge; MergeState merge;
env->CreateDir(io::Dirname(merged_prefix).ToString()); // Ignores errors. env->CreateDir(io::Dirname(merged_prefix).ToString()).IgnoreError();
for (int i = 0; i < prefixes.size(); ++i) { for (int i = 0; i < prefixes.size(); ++i) {
TF_RETURN_IF_ERROR(MergeOneBundle(env, prefixes[i], &merge)); TF_RETURN_IF_ERROR(MergeOneBundle(env, prefixes[i], &merge));
} }
@ -554,7 +554,7 @@ Status MergeBundles(Env* env, gtl::ArraySlice<string> prefixes,
// Cleanup: best effort based and ignores errors. // Cleanup: best effort based and ignores errors.
for (const string& prefix : prefixes) { for (const string& prefix : prefixes) {
env->DeleteFile(MetaFilename(prefix)); env->DeleteFile(MetaFilename(prefix)).IgnoreError();
} }
return status; return status;
} }
@ -737,15 +737,15 @@ Status BundleReader::GetSliceValue(StringPiece full_tensor_key,
// Special case: a writer has saved a tensor fully, but the reader wants // Special case: a writer has saved a tensor fully, but the reader wants
// to read in slices. We therefore register the full slice on-demand here // to read in slices. We therefore register the full slice on-demand here
// without further complicating the on-disk bundle format. // without further complicating the on-disk bundle format.
RegisterTensorSlice(full_tensor_key_string, full_shape, TF_RETURN_IF_ERROR(RegisterTensorSlice(
full_tensor_entry.dtype(), /* tag */ "", full_tensor_key_string, full_shape, full_tensor_entry.dtype(),
/* full slice */ TensorSlice(full_shape.dims()), /* tag */ "",
&tensor_slices_); /* full slice */ TensorSlice(full_shape.dims()), &tensor_slices_));
} }
for (const TensorSliceProto& slice : full_tensor_entry.slices()) { for (const TensorSliceProto& slice : full_tensor_entry.slices()) {
RegisterTensorSlice(full_tensor_key_string, full_shape, TF_RETURN_IF_ERROR(RegisterTensorSlice(
full_tensor_entry.dtype(), full_tensor_key_string, full_shape, full_tensor_entry.dtype(),
/* tag */ "", TensorSlice(slice), &tensor_slices_); /* tag */ "", TensorSlice(slice), &tensor_slices_));
} }
tss = gtl::FindPtrOrNull(tensor_slices_, full_tensor_key_string); tss = gtl::FindPtrOrNull(tensor_slices_, full_tensor_key_string);
CHECK_NE(tss, nullptr); CHECK_NE(tss, nullptr);

View File

@ -123,10 +123,10 @@ template <typename T>
void TestBasic() { void TestBasic() {
{ {
BundleWriter writer(Env::Default(), Prefix("foo")); BundleWriter writer(Env::Default(), Prefix("foo"));
writer.Add("foo_003", Constant_2x3<T>(3)); TF_EXPECT_OK(writer.Add("foo_003", Constant_2x3<T>(3)));
writer.Add("foo_000", Constant_2x3<T>(0)); TF_EXPECT_OK(writer.Add("foo_000", Constant_2x3<T>(0)));
writer.Add("foo_002", Constant_2x3<T>(2)); TF_EXPECT_OK(writer.Add("foo_002", Constant_2x3<T>(2)));
writer.Add("foo_001", Constant_2x3<T>(1)); TF_EXPECT_OK(writer.Add("foo_001", Constant_2x3<T>(1)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
} }
{ {
@ -142,10 +142,10 @@ void TestBasic() {
} }
{ {
BundleWriter writer(Env::Default(), Prefix("bar")); BundleWriter writer(Env::Default(), Prefix("bar"));
writer.Add("bar_003", Constant_2x3<T>(3)); TF_EXPECT_OK(writer.Add("bar_003", Constant_2x3<T>(3)));
writer.Add("bar_000", Constant_2x3<T>(0)); TF_EXPECT_OK(writer.Add("bar_000", Constant_2x3<T>(0)));
writer.Add("bar_002", Constant_2x3<T>(2)); TF_EXPECT_OK(writer.Add("bar_002", Constant_2x3<T>(2)));
writer.Add("bar_001", Constant_2x3<T>(1)); TF_EXPECT_OK(writer.Add("bar_001", Constant_2x3<T>(1)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
} }
{ {
@ -183,9 +183,11 @@ template <typename T>
void TestNonStandardShapes() { void TestNonStandardShapes() {
{ {
BundleWriter writer(Env::Default(), Prefix("nonstandard")); BundleWriter writer(Env::Default(), Prefix("nonstandard"));
writer.Add("scalar", Constant<T>(0, TensorShape())); TF_EXPECT_OK(writer.Add("scalar", Constant<T>(0, TensorShape())));
writer.Add("non_standard0", Constant<T>(0, TensorShape({0, 1618}))); TF_EXPECT_OK(
writer.Add("non_standard1", Constant<T>(0, TensorShape({16, 0, 18}))); writer.Add("non_standard0", Constant<T>(0, TensorShape({0, 1618}))));
TF_EXPECT_OK(
writer.Add("non_standard1", Constant<T>(0, TensorShape({16, 0, 18}))));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
} }
{ {
@ -323,12 +325,14 @@ TEST(TensorBundleTest, NonStandardShapes) {
TEST(TensorBundleTest, StringTensors) { TEST(TensorBundleTest, StringTensors) {
{ {
BundleWriter writer(Env::Default(), Prefix("foo")); BundleWriter writer(Env::Default(), Prefix("foo"));
writer.Add("string_tensor", Tensor(DT_STRING, TensorShape({1}))); // Empty. TF_EXPECT_OK(writer.Add("string_tensor",
writer.Add("scalar", test::AsTensor<string>({"hello"})); Tensor(DT_STRING, TensorShape({1})))); // Empty.
writer.Add("strs", test::AsTensor<string>( TF_EXPECT_OK(writer.Add("scalar", test::AsTensor<string>({"hello"})));
{"hello", "", "x01", string(1 << 25, 'c')})); TF_EXPECT_OK(writer.Add(
"strs",
test::AsTensor<string>({"hello", "", "x01", string(1 << 25, 'c')})));
// Mixes in some floats. // Mixes in some floats.
writer.Add("floats", Constant_2x3<float>(16.18)); TF_EXPECT_OK(writer.Add("floats", Constant_2x3<float>(16.18)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
} }
{ {
@ -355,7 +359,8 @@ TEST(TensorBundleTest, DirectoryStructure) {
Prefix("worker1")}; Prefix("worker1")};
for (int i = 0; i < 2; ++i) { for (int i = 0; i < 2; ++i) {
BundleWriter writer(env, kBundlePrefixes[i]); BundleWriter writer(env, kBundlePrefixes[i]);
writer.Add(strings::StrCat("tensor", i), Constant_2x3<float>(0.)); TF_EXPECT_OK(
writer.Add(strings::StrCat("tensor", i), Constant_2x3<float>(0.)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
} }
@ -396,8 +401,8 @@ TEST(TensorBundleTest, DirectoryStructure) {
TEST(TensorBundleTest, Error) { TEST(TensorBundleTest, Error) {
{ // Dup keys. { // Dup keys.
BundleWriter writer(Env::Default(), Prefix("dup")); BundleWriter writer(Env::Default(), Prefix("dup"));
writer.Add("foo", Constant_2x3(1.f)); TF_EXPECT_OK(writer.Add("foo", Constant_2x3(1.f)));
writer.Add("foo", Constant_2x3(2.f)); EXPECT_FALSE(writer.Add("foo", Constant_2x3(2.f)).ok());
EXPECT_TRUE( EXPECT_TRUE(
StringPiece(writer.status().ToString()).contains("duplicate key")); StringPiece(writer.status().ToString()).contains("duplicate key"));
EXPECT_FALSE(writer.Finish().ok()); EXPECT_FALSE(writer.Finish().ok());
@ -446,7 +451,7 @@ TEST(TensorBundleTest, Checksum) {
// Corrupts a float tensor. // Corrupts a float tensor.
{ {
BundleWriter writer(Env::Default(), Prefix("singleton")); BundleWriter writer(Env::Default(), Prefix("singleton"));
writer.Add("foo", Constant_2x3(1.f)); TF_EXPECT_OK(writer.Add("foo", Constant_2x3(1.f)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
FlipByte("singleton", 0 /* corrupts any byte */); FlipByte("singleton", 0 /* corrupts any byte */);
@ -458,7 +463,8 @@ TEST(TensorBundleTest, Checksum) {
{ {
auto WriteStrings = []() { auto WriteStrings = []() {
BundleWriter writer(Env::Default(), Prefix("strings")); BundleWriter writer(Env::Default(), Prefix("strings"));
writer.Add("foo", test::AsTensor<string>({"hello", "world"})); TF_EXPECT_OK(
writer.Add("foo", test::AsTensor<string>({"hello", "world"})));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
}; };
// Corrupts the first two bytes, which are the varint32-encoded lengths // Corrupts the first two bytes, which are the varint32-encoded lengths
@ -482,7 +488,7 @@ TEST(TensorBundleTest, Checksum) {
TEST(TensorBundleTest, Endianness) { TEST(TensorBundleTest, Endianness) {
BundleWriter writer(Env::Default(), Prefix("end")); BundleWriter writer(Env::Default(), Prefix("end"));
writer.Add("key", Constant_2x3<float>(1.0)); TF_EXPECT_OK(writer.Add("key", Constant_2x3<float>(1.0)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
// Flips the endianness bit. // Flips the endianness bit.
@ -497,7 +503,7 @@ TEST(TensorBundleTest, Endianness) {
TEST(TensorBundleTest, TruncatedTensorContents) { TEST(TensorBundleTest, TruncatedTensorContents) {
Env* env = Env::Default(); Env* env = Env::Default();
BundleWriter writer(env, Prefix("end")); BundleWriter writer(env, Prefix("end"));
writer.Add("key", Constant_2x3<float>(1.0)); TF_EXPECT_OK(writer.Add("key", Constant_2x3<float>(1.0)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
// Truncates the data file by one byte, so that we hit EOF. // Truncates the data file by one byte, so that we hit EOF.
@ -517,7 +523,7 @@ TEST(TensorBundleTest, TruncatedTensorContents) {
TEST(TensorBundleTest, HeaderEntry) { TEST(TensorBundleTest, HeaderEntry) {
{ {
BundleWriter writer(Env::Default(), Prefix("b")); BundleWriter writer(Env::Default(), Prefix("b"));
writer.Add("key", Constant_2x3<float>(1.0)); TF_EXPECT_OK(writer.Add("key", Constant_2x3<float>(1.0)));
TF_ASSERT_OK(writer.Finish()); TF_ASSERT_OK(writer.Finish());
} }

View File

@ -410,7 +410,7 @@ static void VersionTest(const VersionDef& versions, const string& error) {
TF_ASSERT_OK(CreateTableTensorSliceBuilder(path, &builder)); TF_ASSERT_OK(CreateTableTensorSliceBuilder(path, &builder));
builder->Add(kSavedTensorSlicesKey, contents); builder->Add(kSavedTensorSlicesKey, contents);
int64 file_size; int64 file_size;
builder->Finish(&file_size); TF_EXPECT_OK(builder->Finish(&file_size));
delete builder; delete builder;
} }

View File

@ -242,7 +242,7 @@ static void BM_RegisterOneByOne(int parts) {
TensorSliceSet slice_set(shape, DT_INT32); TensorSliceSet slice_set(shape, DT_INT32);
for (int i = 0; i < parts; ++i) { for (int i = 0; i < parts; ++i) {
TensorSlice part({{i, 1}, {0, -1}}); TensorSlice part({{i, 1}, {0, -1}});
slice_set.Register(part, part.DebugString(), nullptr); TF_CHECK_OK(slice_set.Register(part, part.DebugString(), nullptr));
} }
} }

View File

@ -123,7 +123,7 @@ Status TensorSliceWriter::Finish() {
LOG(ERROR) << "Failed to rename file " << tmpname_ << " to " << filename_; LOG(ERROR) << "Failed to rename file " << tmpname_ << " to " << filename_;
} }
} else { } else {
Env::Default()->DeleteFile(tmpname_); Env::Default()->DeleteFile(tmpname_).IgnoreError();
} }
return s; return s;
} }

View File

@ -17,6 +17,7 @@ limitations under the License.
#include <array> #include <array>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/logging.h"
@ -270,12 +271,14 @@ size_t BytesPerElementHelper(DT value) {
SavedSlice ss; SavedSlice ss;
std::array<DT, 1> lo_data; std::array<DT, 1> lo_data;
std::fill(lo_data.begin(), lo_data.end(), value); std::fill(lo_data.begin(), lo_data.end(), value);
TensorSliceWriter::SaveData(lo_data.data(), lo_data.size(), &ss); TF_EXPECT_OK(
TensorSliceWriter::SaveData(lo_data.data(), lo_data.size(), &ss));
int lo_byte_size = ss.ByteSize(); int lo_byte_size = ss.ByteSize();
std::array<DT, 1001> hi_data; std::array<DT, 1001> hi_data;
std::fill(hi_data.begin(), hi_data.end(), value); std::fill(hi_data.begin(), hi_data.end(), value);
TensorSliceWriter::SaveData(hi_data.data(), hi_data.size(), &ss); TF_EXPECT_OK(
TensorSliceWriter::SaveData(hi_data.data(), hi_data.size(), &ss));
int hi_byte_size = ss.ByteSize(); int hi_byte_size = ss.ByteSize();
return (hi_byte_size - lo_byte_size) / (hi_data.size() - lo_data.size()); return (hi_byte_size - lo_byte_size) / (hi_data.size() - lo_data.size());

View File

@ -49,7 +49,8 @@ string TryFindKernelClass(const string& serialized_node_def) {
} }
string class_name = ""; string class_name = "";
tensorflow::FindKernelDef(tensorflow::DeviceType(parsed_name.type.c_str()), tensorflow::FindKernelDef(tensorflow::DeviceType(parsed_name.type.c_str()),
node_def, nullptr /* kernel_def */, &class_name); node_def, nullptr /* kernel_def */, &class_name)
.IgnoreError();
return class_name; return class_name;
} }

View File

@ -141,7 +141,8 @@ port::StatusOr<StreamExecutor*> CudaPlatform::GetExecutor(
} }
StreamExecutor* naked_executor = executor.ValueOrDie().get(); StreamExecutor* naked_executor = executor.ValueOrDie().get();
executor_cache_.Insert(config, executor.ConsumeValueOrDie()); SE_RETURN_IF_ERROR(
executor_cache_.Insert(config, executor.ConsumeValueOrDie()));
return naked_executor; return naked_executor;
} }

View File

@ -454,9 +454,9 @@ int Main(int argc, char** argv) {
// Report the stats. // Report the stats.
TestReporter reporter(output_prefix, benchmark_name); TestReporter reporter(output_prefix, benchmark_name);
reporter.Initialize(); TF_QCHECK_OK(reporter.Initialize());
reporter.Benchmark(num_runs, -1.0, wall_time, throughput); TF_QCHECK_OK(reporter.Benchmark(num_runs, -1.0, wall_time, throughput));
reporter.Close(); TF_QCHECK_OK(reporter.Close());
} }
return 0; return 0;

Some files were not shown because too many files have changed in this diff Show More