From f5669d905a28893c71ff44245da6ed5e13d55d1c Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Mon, 13 Nov 2017 23:14:06 -0800 Subject: [PATCH 001/104] In the Grappler arithmetic optimizer, make sure the two new nodes have unique names when hoisting common factors out of aggregates. Fix a few missed optimization opportunities due to control dependencies. Fix a bug in counting the number of inputs for trivial aggregate rewriting. PiperOrigin-RevId: 175639520 --- .../optimizers/arithmetic_optimizer.cc | 97 +++++++---- .../optimizers/arithmetic_optimizer_test.cc | 164 +++++++++++++----- 2 files changed, 190 insertions(+), 71 deletions(-) diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc index e8ef0e94b54..5cce34e2a61 100644 --- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc @@ -188,7 +188,7 @@ bool IsInnerMatrixTransposeNode(const NodeDef& transpose_node, // Follow a chain (through input(0)) of ops starting at `source->input(0)` as // long as they // 1. preserve the values of their first input, -// 2. have a single output, +// 2. have a single (non-control) output, // 3. are not in nodes_to_preserve. // Returns the last node in the chain satisfying these properties or source // itself if a chain of length zero was found. @@ -200,20 +200,55 @@ NodeDef* GetTailOfValuePreservingChain( const NodeDef* source, const NodeMap* node_map, const std::unordered_set& nodes_to_preserve) { const NodeDef* source_parent = source; - source = node_map->GetNode(source->input(0)); - while (IsValuePreserving(*source) && - node_map->GetOutputs(source->name()).size() == 1 && - // Do not skip over preserved nodes, because folding will change - // the results of these skipped data-reordering nodes. - // TODO(jingyue): A more elegant way is to copy this chain of - // data-reordering nodes and modify only the copy. - !nodes_to_preserve.count(source->name())) { - source_parent = source; + if (!IsControlInput(source->input(0))) { source = node_map->GetNode(source->input(0)); + while (IsValuePreserving(*source) && + node_map->GetOutputs(source->name()).size() == 1 && + // Do not skip over preserved nodes, because folding will change + // the results of these skipped data-reordering nodes. + // TODO(jingyue): A more elegant way is to copy this chain of + // data-reordering nodes and modify only the copy. 
+ !nodes_to_preserve.count(source->name())) { + source_parent = source; + if (IsControlInput(source->input(0))) { + break; + } + source = node_map->GetNode(source->input(0)); + } } return const_cast(source_parent); } +bool MaybeAddControlInput(const string& new_input, NodeDef* node, + GraphDef* graph, NodeMap* node_map) { + bool already_exists = false; + for (const string& input : node->input()) { + if (input == new_input || AsControlDependency(input) == new_input) { + already_exists = true; + break; + } + } + if (!already_exists) { + const string ctrl_dep = + ConstantFolding::AddControlDependency(new_input, graph, node_map); + node->add_input(ctrl_dep); + node_map->AddOutput(NodeName(new_input), node->name()); + } + return !already_exists; +} + +int CopyControlInputs(const NodeDef& from, NodeDef* to, GraphDef* graph, + NodeMap* node_map) { + int num_copied = 0; + for (const string& input : from.input()) { + if (IsControlInput(input) && + MaybeAddControlInput(input, to, graph, node_map)) { + ++num_copied; + } + } + return num_copied; +} + // Returns the data type in attribute `attr_name` of `node`. If that attribute // doesn't exist, returns DT_INVALID. DataType GetDataTypeFromAttr(const NodeDef& node, const string& attr_name) { @@ -848,7 +883,12 @@ string ArithmeticOptimizer::TrySimplifyAndReplaceUses( // Mul(Const(N), x)) // bool all_equal = true; + int num_inputs = 1; for (int i = 1; i < node->input_size(); ++i) { + if (IsControlInput(node->input(i))) { + break; + } + ++num_inputs; if (node->input(i) != node->input(0)) { all_equal = false; break; @@ -856,10 +896,9 @@ string ArithmeticOptimizer::TrySimplifyAndReplaceUses( } if (all_equal && node_map->GetNode(node->name() + "_const") == nullptr) { // 1. Create constant node with value N. - const int N = node->input_size(); const auto type = GetDataTypeFromAttr(*node, "T"); Tensor t(type, TensorShape({})); - Status status = SetTensorValue(type, N, &t); + Status status = SetTensorValue(type, num_inputs, &t); if (!status.ok()) { LOG(WARNING) << "Failed to create const node: " << status.error_message(); @@ -885,6 +924,7 @@ string ArithmeticOptimizer::TrySimplifyAndReplaceUses( new_mul_node->add_input(node->input(0)); node_map->AddOutput(node->input(0), new_mul_node->name()); + CopyControlInputs(*node, new_mul_node, graph_def, node_map); AddFrameControlDeps(node, {new_const_node, new_mul_node}, node->input(0), {new_const_node}, graph_def, node_map, frame_map); return new_mul_node->name(); @@ -896,11 +936,12 @@ string ArithmeticOptimizer::TrySimplifyAndReplaceUses( // where all the inputs are Mul nodes. This pattern occurs frequently in // regularization terms for the gradients during training. if (node->input_size() > 1 && IsAggregate(*node) && - node_map->GetNode(node->name() + "_hoist") == nullptr) { + node_map->GetNode(node->name() + "_hoist_add") == nullptr) { // Determine the set of common factors if the input nodes are all Mul nodes. 
std::set common_factors; int i = 0; - while (i < node->input_size() && (i == 0 || !common_factors.empty())) { + while (i < node->input_size() && (i == 0 || !common_factors.empty()) && + !IsControlInput(node->input(i))) { const NodeDef* input = node_map->GetNode(node->input(i)); if (input->op() == "Mul") { std::set factors_i{input->input(0), input->input(1)}; @@ -930,31 +971,34 @@ string ArithmeticOptimizer::TrySimplifyAndReplaceUses( NodeDef* new_mul_node = graph_def->add_node(); NodeDef* new_add_node = graph_def->add_node(); *new_add_node = *node; - new_add_node->set_name(node->name() + "_hoist"); + new_add_node->set_name(node->name() + "_hoist_add"); new_nodes->push_back(new_add_node); node_map->AddNode(new_add_node->name(), new_add_node); for (int i = 0; i < node->input_size(); ++i) { - NodeDef* mul_node = node_map->GetNode(node->input(i)); + const string& input = node->input(i); + if (IsControlInput(input)) { + MaybeAddControlInput(input, new_add_node, graph_def, node_map); + continue; + } + NodeDef* mul_node = node_map->GetNode(input); int unique_factor_index = mul_node->input(0) == common_factor ? 1 : 0; const string unique_factor = mul_node->input(unique_factor_index); new_add_node->set_input(i, unique_factor); // 2. Use a copy of the first Mul node for the outer multiplication. if (i == 0) { *new_mul_node = *mul_node; - new_mul_node->set_name(new_mul_node->name() + "_hoist"); + new_mul_node->set_device(node->device()); + new_mul_node->set_name(node->name() + "_hoist_mul"); new_mul_node->set_input(0, common_factor); new_mul_node->set_input(1, new_add_node->name()); node_map->AddNode(new_mul_node->name(), new_mul_node); } } - // 3. Set the device of the new nodes to that of the common factor "x". - NodeDef* common_factor_node = node_map->GetNode(common_factor); - new_add_node->set_device(common_factor_node->device()); - new_mul_node->set_device(common_factor_node->device()); - // 4. Add frame dependencies that the original node might have had. + // 3. Add frame dependencies that the original node might have had. 
AddFrameControlDeps(node, {new_add_node, new_mul_node}, common_factor, {new_add_node}, graph_def, node_map, frame_map); + return new_mul_node->name(); } } @@ -1117,15 +1161,11 @@ Status ArithmeticOptimizer::SimplifyArithmeticOps( << consumer->name() << " to " << simplified_tensor; } node_map.UpdateInput(consumer->name(), node->name(), simplified_tensor); - if (!nodes_to_simplify.Exists(consumer)) { - nodes_to_simplify.PushBack(consumer); - } + nodes_to_simplify.PushBack(consumer); } } for (const NodeDef* new_node : new_nodes) { - if (!nodes_to_simplify.Exists(new_node)) { - nodes_to_simplify.PushBack(new_node); - } + nodes_to_simplify.PushBack(new_node); } } return Status::OK(); @@ -1136,7 +1176,6 @@ Status ArithmeticOptimizer::Optimize(Cluster* /*cluster*/, GraphDef* optimized_graph) { *optimized_graph = item.graph; nodes_to_preserve_ = item.NodesToPreserve(); - GraphProperties graph_properties(item); TF_RETURN_IF_ERROR(graph_properties.InferStatically()); TF_RETURN_IF_ERROR(graph_properties.AnnotateOutputShapes(optimized_graph)); diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc index 4fcbb0120e6..354a3069052 100644 --- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc +++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer_test.cc @@ -164,6 +164,37 @@ TEST_F(ArithmeticOptimizerTest, SimplifyInvolutionsWithChain) { EXPECT_EQ("c", output.node(2).input(0)); } +TEST_F(ArithmeticOptimizerTest, SimplifyInvolutionsWithControlChain) { + tensorflow::Scope s = tensorflow::Scope::NewRootScope(); + Output c = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2}); + Output recip1 = ops::Reciprocal(s.WithOpName("recip1"), c); + Output id1 = ops::Identity(s.WithOpName("id1"), recip1); + Output squeeze = ops::Squeeze(s.WithOpName("squeeze"), id1); + Output recip2 = ops::Reciprocal( + s.WithOpName("recip2").WithControlDependencies(squeeze), c); + Output id2 = ops::Identity(s.WithOpName("id2"), recip2); + GrapplerItem item; + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + + ArithmeticOptimizer optimizer; + GraphDef output; + Status status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + // The optimizer should be a noop. 
+ EXPECT_EQ(item.graph.node_size(), output.node_size()); + for (int i = 0; i < item.graph.node_size(); ++i) { + const NodeDef& original = item.graph.node(i); + const NodeDef& optimized = output.node(i); + EXPECT_EQ(original.name(), optimized.name()); + EXPECT_EQ(original.op(), optimized.op()); + EXPECT_EQ(original.input_size(), optimized.input_size()); + for (int j = 0; j < original.input_size(); ++j) { + EXPECT_EQ(original.input(j), optimized.input(j)); + } + } +} + TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); @@ -185,6 +216,9 @@ TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) { EXPECT_EQ(5, output.node_size()); const NodeDef& new_const = output.node(3); EXPECT_EQ("add_const", new_const.name()); + EXPECT_EQ("^x", new_const.input(0)); + EXPECT_EQ(std::string("\0\0\0@", 4), + new_const.attr().at("value").tensor().tensor_content()); const NodeDef& new_mul = output.node(4); EXPECT_EQ("add_mul", new_mul.name()); EXPECT_EQ("add_const", new_mul.input(0)); @@ -194,6 +228,41 @@ TEST_F(ArithmeticOptimizerTest, TrivialSumsSimple) { EXPECT_EQ("add_mul", new_id.input(0)); } +TEST_F(ArithmeticOptimizerTest, TrivialSumsSimpleWithControlDep) { + tensorflow::Scope s = tensorflow::Scope::NewRootScope(); + Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2}); + Output x = ops::Const(s.WithOpName("x"), {3.0f, 4.0f}, {1, 2}); + Output add = ops::Add(s.WithOpName("add").WithControlDependencies(y), x, x); + Output id = ops::Identity(s.WithOpName("id"), add); + + GrapplerItem item; + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + + ArithmeticOptimizer optimizer; + GraphDef output; + Status status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + // Run the optimizer twice to make sure the rewrite is idempotent. + item.graph.Swap(&output); + status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + EXPECT_EQ(6, output.node_size()); + const NodeDef& new_const = output.node(4); + EXPECT_EQ("add_const", new_const.name()); + EXPECT_EQ("^x", new_const.input(0)); + EXPECT_EQ(std::string("\0\0\0@", 4), + new_const.attr().at("value").tensor().tensor_content()); + const NodeDef& new_mul = output.node(5); + EXPECT_EQ("add_mul", new_mul.name()); + EXPECT_EQ("add_const", new_mul.input(0)); + EXPECT_EQ("x", new_mul.input(1)); + EXPECT_EQ("^y", new_mul.input(2)); + const NodeDef& new_id = output.node(3); + EXPECT_EQ("id", new_id.name()); + EXPECT_EQ("add_mul", new_id.input(0)); +} + TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) { // Test case from b/69059093. 
tensorflow::Scope s = tensorflow::Scope::NewRootScope(); @@ -207,6 +276,13 @@ TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) { GrapplerItem item; TF_CHECK_OK(s.ToGraphDef(&item.graph)); + const std::vector devices{ + "/device:CPU:0", "/device:GPU:0", "/device:CPU:0", "/device:GPU:1", + "/device:CPU:0", "/device:CPU:0", "/device:CPU:0", + }; + for (int i = 0; i < item.graph.node_size(); ++i) { + item.graph.mutable_node(i)->set_device(devices[i]); + } ArithmeticOptimizer optimizer; GraphDef output; Status status = optimizer.Optimize(nullptr, item, &output); @@ -216,36 +292,48 @@ TEST_F(ArithmeticOptimizerTest, TrivialSumsRepeatedAdd) { status = optimizer.Optimize(nullptr, item, &output); TF_EXPECT_OK(status); - EXPECT_EQ(11, output.node_size()); - const NodeDef& new_id = output.node(4); - EXPECT_EQ("id", new_id.name()); - EXPECT_EQ("Add_6_mul", new_id.input(0)); - - // Add4 and add5 get deduped, and we rewrite each of the 3 remaining add nodes - // of the form Add(x,x) into Mul(Const(2), x). - const NodeDef& new_add_4_const = output.node(5); - EXPECT_EQ("Add_4_const", new_add_4_const.name()); - EXPECT_EQ("^Add", new_add_4_const.input(0)); - const NodeDef& new_add_4_mul = output.node(6); - EXPECT_EQ("Add_4_mul", new_add_4_mul.name()); - EXPECT_EQ("Add_4_const", new_add_4_mul.input(0)); - EXPECT_EQ("Add_mul", new_add_4_mul.input(1)); - - const NodeDef& new_add_6_const = output.node(7); - EXPECT_EQ("Add_6_const", new_add_6_const.name()); - EXPECT_EQ("^Add_4_mul", new_add_6_const.input(0)); - const NodeDef& new_add_6_mul = output.node(8); - EXPECT_EQ("Add_6_mul", new_add_6_mul.name()); - EXPECT_EQ("Add_6_const", new_add_6_mul.input(0)); - EXPECT_EQ("Add_4_mul", new_add_6_mul.input(1)); - - const NodeDef& new_add_const = output.node(9); - EXPECT_EQ("Add_const", new_add_const.name()); - EXPECT_EQ("^Placeholder", new_add_const.input(0)); - const NodeDef& new_add_mul = output.node(10); - EXPECT_EQ("Add_mul", new_add_mul.name()); - EXPECT_EQ("Add_const", new_add_mul.input(0)); - EXPECT_EQ("Placeholder", new_add_mul.input(1)); + EXPECT_EQ(17, output.node_size()); + // The graph gets optimized to + // Mul(p, + // Add(Add(Const(2), Const(2)), + // Add(Const(2), Const(2)))) + for (const auto& node : output.node()) { + if ("id" == node.name()) { + EXPECT_EQ(1, node.input_size()); + EXPECT_EQ("Add_6_hoist_mul", node.input(0)); + } else if ("Add_6_hoist_mul" == node.name()) { + EXPECT_EQ("Mul", node.op()); + EXPECT_EQ(2, node.input_size()); + EXPECT_EQ("Placeholder", node.input(0)); + EXPECT_EQ("Add_6_hoist_add", node.input(1)); + } else if ("Add_6_hoist_add" == node.name()) { + EXPECT_EQ("Add", node.op()); + EXPECT_EQ(3, node.input_size()); + EXPECT_EQ("Add_4_hoist_add", node.input(0)); + EXPECT_EQ("Add_5_hoist_add", node.input(1)); + EXPECT_EQ("^Placeholder", node.input(2)); + } else if ("Add_4_hoist_add" == node.name()) { + EXPECT_EQ("Add", node.op()); + EXPECT_EQ(3, node.input_size()); + EXPECT_EQ("Add_const", node.input(0)); + EXPECT_EQ("Add_1_const", node.input(1)); + EXPECT_EQ("^Placeholder", node.input(2)); + } else if ("Add_5_hoist_add" == node.name()) { + EXPECT_EQ("Add", node.op()); + EXPECT_EQ(3, node.input_size()); + EXPECT_EQ("Add_const", node.input(0)); + EXPECT_EQ("Add_1_const", node.input(1)); + EXPECT_EQ("^Placeholder", node.input(2)); + } else if ("Add_const" == node.name()) { + EXPECT_EQ("Const", node.op()); + EXPECT_EQ(1, node.input_size()); + EXPECT_EQ("^Placeholder", node.input(0)); + } else if ("Add_1_const" == node.name()) { + EXPECT_EQ("Const", node.op()); + EXPECT_EQ(1, 
node.input_size()); + EXPECT_EQ("^Placeholder", node.input(0)); + } + } } TEST_F(ArithmeticOptimizerTest, HoistFactor) { @@ -272,16 +360,16 @@ TEST_F(ArithmeticOptimizerTest, HoistFactor) { EXPECT_EQ(9, output.node_size()); const NodeDef& new_add = output.node(8); - EXPECT_EQ("add_hoist", new_add.name()); + EXPECT_EQ("add_hoist_add", new_add.name()); EXPECT_EQ("y1", new_add.input(0)); EXPECT_EQ("y2", new_add.input(1)); const NodeDef& new_mul = output.node(7); - EXPECT_EQ("mul1_hoist", new_mul.name()); + EXPECT_EQ("add_hoist_mul", new_mul.name()); EXPECT_EQ("x", new_mul.input(0)); - EXPECT_EQ("add_hoist", new_mul.input(1)); + EXPECT_EQ("add_hoist_add", new_mul.input(1)); const NodeDef& new_id = output.node(6); EXPECT_EQ("id", new_id.name()); - EXPECT_EQ("mul1_hoist", new_id.input(0)); + EXPECT_EQ("add_hoist_mul", new_id.input(0)); } TEST_F(ArithmeticOptimizerTest, FuseConjAndTranspose) { @@ -463,10 +551,6 @@ TEST_F(ArithmeticOptimizerTest, IdentityReshape) { item.graph = output; TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output)); - for (const auto& node : output.node()) { - LOG(INFO) << node.DebugString(); - } - EXPECT_EQ(0, std::count_if( output.node().begin(), output.node().end(), [](const NodeDef& node) { return node.op() == "Reshape"; })); @@ -492,10 +576,6 @@ TEST_F(ArithmeticOptimizerTest, NotIdentityReshape) { item.graph = output; TF_EXPECT_OK(ModelPruner().Optimize(nullptr, item, &output)); - for (const auto& node : output.node()) { - LOG(INFO) << node.DebugString(); - } - EXPECT_EQ(1, std::count_if( output.node().begin(), output.node().end(), [](const NodeDef& node) { return node.op() == "Reshape"; })); From c674e27bfd68a6c990e694b6afd901bfeeaa006d Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 10:54:28 -0800 Subject: [PATCH 002/104] Merged commit includes the following changes: 175703479 by yifeif: Internal change. -- 175695370 by A. Unique TensorFlower: Implements _shared_embedding_columns and adds some tests. -- 175695349 by A. Unique TensorFlower: Implements tf.metrics.true_negatives, adds missing tests and does some cleanup in tf.contrib.metrics. 
-- PiperOrigin-RevId: 175703479 --- tensorflow/contrib/lite/README.md | 200 +++++++ .../lite/g3doc/TFLite-Architecture.jpg | Bin 0 -> 48710 bytes tensorflow/contrib/lite/g3doc/apis.md | 359 ++++++++++++ .../contrib/lite/g3doc/custom_operators.md | 91 ++++ tensorflow/contrib/lite/g3doc/ios.md | 67 +++ tensorflow/contrib/lite/g3doc/models.md | 22 + .../lite/g3doc/tf_ops_compatibility.md | 417 ++++++++++++++ tensorflow/contrib/lite/java/demo/README.md | 36 ++ .../lite/models/smartreply/g3doc/README.md | 146 +++++ .../lite/models/testdata/g3doc/README.md | 102 ++++ .../lite/models/testdata/g3doc/asr_am.svg | 4 + .../lite/models/testdata/g3doc/hotword.svg | 4 + .../lite/models/testdata/g3doc/speakerid.svg | 4 + .../lite/models/testdata/g3doc/tts.svg | 4 + .../models/testdata/smartreply_samples.tsv | 50 ++ tensorflow/contrib/lite/nnapi/README.md | 15 + tensorflow/contrib/lite/toco/README.md | 26 + .../lite/toco/g3doc/cmdline_examples.md | 509 ++++++++++++++++++ .../lite/toco/g3doc/cmdline_reference.md | 238 ++++++++ .../contrib/lite/toco/g3doc/python_api.md | 62 +++ .../contrib/metrics/python/ops/metric_ops.py | 191 ++----- .../python/feature_column/feature_column.py | 224 +++++++- .../feature_column/feature_column_test.py | 267 +++++++++ .../python/kernel_tests/metrics_test.py | 200 +++++++ tensorflow/python/ops/metrics_impl.py | 50 ++ .../tools/api/golden/tensorflow.metrics.pbtxt | 4 + 26 files changed, 3118 insertions(+), 174 deletions(-) create mode 100644 tensorflow/contrib/lite/README.md create mode 100644 tensorflow/contrib/lite/g3doc/TFLite-Architecture.jpg create mode 100644 tensorflow/contrib/lite/g3doc/apis.md create mode 100644 tensorflow/contrib/lite/g3doc/custom_operators.md create mode 100644 tensorflow/contrib/lite/g3doc/ios.md create mode 100644 tensorflow/contrib/lite/g3doc/models.md create mode 100644 tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md create mode 100644 tensorflow/contrib/lite/java/demo/README.md create mode 100644 tensorflow/contrib/lite/models/smartreply/g3doc/README.md create mode 100644 tensorflow/contrib/lite/models/testdata/g3doc/README.md create mode 100644 tensorflow/contrib/lite/models/testdata/g3doc/asr_am.svg create mode 100755 tensorflow/contrib/lite/models/testdata/g3doc/hotword.svg create mode 100755 tensorflow/contrib/lite/models/testdata/g3doc/speakerid.svg create mode 100755 tensorflow/contrib/lite/models/testdata/g3doc/tts.svg create mode 100644 tensorflow/contrib/lite/models/testdata/smartreply_samples.tsv create mode 100644 tensorflow/contrib/lite/nnapi/README.md create mode 100644 tensorflow/contrib/lite/toco/README.md create mode 100644 tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md create mode 100644 tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md create mode 100644 tensorflow/contrib/lite/toco/g3doc/python_api.md diff --git a/tensorflow/contrib/lite/README.md b/tensorflow/contrib/lite/README.md new file mode 100644 index 00000000000..b173936f5b9 --- /dev/null +++ b/tensorflow/contrib/lite/README.md @@ -0,0 +1,200 @@ +# TensorFlow Lite +TensorFlow Lite is TensorFlow’s lightweight solution for mobile and embedded devices. It enables low-latency inference of on-device machine learning models with a small binary size and fast performance supporting hardware acceleration. 
+ +TensorFlow Lite uses many techniques for achieving low latency like optimizing the kernels for specific mobile apps, pre-fused activations, quantized kernels that allow smaller and faster (fixed-point math) models, and in the future, leverage specialized machine learning hardware to get the best possible performance for a particular model on a particular device. + +![image](g3doc/TFLite-Architecture.jpg) +# Getting Started with a Demo App + +This section contains an example application using TensorFlow Lite for Android devices. The demo is a sample camera app that classifies images continuously using a quantized Mobilenet model. A device running Android 5.0 ( API 21) or higher is required to run the demo. + +There are 3 ways to get the demo app to your device + - Download the prebuilt binary or + - Use Android Studio to build the application or + - Download the source code for TensorFlow Lite and the demo and build it using bazel + +## Description +In the demo app, inference is done using the TensorFlow Lite Java API. The demo app classifies frames in real-time, displaying the top most probable classifications. It also displays the time taken to detect the object. + +## Downloading the pre-built binary +The fastest path to trying the demo, is to download the pre-built binary +[TfLiteCameraDemo.apk](https://storage.googleapis.com/download.tensorflow.org/deps/tflite/TfLiteCameraDemo.apk) + +Once the apk is installed, click the app icon to start the app. The first-time the app is opened, the app asks for runtime permissions to access the device camera. The demo app opens the back-camera of the device and recognizes the objects in the camera’s field of view. At the bottom of the image (or at the left of the image if the device is in landscape mode), it shows the latency of classification and the top three objects classified. + +## Building in Android Studio using TensorFlow Lite AAR from JCenter +The simplest way to compile the demo app, and try out changes to the project code is to use AndroidStudio. + + - Install the latest version of Android Studio 3 as specified [here](https://developer.android.com/studio/index.html). + - Make sure the Android SDK version is greater than 26 and NDK version is greater than 14 (in the Android Studio Settings). + - Import the tensorflow/contrib/lite/java/demo directory as a new Android Studio project. + - Click through installing all the Gradle extensions it requests. + - Download the quantized Mobilenet TensorFlow Lite model from [here](https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip) + - unzip and copy mobilenet_quant_v1_224.tflite to the assets directory: + tensorflow/contrib/lite/java/demo/app/src/main/assets/ + - Build and run the demo app + +## Building TensorFlow Lite and the demo app from source + +### Clone the TensorFlow repo +- git clone + [https://github.com/tensorflow/tensorflow](https://github.com/tensorflow/tensorflow) + +### Install Bazel +If bazel is not installed on your system, install it now by following [these directions](https://bazel.build/versions/master/docs/install.html) + +NOTE: Bazel does not currently support building for Android on Windows. Full support for gradle/cmake builds is coming soon, but in the meantime Windows users should download the [prebuilt binary](https://storage.googleapis.com/download.tensorflow.org/deps/tflite/demo/TfLiteCameraDemo.apk) instead. + +### Install Android NDK and SDK +Bazel is the primary build system for TensorFlow. 
Bazel and the Android NDK and SDK must be installed on your system. + - Install the latest version of Bazel as per the instructions on the [Bazel website](https://bazel.build/versions/master/docs/install.html) + - The Android NDK is required to build the native (C/C++) TensorFlow code. The current recommended version is 14b, which may be found [here](https://developer.android.com/tools/revisions/build-tools.html). + - The Android SDK and build tools may be obtained [here](https://developer.android.com/tools/revisions/build-tools.html), or alternatively as part of [Android Studio](https://developer.android.com/studio/index.html). Build tools API >= 23 is required to build the TensorFlow Android demo (though it will run on API >= 21 devices). + + - The Android NDK is required to build the native (C/C++) TensorFlow Lite code. The current recommended version is 14b, which can be found [here](https://developer.android.com/ndk/downloads/older_releases.html#ndk-14b-downloads). + + - The Android SDK and build tools may be obtained [here](https://developer.android.com/tools/revisions/build-tools.html), or alternatively as part of [Android Studio](https://developer.android.com/studio/index.html). Build tools API >= 23 is required to build the TF Android demo (though it will run on API >= 21 devices). + - In the root of the TensorFlow repository update the `WORKSPACE` file with the `api_level` and location of the SDK and NDK. If you installed it with AndroidStudio the SDK path can be found in the SDK manager, and the default NDK path is:`{SDK path}/ndk-bundle.` + +``` + Android_sdk_repository ( + name = "androidsdk", + api_level = 23, + build_tools_version = "23.0.2", + path = "/home/xxxx/android-sdk-linux/", ) + +android_ndk_repository( + name="androidndk", + path="/home/xxxx/android-ndk-r10e/", + api_level=19) + +``` +Additional details on building with Android can be found [here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/java/demo/README.md) + +### Build the source code +Run bazel with the following command to build the demo. + +Build the demo app: +bazel build --cxxopt='--std=c++11' //tensorflow/contrib/lite/java/demo/app/src/main:TfLiteCameraDemo + +### More about the demo +The demo is resizing each camera image frame to (224 width * 224 height) to match the quantized Mobilenet model being used. The resized image is converted into a ByteBuffer row by row of size 1 * 224 * 224 * 3 bytes, where 1 is the number of images in a batch 224 * 224 is the width and height of the image 3 bytes represents three colors of a pixel. This demo uses the TensorFlow Lite Java inference API for models which take a single input and provide a single output. This outputs a two-dimensional array, with the first dimension being the category index and the second dimension being the confidence of classification. The Mobilenet model has 1001 unique categories and the app sorts the probabilities of all the categories and displays the top three. The Mobilenet quantized model is bundled within the assets directory of the app. + +# TensorFlow Lite Quick Start + +## Step 1. Decide which GraphDef to use + Depending on the use case, the developer may choose to use one of the popular + open-sourced models such as InceptionV3 or MobileNets, re-train these models + with their own custom data set or even build their own custom model. 
+ +### Using a pre-trained model + +[MobileNets](https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html) is a family of mobile-first computer vision models for [TensorFlow](https://www.tensorflow.org/) designed to effectively maximize accuracy while being mindful of the restricted resources for an on-device or embedded application. MobileNets are small, low-latency, low-power models parameterized to meet the resource constraints of a variety of use cases. They can be built upon for classification, detection, embeddings and segmentation similar to how other popular large scale models, such as [Inception](https://arxiv.org/pdf/1602.07261.pdf), are used. Google provides 16 pre-trained [ImageNet](http://www.image-net.org/challenges/LSVRC/) classification checkpoints for MobileNets for use in mobile projects of all sizes. + +[Inception-v3](https://arxiv.org/abs/1512.00567) is an image recognition model which achieves fairly high accuracy in recognizing general objects with 1000 classes, like "Zebra", "Dalmatian", and "Dishwasher". The model extracts general features from input images using a convolutional neural network and classifies them based on those features with fully-connected and softmax layers. + +[On Device Smart Reply](https://research.googleblog.com/2017/02/on-device-machine-intelligence.html) is an on-device model which provides one-touch replies for an incoming text message by suggesting contextually relevant messages. The model is built specifically for memory constrained devices such as watches & phones and it has been successfully used to surface [Smart Replies on Android Wear](https://research.googleblog.com/2017/02/on-device-machine-intelligence.html). Note that this model only works on Android as of now. + +These pre-trained models can be downloaded from [here](models.md). + +### Retrain Inception-V3 or MobileNet for a custom data set +The above pre-trained models have been trained on the ImageNet data set, which consists of 1000 predefined classes. A model will need to be re-trained if these classes are not relevant or useful for a given use case. This technique is called transfer learning, which starts with a model that has been already trained on a problem and will then be retrained on a similar problem. Deep learning from scratch can take days, but transfer learning can be done fairly quickly. In order to do this, a developer will need to generate their custom data set labeled with the relevant classes. + +The [TensorFlow for Poets](https://codelabs.developers.google.com/codelabs/tensorflow-for-poets/) codelab walks through this process step-by-step. The retraining code supports retraining for both floating point and quantized inference. + + +### Train a custom model +A developer may choose to train a custom model using Tensorflow. TensorFlow documentation has [several tutorials](https://www.tensorflow.org/tutorials/) for building and training models. If the user has written a model using TensorFlow’s Slim Framework the first step is to export this to a GraphDef file. This is necessary because Slim does not store the model structure outside the code, so to communicate with other parts of the framework it needs to be exported. Documentation for the export can be found [here](https://github.com/tensorflow/models/tree/master/research/slim#Export). The output of this step will be a .pb file for the custom model. + +TensorFlow Lite currently supports a subset of TensorFlow operators. 
Please refer to [this document](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md) for details of supported operators and their usage. This +set will continue to expand in future releases of Tensorflow Lite. + + +## Step 2. Model format conversion + +The model generated in Step 1 is a standard Tensorflow model. After the completion of Step 1 a user should have a standard .pb or .pbtxt GraphDef file. If the application developer is using a pre-trained model (as defined in Step 1 above), they can download a ready to use, already converted model for use from [here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/g3doc/models.md). Models generated using retraining (aka transfer learning) or custom models will need to be converted using the steps mentioned below. + +A prerequisite to converting the model to the Tensorflow Lite format is to freeze the graph. + +Since we employ several formats, the following definitions may be useful: + - GraphDef (.pb) - a protobuf that represents the TensorFlow training and or computation graph. This contains operators, tensors, and variables definitions. + + - CheckPoint (.ckpt) - Serialized variables from a TensorFlow graph. Note, this does not contain the graph structure, so alone it cannot typically be interpreted. + + - FrozenGraphDef - a subclass of GraphDef that contains no variables. A GraphDef can be converted to a frozen graphdef by taking a checkpoint and a graphdef and converting every variable into a constant with the value looked up in the checkpoint. + + - SavedModel - A collection of GraphDef and CheckPoint together with a signature that labels input and output arguments to a model. A GraphDef and Checkpoint can be extracted from a saved model. + + - TensorFlow lite model (.lite) - a serialized flatbuffer, containing TensorFlow lite operators and Tensors for the TensorFlow lite interpreter. This is most analogous to TensorFlow frozen GraphDefs. + +### Freeze Graph +To use this .pb GraphDef file within TensorFlow Lite, the application developer will need checkpoints containing trained weight parameters. The .pb contains only the structure of the graph. The process of merging the checkpoint values with the graph structure is known as “freezing” the graph. + +The developer should know where the checkpoints folder is present or checkpoints can also be downloaded for a pre-trained model (Example: Here is a link to the [MobileNets](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md) + +Graph freezing can be done using the command below (and modifying the arguments appropriately) + +``` +bazel build tensorflow/python/tools:freeze_graph + +bazel-bin/tensorflow/python/tools/freeze_graph\ + --input_graph=/tmp/mobilenet_v1_224.pb \ + --input_checkpoint=/tmp/checkpoints/mobilenet-10202.ckpt \ + --input_binary=true --output_graph=/tmp/frozen_mobilenet_v1_224.pb \ + --output_node_names=MobileNet/Predictions/Reshape_1 +``` + +The user has to first build the freeze_graph script using bazel and then run the script. The input_binary flag has to be enabled to ensure that the protobuf is read and written in binary format. The user has to input the .pb and the .ckpt files to freeze the graph The output_node_names may not be obvious outside of the code that built the model. The easiest way to find them is to visualize the graph, either with +graphviz, or [in tensorboard](https://codelabs.developers.google.com/codelabs/tensorflow-for-poets-2/#3). 
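If the output node names are still hard to pin down, another option is to load the GraphDef and print its nodes directly. The sketch below is illustrative only and is not part of this patch; the file path is a placeholder for the frozen graph produced above, and the output nodes are typically the ones whose results no other node consumes.

```c++
// Illustrative only: dump node names and ops from a (frozen) GraphDef so the
// --output_node_names argument can be identified. The file path is a placeholder.
#include <iostream>

#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/env.h"

int main() {
  tensorflow::GraphDef graph_def;
  tensorflow::Status status = tensorflow::ReadBinaryProto(
      tensorflow::Env::Default(), "/tmp/frozen_mobilenet_v1_224.pb", &graph_def);
  if (!status.ok()) {
    std::cerr << "Failed to load GraphDef: " << status.ToString() << std::endl;
    return 1;
  }
  for (const tensorflow::NodeDef& node : graph_def.node()) {
    std::cout << node.name() << " (" << node.op() << ")" << std::endl;
  }
  return 0;
}
```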
+ +This frozen Graphdef is now ready to be converted to flatbuffer format (.lite) for use on Android or iOS. On Android users have the flexibility to use either the float or quantized versions of the frozen graphdef, if available, using the Tensorflow Optimizing Converter tool. + +Here is a sample command line to convert the frozen Graphdef to '.lite' format for The Tensorflow Optimizing Converter supports both float and quantized models, however, different configuration parameters are needed depending on whether a FLOAT or QUANTIZED mode is being used. + +``` +bazel build tensorflow/contrib/lite/toco:toco + +bazel run --config=opt tensorflow/contrib/lite/toco:toco -- \ + --input_file=(pwd)/mobilenet_v1_1.0_224/frozen_graph.pb \ + --input_format=TENSORFLOW_GRAPHDEF --output_format=TFLITE \ + --output_file=/tmp/mobilenet_v1_1.0_224.lite --inference_type=FLOAT \ + --input_type=FLOAT --input_arrays=input \ + --output_arrays=MobilenetV1/Predictions/Reshape_1 --input_shapes=1,224,224,3 +``` + +- The input_file argument should point to the frozen GraphDef file that holds the model architecture. +- The output_file argument should point to where the TensorFlow Lite model file should be generated. +- The input_type and inference_type arguments should be set to FLOAT, unless converted a [quantized](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/g3doc/) model. +- Setting the input_array, output_array and input_shape arguments are a bit trickier. The easiest way to find these values is to explore the graph in tensorboard . The user should reuse the arguments that were used for specifying the output nodes for inference in the `freeze_graph`step. + +Note, it is also possible to use the Tensorflow Optimizing Converter through protos either from Python or from the command line see the +documentation [here](https://github.com/tensorflow/tensorflow/tree/mastertensorflow/contrib/lite/python:toco_from_protos target) A developer can then integrate the conversion step into their model design workflow to ensure that a model will be easily convertible to a mobile inference graph. For example, + +``` +import tensorflow as tf + +img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3)) +val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.]) +out = tf.identity(val, name="out") +with tf.Session() as sess: + tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out]) + open("converteds_model.tflite", "wb").write(tflite_model) + +``` +For detailed instructions on how to use the Tensorflow Optimizing Converter, please see [here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md). + +You may refer to the [Ops compatibility guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tf_ops_compatibility.md) for troubleshooting help. If that doesn’t help, please file an [issue](https://github.com/tensorflow/tensorflow/issues). + +## Step 3. Use the TensorFlow Lite model for inference in a mobile app + +After completion of Step 2 the developer should have a .lite model. + +### For Android +Because Android apps need to be written in Java, and core TensorFlow is in C++, a JNI library is provided to interface between the two. Its interface is aimed only at inference, so it provides the ability to load a graph, set up inputs, and run the model to calculate particular outputs. 
The full documentation for the set of methods can be seen [here](https://github.com/TensorFlow/TensorFlow/blob/master/TensorFlow/contrib/lite/g3doc/). The demo app is also open sourced on [github](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/java/demo/app). + +The [demo app] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/java/demo/app) uses this interface, so it’s a good place to look for example usage. You can also download the prebuilt binary [here](http://download.tensorflow.org/deps/tflite/TfLiteCameraDemo.apk). + +Note that you’d need to follow instructions for installing TensorFlow on Android, setting up bazel and Android Studio outlined [here](https://www.tensorflow.org/mobile/android_build). + +### For iOS +Follow the documentation [here](https://github.com/TensorFlow/TensorFlow/blob/master/TensorFlow/contrib/lite/g3doc/ios.md) to get integrate a TFLite model into your app. diff --git a/tensorflow/contrib/lite/g3doc/TFLite-Architecture.jpg b/tensorflow/contrib/lite/g3doc/TFLite-Architecture.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc83946647c6a923a8a0bd3a041b42e4febe6a31 GIT binary patch literal 48710 zcmeFa1z40@w>Un8f=DPK&8VcLbO}R9OLvZdbV;`(AfiYJ2q-m3BPHD-9nvY?-5ta5 zfAxIlTs`O9@1E~F=efUo{?9i)FY~_h?pkZ@z4ofTycoHd23?btl#v9Xpn^asz(3H% z7)Tt1dHFKN@D z>U9*f>nInEATTh_ODNwypnrT&P|?sYUBa5;{6i+8cNe zLce|q{}#LGWddbGjNA5v96n+1F{#D!KM|?)ZqslYIrv_|xors5$lWu@)uC%u&hl)@ip$$`*inxy7&dRz3z@nAk%2PLeKKG@4W1`h zkdNGWrzCc;Te|y7moW}6zrO&9<7~|{AnOJo%ZAkmf4g2067UFFz&SQ=?d#+-G^GpB z`{zfyM70v2hp>m1;QhOkNU_j$9C74x@~Kvhb3zoK|1|y|wESnb{AUsUzu+x*3e0tU zB|9%bS8%pK3BDC%A+Q5{SOqK+Bs$F$`zj0nh@+?gr=z+e?rv^#89V^Ci)(iTzW|95 zjVYl>5dL_5&oei({CDyYK|1tj7oa&2vNy^SAi}@wQgeQF(9f0iDtVB2fhuS8DVwqZ zFI90y%i}`tis?w35snfF<3Y#OqS}wXtB}guml2+Up!{#6CB=9TFnlpFUl|Z?N)M(G zt|e36@je_`4xcwiT;zxwF2a=9zq9b{NM~G~tQ27a*Yww34I`+6^oT(S#~(iyeeoW(PMl zZ>1b}DAf$vY|sXxP=wnEsx6)LU^ssp)6JXvx1)faCW#g{+cTpetsXm313iA zbG!g$M4a~{GEm_SwI>+7XWhVNhP9FP^4RqPbaU|nUyK+r)Z0-; zo+t*~83x73bnf|g!p2Kf69Ee%R5y}s%vM+TR?KlVV8Zs|_AKQe zz0#5<;UElRKDwsfN-Awcw3u32#o5#JqBurm$0m_}46QMke#jYXS-rs=n#u=MN+>CfFQnxf&R$F2v%rx+sUAE5Q zzU7!Q@nj{KEI^nn)=qv^cO_=V3u5Gl2Vg$iE z{LS8&++%Kz`eY@qhY2>c4Npv36y9|@@*045^w;Ugvm&FIm#jLQVT_e2<~q+Kl2+Fq z6L2)V!Thp}cieNCE;-lI;P&99oY;!{8zp?NH(C#jPx~yXD2JTn>`G29K#X(tf#Q*A z$IKZ!%!2vtCBkKS^5y0?CPtT=Z=o&iv6t?2g6v0yQ{886u(lK|d?#<}7bNhqdd}5? zbL=bP<4VZhloYnogma+r(JTcXWRW_R(-=M+O07Go3NCM{Vd}i6{*_i+XLBc5p!Iv4 z?%NLjjp8>%QDf)Znk+jfTXflB`B1b=#5wJ|L}@i(1CRtveC_~luS%Bya2?Z+ zth@l}fcDR=BYU<2!!dnYsh6#MxAhzQ+yc4XQ%rWpo*P+J4@2hBU>6{REkv;UkuPNa zQ7E#!7^Jn+)Y!XrrlKja8|`l-WSQbZb$i%m_8q>aKOv1t9K%iAFZa$ZVU%C_!Uf1W z`gGWFX6uAF8@3kMb6KN0S1xgRC~9YJqd*el-g+G=B;MhWjJ0@{VRU;u{@L`sKJeQ! 
ztirkJ3(%NS#L)w4yG0AGS9k=9X^p4e({k2cA84B)*h-hoxmk4UNAtdH(zr@cds&Y~ z24v*P`#vi3<$t|EDUP;o)wPMBpJiIPU#cR-{Ib$>sJZgPjF%Na-5W2F7a(A=r4lNOT>6VH5@D7z>k850{L*mEcO^BS z;@CVIw_O9wd{;f=#Rk#_v0gDA&}Zf@27>(7U269>@XpVO8jdR)qB$(B9Mea+Uf%Ya zvL1oPfR%J;~ zsbdJsd}hL2sWUaGnyVJ~8pxVo!Vcr_$}n3xgi9Pf-M?MU7zo;9dV5bbkI;Aa$~%-U zN>m2n^i&rC)@NKc__|&v5$&ld)bIJ0nH+$EP+aqv_`DY^bP3Jovl_JTqJ2K)&NW_> zb4;8R$Kq_=nNjzMXtGcip&<6-iWh6q4%f|+F$}6#dEi0QbJ3V8zYW1jLv^nAfX^zc8nDUVXv$*RMzC4wrr+2y3 zY6iLFUO1OcIi=*1y2O_OD&+hof-QS_c*?xLs!d%%MN9EB#(VI*W@@hp)#D?oy93E* z>>)Gi&#z!o&yDZsNBJ1K8m6RH@6TFb*j8$um2c@3DPhE0PT~)6tzCeMS7iI{lY3Z0 z?wR#ZCYA{^4N>2SXn5Sa^jhgAM77Do1W+nxV0iKPgQKQ6jy${@%ZW}z-CxPnw;Jkg zaaEoo@y3-w#lFf@2DI?)_~vrj2JwuP4jT$BMGm1VU-s_4kKDD`mKtQq<5~?gAET$p zo7z<4<|wWD84~dX*^f)Li{0q(TR^In4$mewRxKv+dyC0g-08{-V;Fqzj(0S95D}u( zxwS|SuoZU;40UN6?+)0iw{~+6FF$UxOVVy)Bn-Qa^5)=j5kn$2^!oCZTpb3DEA~_m zY(2bj>|G9P7{GdvBKQ6dS&GtPH;n>v?nm zg4c&`FthCi4A_I)JuVR~o$L^wUsgOpFm%7DUFLvw&qy6c5uY@y@`Wx4p$6^&d@17s z^wSH38f>2wvb3FiCUq4tb3h@ro8Tkij{Ic^oIiJUp6ri&d>TT1q9~G7bpaB-eE~{r zy8uD_Nq_DP*;{`B;`5fejz~{bHh}#ywxEANK?t)jgvALbzd)+?Fyujg9-HJZeL;#I z*LH!G8)0{r2{qwMjnJP)_Fr9rNBN*jcz+QV5WftoN$MZ?{&4^Vvp4Y&3V0_7CbLf- zk(=*rT-fM2xg0<#As!n0_FP%eFO?(BIaqIMxQ#QC0+}3dCIvEp{)#ID*k_dK&(crS zA}KCFt27rNCqO0vrEhN`MLUrI`If~hEoYy6-JbbtCt>H9nl``PVjkCU17gCf`xhV> z!m1*dxVd(h-5kDk0SX3L{?X<32g{+fyIp{gBClb?b-)IxlcjqBYAJ$l;w@-+0kX$U zX~^0%Y?(zp2T(Of*^V%F^U&@cKlJ36+@%4~wuJZi=OxD^rSP$+@6n(>tVTte z7^2wE?6)53200cDGwEpu^Qq=^Trzp&^3d!~UsdHtfK1QUzu95|DeHR8%BkuTFra+a z0y4heX@Be{Je7Cm0(1&s{&X3V^CF-1!zD9xq?)u zm4ckUgiJu%pWZD=-L~^XMQK@ zZ~%3!_;{zs;M1$>RRWBK`Zio$x`?z)8Y7rC|7U2peTvC)^Xe%pDckyt}S5DfX=`slHjZoC>2(M6c=)g44Zod1Z7`?(~p7;v- zYmU|>EFly(PvPui#0$}(7r#yaZjF)2&pc&*74 z@zez!YRjHXM&^AEEJ}>L8IqK(r=_pco-dDo8+UTfVzYC+Suw3oKRSI~mq#hsAI-X~~J$+E(B!eqnxJ^cbYEQ#jf4QCA>DI8P*4$W7q0Q zLrCVIlad~!u||9B$>G4Ha4ttBfwqg+cI&>}b5e;fOtvlh ztI^TD*{+;+>ilTt6i9WIQs)g^79NuHQdXt-oU$dVNXv!>mpuzB$H#qFNaaAdAj&M6 zhxtT|?f8SK16_PoLISe(8uD>XM7I48s%p^3>bjC^MU^{D952=RO6xxoBBp#2*}Oa; z7aKXUrk+`9wJIv%q$BKetK6=149sLIsR*ktXpXgacE9KHk&}_1ssXMuswI#g7@@nr zYC@rZN51*YnW0xFDn>7V_3&``BS%)mr)9STBC28=qO?rrySW1-bqGe<=W!NiQfVcO zd@p2^(kY$F@6_S}s}u?_VbODo&LhRX*zF6@IUv!TJAsF)<6dnbLOFRy>CGev zHG2|n{=psQ2W{*3>io?F!-adW-`g0oguiDQCG*}1tgxpr#%;&OaYN&gcFMSyR_US0 z`|pHn&$t~sxrlSHcFOoz z6wm0=&Z$~|yW*jTP*L02OmXT%#knr4VBb|;vnlKxa*r&dc-n){eazu{tzETFQT$-p z{X2`YTys>K?;Gx+Ia8Dund!08U4Whyzu(hqS~%|) zd^IL-gJBYpV+DgMdy8q5a*TE(Q3kGtQj65Y3?9yEj7Xgg#OTkZKi_0cK{dl}3_!BR zTZI#IWYb;ed=aWP@dfhP9?@n;ZuRGnLA$YaB%0q7 z<2aIqwvd3k%H@fX;x}ry0zcTAn%`&Pd>e3|*(J38Z8(=LDMc1;5H4n}$8oon4iUeT zK5ih1qM3qHP4*T&J9JWirO>Iq;sSJ#xjn($HzgKuuL3H>puHJ}VN6`@}BC`2u9Q_C#|SiPCW%vtetumN|Icp6xkHv^+Bny503S zFl;CUuP?Nwu&o=5?@g6;GAdWn zaEIAMaRywoB7vmfft^9M9(&>RO!}Q?Gf5 zQWfjt6L34}`K}v98{uY%$LonF#rB_PV`N!!T&I-pRoGY#E7q;2#yU60gxWDblG!bK zM$mJR>)Ht0C6C=ks*7&rK8iH)e24w2x0E6@Rf49QX)8+7MM#b!W}#`0$lQVENZ@Xn zl%%D6&ht|7{WJuyIF*5QtB-Mz%6!+I^3FXJWEvp&R^YAamy;F3TNgS3`S15@lE-8E5bm*EEWc#TuZj68TJX2m$gv=(_=*T=|2Bieh zAA#Q(0G5I}hV&(dyw0r&)XQI|=q_)C)uRCRPSLCIR^#sdjVQv@TyV8(q2p!>R%(12Ti(2U+I103G!W z%g^GSpz3*J?B!yWv1`GNS=9+Wj*P4l(=ipgn z3mi;}k+H@pEd!==<6q@8B>GO#4gcB2B|?b&UdPo_{NBx#@f%h9dmYzR{W~95B982z zu@3*?x*vvz70ql-wa84MRo8Yga_$H+6(keWw+GhT0yF{ImL3?p9+8tr^oyCAk5Lmh z!{p||r6`8^4Q>deFv{}@!Zjn6G9@E|C2YhhL$P74M35=FB|RZ0GQ}6S`m=9LOozQN za=>ii`lwE=kUGB}5ThJ#Nl`R7(N!Oqo<7Bm5O)1CNds{zKbw**H&}m^k{cQkM?u@c zQXk0hf#%QZUjMBA-%BqeRQDV^(-*)K*fa^ew{fWwF53mn^*q2(0^h#?)hHe-UG5|b z23+n2F9Gw`$_2;^zMYK3uO>jk`jID)84+?W4PFGT2R-5flzzSos}DVv-B$#>Mv71#?FMx-uQSdBuJ6FvY7xNQk7QhX^Z%5-sv3ug=<+75RV6#<8OuSR)JYHhI 
z#iNx?&+;c09hS}s@uaWxSzpe~(T&9p);mjw!W^|vxw`^vC7L$YO_h&~Ki%@~bb@{l zMI7DLje)aQpE>0w*y~GDe(iVAe-AmDX<+k@qXt<0 z_b5-qiD4rD4((Fkqx>n4`9JF$^kc_FPg+z82a+wAm$cOt$SH$CI)8WQ^I^a~GoZ;cZC z)0eYfecuM=@Z(p$9JjZWbdEi&UZlCcyT#70fh#1mN=f0o;d&I6H9cAr<4!U4we!=| zZ#7pVdnetV=(^2>-wB_geVCX>kr2th-4Hr)UIH$%nOHCiPY$Wngt?uU;J2+DQRMwt zRok3kJSAN2wr=CIOtS+@3tKM;cU2DxqYU9^qDKaP+``@D9i6U6p`(WUavc;aD(~%&Q8zZ`O3$;!oD)FV&WBe)c=T1!FpD>{96^8yz4y7u|8V9hj zXFGgP9=%83TCU{;;yDVKrMtge3w@vR6Uw#LDtw}B@KhbGUsIHB)}7TdWOvx%bg)!8 zDmqfuzB?j1jd|JAWU;LQf#Ad^Z!EqB3HX7T;*+*~7G?7~3(5fN%J0`8FL+Q>|74 zbyZ812*c-|lM?l_M%cOLbRWX$7nIOtn%yV{XEJB48e%v-$LuvTC4;i3OLqAn>#=wz z2eOa?Ysk_dQ<^Z?Gg#K#W+Aj)L2k+a@N~d+lrSo))*{et^A-gsz2!^Q=9C3jlSRJ4 z`jW`VsW5=Oe;d5YPcW#DGDWX(*T$bOX?{#aWNk9U(=}k66A7r-1|USAU?|>&Q7hP0 zJb5z}0NCEyEf4iP9y= z_t9LM1V0%6f8(WM`YxPHfAD+Tzd4%9Uln@vk9KfXTP{Gm=zho=45aU%&Jqf^ z##lIa0Xjhyy#PVc0O8#-$L<0|)e9?wM7ohKHK2WI=MAGJREF|-`cK>%64Lk*@0;;N zh7YyKS+G^Lac4a1CACUQB!4y;Q*HvIQhIB&ZrtGAc4z*v72~S#4U>1aHk;Osd<)|y z0c+Z6hj*O36}@&yDaSw*w#$4ND|$IZU+xX5oC#fk2*UY|LPrJMHXl-vmxw!LtT9IT zMd|VCX(Wczljm+SFP4rq#*>w)2ha0KmKzXLHVW ztuG%EbH?e%ca^MZJ$3W#k+@xCjwKipcQ)8Em_w{8cXNl_Ecg|VB!?nkp)a#!oQqYH zU#YA`pi*her{wjYnwGxU99mPiWAT$B^H;%@Fkj9?k3m1ob0kz)7n!WnWS4O4Z{{0{ z)MnZv@5YQLk`W4C=4y31>+tl;7;C*D`Z^mTT>PGLqT;wj2D>eP7K?X1WClpxcU>5o;F#Zj{?{k(63fxk zNg?>rpG8cs`fO>?R`nNGlBjG>k98A!IcLld6m<&gsu#SF?&a-f(8FgBJo?ZkGtueE zYXiwmf2qIUnxePH7q;~VOB@dB8)1PU40K$-$sCnW z74{Y^*5aA3f<|q^go9JHCt4^uGH zc4wB|bEZt3cI?-h3rOftQpxstp~dX#>caa^w@x2EoGnTl%#q_-%epJZ_fqK*xV>Q0 zSG&`w*?c#vk2+KOH3bUXlMkNNd>n`_O36z@KFwzp9V+X6Kk*{FzT(87ZJ&>Xa{6 zNtuS-CRu9Fz`%kuS2I7A#6(wdw>WP?WW^FZOMfY&w(O+T*yVpBkYEq!L@d#WJvj0N zJST;qDS_^F!OrGk!5)w(_&BsZlm${&8wr#$o~eXXtswjJ&Z)Z~GqgbTWdaZ$JVlK# z2b8R+4x~FEOL~Rw_khTQ8mZ&n z@4Gb!{i$?^;wIaI)S)nW;aS6(LhXJi-w#a#DzYxy$r9vz7k~-;Q{kV+xbDSkb`)?J z4_l%7WAK1Sj%~ZQ@HPZ4Kz;Y-zH2x1SjM_`DGsp4wE{s~n+MvRQSmH|(y_xN?K?x| zuw$7#*wP#DQIq3P-E^j)3}-2hD*200E*4|X-!$^(AC2?_K*Ss&-2t)1OH(Fkycii* z_&IEgS*pFtUN-9LB@K;0S8Ry~wp0{*#Ua$cY3Ap#?8ClMf=UNw4`Ws#?cA1cRF?%QKLwp?YLH{d;lRuW zmhu8K4aUze0igidi$BK@{ORwXqrd*IM{NEx4MAto>;-K8iv2$Lv*soJtrw7=?+xgT z5%;|%a)vcU{KnR%;AfIiFQ-s3MrtFUe~sy$KI5WaO}S)lKls)Sei5U{q#m z0&y;Ez)CiVaJ9hMs1z5l6+iJ?WeON{dRf#~V_n7kLE?#5ZCO6OYn(pS&r#h&kLcden!JWhVcm1}?my;@E$uYUk>XH& zX1i(p4*iRzUR##L(h}{2hl-!25ljGkpRhwdT{(#{1CE9Ark=wN4K##AQ?&F&OC1K}corgS2X8$_|I6>SqG`QZ)8bI*E<=ZMqW4Hs6KCDQoMRPi zREqoV#O1Mj?l_N$z)EGxP$g%?b)VoH*|CZ}GZ&y4mLVXR4;TIp*gXFMz;aTdw|l~_ zC{BEpu>~XFT#Pl6ieYFAr@a%si%`0{P`r6|bnzL1$j7orCuVf-fNlL3NB|4zPtb7> z!0OqEM|JQ@^WsY#(kAVT^Sk!oCv7$KK!tC1H@vO^~?87l5 z{mio^{hRl%5RE0rKg$p-Bg(;+WYkb#WSkm$rkNU`X{qpFrsWlP{bR02^s5%SO&+Up zJVA-46oG37p8bxN(PWQ!R{Ikt6U}mX)%)fLxsO6PuB+VD4z;8W%Ik1rsvI+Y8A27! 
zowlAcxkL|0Gq$A{AQ2}-C-|2^i9-US*ZbtnT%u8#w?Gc;(vwtn0=>qu4tm&|EfaIx z5zWSSw)ZVRjwmjf0gm6_X+f-p0#>Ylf6qpm@UC~Q=k1TJa+ohu z<($y`?rc%%)w%%Lsw35pV*uM!-)3v=@63d#Ku9SQj1eJzMi=cON*~1iHrdRZj$56S zCZfq{lqzksCX#5bnc9?w2!q-$aKV}FR{a`?rkCCI!c zrIZJ3)S>31?#g__nE+3pVxfR>rmp;ZsNe1E2rX;pA?0yhC8 zzi0m}mH4G#M|>b$;m)XOF}eWp7Oi5DSgNwj{HZ3rgat*ry=yu-O|}^`I$6;|&Zcv; z#{mAZdtf`h zidW+{gvxqmN`S_SqNxHnK5V+r9l49AYfo+@0v3+ic%xP5E>1aMC^IdoBEA6K9sqCg z?&YNc8iqfcYF%e7Ff$SX8EgFB>;J5q|6$z_i~ymnVpIK{&?Oxtle)a*%|w(1T)3*% zVMzX@*yiJ`+u`1qt&#_E!#?PxbY{FbOdG${>EUM1_*VN9w`Wc_N8^#FT)>7kr0mZV3ux>C@Z+^0)>xsE!Ox|O?a{O^dwo- zk9lr?$?|w!Zo%xbf)*rmyJ*}AZK$$*d!t^(az}2-F}S(FF-Rla+B7G%Bsn*QJ&cT5 zG(NvSLLJ<2xF^b~WpL6b_PA97YU9!_WL@_99{&AnWnd>?SaXu^l0vU>c@%lRJ(TwVK%#5u4lGvki*(os}YwC*H%H4YnyH7dyb68wHZKd^Z zBvD5~c=+mCxS<;YPqLE6KH0ml%=?j|dtj>_2~51<9a52?e@8}5{hA*%2k(n{PR2FP zrR-JhY~+~77^t(ybb34a*z_`fi=CUnEPHr%8R1!P>8GLTplfyD&x0$)(kS7N2BSL+ zBZH067Yp+2HWs(X?_Lp&@W%b1P~3~*eK9{8Na~Fu zL|G@gl&n$MU6=2k|F}4XnO&Yv^Y+604IRHW`scu3zB9$g15Tm)ANUn1j)IDF?o& zT+wsuhJ&8=1gTeEO?Fu}*&16hO-Q?U#fQ&bcVC3%&09s}(ckrFkoSbZ4_rLlNC)Gf zMX=#PquXOj3VwZMC)(?S+EWq??P zQAUha@jc-X?~TKqZbm{G;Z2Xh$El$pcw!FZnjUv^9!I!K(=Nk)lg04^-NMe@d^jop zN=e0xzrUVTzFz_Kefti6dca|UrkRkPmC$%SAGHN}k4u)Lxkqr7Jag27vZPEa-?>$4 zZ>)lLo=&fc2DCVX|o0+Z$IhTi(##&W@-t49=(=Y1o<@ckECvPFq1+FP(f_ z<1A%RpNyxhV1TQw>%KeFI_%si)uiv$0$gpwb}eyIm_x5;r8YgR^P#fLhMa-1h7fHo zU}gB4+(6~4B>^=z6^LOTgP$&1S^Xk&HVxFylLMAVy-pi9Hl#!bWYTU4Djx=f!5F0b z#KQ1}14Mzq?=ttlOriaoDX#%Q){W=;d3gs!Mp@s4QO&QyD3m+lD{89Wc;N$FDcE)NP*x1g1+eu5{MR%Dw0Gjnjjwda9I^mJU~~6`K~l-NG*FWK${V zKd_j%@B0NfA=C!eX$xO;HG-w|Kr*^3Vp$3_~thFzYsfszLPcCmP z7TT~xC4>dA*|vYitm7jxI@0-gW=-Z_9+nP`d>w>Sg)Ht=)gQd9?;EOOuftql=|Sr) z;6giXo~6I>q`Irb#!lYF0@?3b7J4Ath&@22N>0ghq#S#*qtP&wMb9awI0)*ofLDN+ zYEObUDYw78VfIeRkP!EhGmzHf!^t@1`o`*MTw$tMoV(h-XN;PLiX~ZO3&PWapv2>b zDw0S*+bwBsbEFCLz!W^Ex7}{9GNH8`WfBnba=cnX1B!DT4z2aJBhH1K)~7-*TdKfv zhekYa!*_D#!iGHCNUBER_uKC9qKrRrnwb`vT65NYeu{^zzUPLf09j+*99wROq*@lx z4?Vg!;q($EhIZ{XUKW{5>JSS&Yc}XmWyZb@2l&9{MhB4Fp-E4A zF35;Wohp!BM~bc?XUx>0)2ce#eHCh{3EtUA9_RUKJ&m{}ETyLKOdq*`XRWG}Dh#7$ zk`J+ILmK5aDAb-@AEIB}(o_RaydXdl*@974rk|IWm&aGPq9I^9s~Nc&z3F)VG>+{l z3zh8C+a<5;f=k%TDd#(20)5YiCdZ9PsDof16V(PZp8dYTTuoJ9)V8&tHA8PkR7jq# zPt#c0!R3CDvIP6@m;V=2F8rSlzX83ztSBq-u$@%;W{|JofM)2zmj@T12{D-Ld=cy# z_cI%uI*Z%Qs+4M{nnHT_E#xKj71~Mohtn2t1B_`hv2AEVh={`*x0%8R5t4hGzMtQ` zdjDi&e?NOePP1$v<)mvQU?Ru~Jg0rIIBTE#WSvJS8&{poCEf;V+jtlqp*^0xT|zwUjPFx?1aeV5}QjD+U z9@tz4jMN|;-rVOM@^{vHJsI|MAlDnb*sx2{ zoP?;Ej(})JvW7f7as!8Gx;C(S!O}$l?usVjV(;-+! 
zeOt~AwkqKsGwHbQ43jOHI_U*zOI9P%GTZy5{-))Z`2>6>YU?X8Zw8JM1rlPu>cI*R zJjEh$Q#ROKYBkC)X6HC6SqC&HCkn%VV~azh38o894il$#M%SdzaKhjqhLYfPh%R3(N}zw0U>bY( zmsa5}se>F@MxkKdL*);RL%K5ObGL&e$P$^wf>2xN;1WbhJxwJOBY?dQ^B-|ZzlnDI zXTRT}bbYTEmM}ft*DFGA%?-UsGibPN$dS3mtSstR4`>B+GlhvEJc^ zkHXybt!)T4b?R!R>oNz4E)$7`im=N`s08X#^YY8N08{VJIpF@c1gjd`W1kGmBN%?3 z(=oT;i)fD&OIj0k5=wt1WZe$iIR>Dw)xBguTg74mB$0JIe82(b0YzYjhkkeeR5&rI z2Ks&Z@9+Op_x~5TQmBoE_ORKMuJN+2yWWwR3IcnarZr2(Dwc$G>Cb8yuSyf582!sp z!_@kF~9phLWuuw@}O zB)W{$A;!8E|1e6k* ziMp94J%NUuyFkqaYG<_alzUZr#|FLdF6nJ9W)TDZ zN+65(Ckp`+ACO?>^e_Sl>F3^~_kqnV9xc}nJr&gXZ^xY8hMjNxIk=yi+950qz)l>s zgMU3wFG6cJ7D!XH7(6J>aaXk`>{D9L|7fz%y8!vr&VK}g^Yp!xHD-jJ@S18N#X;Mv@WyqugoRXnfKn)zIbL#A13PPYQ$SJZh`xBv5LvorJ7 zHJh1ge>3glR|gR8hB3^LSHD9gmKAYyzbZ|7!zt) zkgOBUJeI}7KZdJESv8mLqCWITr|=uz&3@~3DSgsF`Y@(lI3CMX_o;e~?X~Gd*%}q2wUuGni zF41+_TL7!tefFGgO08!paK$u)A%;H?^?^Ic^ewR$TcU*yUA{AALWZqvIAx(&orwp< zj)Ce2SgXz+vJ%MmXf?o`i{MOM8?Go=%a66xlEJq`kWi=h^slmn1p_9j zBPpvdPRj=^0#EhVu8YO-f24o>S$m(7OEdO_&{=^5rIdM=L zkH?T=JdjQJBGbw_FH+P6e((vhe;s<)?RS3{xsmZNCfQ5P2ACdj7y((G0$b~5+0M8$&{*%9xTO(j(b3C@J_)0qX6%5szmJ)!CytpO?kal0d&OWp2!E=3yGf~RMr^Wes6(v~@+j68#Z8AcnmrLP6iY z^H{&*>%2VppkwzmXW?U2*+B7)BA!AHX))h2%C!&G`grm?UZb6pxnbi{RQ=XcLIR-% zj6yuC!}{AL#rO8et#n@ne12y3u=NvHRGmHY=C*V3^H)@PsnFOCo}26sIXI^R=b3t( z&o!m{?~zjZq))M|2T86FB$Bp4&Mo&=MF9Qm?r-c%O9-3`5UnrX>GB1ry#nip$EU&( zUzPIzE#(?zPxA-aLcQzwJIO;gZIlK19;c99nfl>t0ktmK>kgi`QpO4xmGw&zH-Y%T zy)*jrZ3ZALk!%VH16EXB;B~;{u0Gs*s z>WweroO6PjXrY$HHnT!v!}nvQgOkr_zKmmejh~V&e@~bH0^ttKYUUs9{1ZYd{!#4z zg@pY5rX&pA{3JppqX=bm6z}58VH*Q+n=N=cUavh)3GN9=vszGpz6VUlTjby)#XX{Z zWGo;)Rw|xO!%s2u-u`!Qa_9b+Bvz)3ZJX5M@o3-K$va~HW7TNJc{$GTkyE>Kv<=u6 zpmF-U^JR~cc;vJJYGkOjj7QrCahTPT z6d5pJGQL~EGziJxpE|`D1&;av#vQC*J$)M;UPedxf|KS4nDI*OK3?$!s8$Y1_~V0} zDpG^(j}?Rbv0`B5Xvg-|PXGN;n|1+S5ig>eo}gXr^6K!fQK9!8$C9V`F%P_1~P z?ov`;EXq^@7i^CVo(oa}q_^g+ii6=#g2mR?@; zSyl8|B3bw9pq;G-v^c<_n8;S2XO-lr$_uxQzez14Q`TuBEt{{4V~Cj{Csv5@j&(yD zoZW}1i!t6mNzUuxTMFzY3Qe_-L`Ku_(UCTzn_KfLNhK1wF-MXk%)Scmx(P?PTE+FU z-x-4^_?s-w-)XKF0oJRlYsw`Vtx3Lj?Iqklulz-$Vf=S*4y8i= z)to1&QU9IpUlTh7o5uhWp&9hSmg#C9ZL=Y~CeMjadoT*zh|g_~&H$!ZsA;BW<^vm= zf>prJxLu4R0*}WBp~SSyo+O%YtZ>Q4v>v{BBy5c;6!``=Q>dHdT>F*&1i4r0?_3(rt6WB|O zDZ8VqmOSHD1PYx~#yo+Q-iEYQAv5f_>u530yU-k+!VbF>`^p!dXOh*l<;bgRmF=a! zzl^!c@b!PyVLF~GFN)$rBIw@txdscUl5IY2F&8e&J&GJHEnMsyF7H0bbBt-eh3<^5 z>=!gWEc(*yW9&vR<7?c}JU5I(8g!39b&g4`@=Wu{LkfFM3*DR`OCXFVIs2daDZYQV zyWxoxkA@%g3{y_qoiJa64ND=_N7knPzxKX6tf_3>JL(9EBH{=lppmB1mEL1R1f=&a zAW|YAy$P`aQbQ3?P>@~|q<5n9D$*hJ-g^&8xEr0pc+N0q=HC0wJ?Hu64<8phYwx}G zTJQR;x4+A@#ejyvBY)u#toGh>d?92x9aRqCyqM=xr4i#@^~b|&e^G+Ct<=haW%YT~ z450Ivq%YC~1hN|EP8?>qSO?&<{b%q+>3~iuXK?}RiGU5HU4?v#isgx4ifx_73PH6s zotp0e!LJ3%%(Vit|XX*bfpAkdK%vfCBZ5! 
literal 0
HcmV?d00001

diff --git a/tensorflow/contrib/lite/g3doc/apis.md b/tensorflow/contrib/lite/g3doc/apis.md
new file mode 100644
index 00000000000..311fc69696a
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/apis.md
@@ -0,0 +1,359 @@
+# TensorFlow Lite APIs
+
+TensorFlow Lite provides programming APIs in C++ and Java, and in both cases
+the API design reflects a preference for performance over ease of use.
+TensorFlow Lite is designed for fast inference on small devices, so it should be
+no surprise that the APIs try to avoid unnecessary copies at the expense of
+convenience. Similarly, consistency with TensorFlow APIs was not an explicit
+goal, and some variance is to be expected.
+
+## C++
+
+In order to run the inference model in TensorFlow Lite, one has to load the
+model into a `FlatBufferModel` object, which can then be executed by an
+`Interpreter`. The `FlatBufferModel` needs to remain valid for the whole
+lifetime of the `Interpreter`, and a single `FlatBufferModel` can be
+simultaneously used by more than one `Interpreter`. In concrete terms, the
+`FlatBufferModel` object must be created before any `Interpreter` objects that
+use it, and must be kept around until they have all been destroyed.
+
+The simplest usage of TensorFlow Lite will look like this:
+
+```c++
+tflite::FlatBufferModel model(path_to_model);
+tflite::ops::builtin::BuiltinOpResolver resolver;
+std::unique_ptr<tflite::Interpreter> interpreter;
+tflite::InterpreterBuilder(model, resolver)(&interpreter);
+// Resize input tensors, if desired.
+interpreter->AllocateTensors();
+float* input = interpreter->typed_input_tensor<float>(0);
+// Fill `input`.
+interpreter->Invoke();
+float* output = interpreter->typed_output_tensor<float>(0);
+```
+
+### Data Alignment
+
+TensorFlow Lite data is usually aligned to 32-bit boundaries. It is recommended
+that all data provided to TensorFlow Lite be aligned that way.
+
+### Error Reporting
+
+In many places TensorFlow Lite returns status information through
+`TfLiteStatus` objects:
+
+```c++
+typedef enum {
+  kTfLiteOk = 0,
+  kTfLiteError = 1
+} TfLiteStatus;
+```
+
+Failures can be easily verified with:
+
+```c++
+if (status != kTfLiteOk) {
+  // ... error handling here ...
+}
+```
+
+In order to obtain detailed error information, an `ErrorReporter` must be
+provided:
+
+```c++
+class ErrorReporter {
+  virtual int Report(const char* format, va_list args) = 0;
+};
+```
+
+The `DefaultErrorReporter` takes care of reporting to `stderr`.
+
+### Loading a Model
+
+The `FlatBufferModel` class encapsulates a model and can be built in a couple of
+slightly different ways depending on where the model is stored:
+
+```c++
+class FlatBufferModel {
+  // Build a model based on a file. Return a nullptr in case of failure.
+  static std::unique_ptr<FlatBufferModel> BuildFromFile(
+      const char* filename,
+      ErrorReporter* error_reporter);
+
+  // Build a model based on a pre-loaded flatbuffer. The caller retains
+  // ownership of the buffer and should keep it alive until the returned object
+  // is destroyed. Return a nullptr in case of failure.
+  static std::unique_ptr<FlatBufferModel> BuildFromBuffer(
+      const char* buffer,
+      size_t buffer_size,
+      ErrorReporter* error_reporter);
+};
+```
+
+Note that if TensorFlow Lite detects the presence of Android's NNAPI, it will
+automatically try to use shared memory to store the `FlatBufferModel`.
+
+### Running a Model
+
+Running a model involves a few simple steps:
+
+  * Build an `Interpreter` based on an existing `FlatBufferModel`.
+  * Optionally resize input tensors if the predefined sizes are not desired.
+  * Set input tensor values.
+  * Invoke inference.
+  * Read output tensor values.
+
+The important parts of the public interface of the `Interpreter` are provided
+below. It should be noted that:
+
+  * Tensors are represented by integers, in order to avoid string comparisons
+    (and any fixed dependency on string libraries).
+  * An interpreter must not be accessed from concurrent threads.
+  * Memory allocation for input and output tensors must be triggered
+    by calling `AllocateTensors()` right after resizing tensors.
+
+```c++
+class Interpreter {
+  Interpreter(ErrorReporter* error_reporter);
+
+  // Read only access to list of inputs.
+  const std::vector<int>& inputs() const;
+
+  // Read only access to list of outputs.
+  const std::vector<int>& outputs() const;
+
+  // Change the dimensionality of a given tensor.
+  TfLiteStatus ResizeInputTensor(int tensor_index,
+                                 const std::vector<int>& dims);
+
+  // Returns status of success or failure.
+  TfLiteStatus AllocateTensors();
+
+  // Return a pointer into the data of a given input tensor.
+  template <class T>
+  T* typed_input_tensor(int index) {
+    return typed_tensor<T>(inputs_[index]);
+  }
+
+  // Return a pointer into the data of a given output tensor.
+  template <class T>
+  T* typed_output_tensor(int index) {
+    return typed_tensor<T>(outputs_[index]);
+  }
+
+  // Execute the model, populating output tensors.
+  TfLiteStatus Invoke();
+};
+```
+
+### Writing Custom Operators
+
+All TensorFlow Lite operators (both custom and builtin) are defined using a
+simple pure-C interface that consists of four functions:
+
+```c++
+typedef struct {
+  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
+  void (*free)(TfLiteContext* context, void* buffer);
+  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
+  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
+} TfLiteRegistration;
+```
+
+Refer to `context.h` for details on `TfLiteContext` and `TfLiteNode`. The
+former provides error reporting facilities and access to global objects,
+including all the tensors. The latter allows implementations to access their
+inputs and outputs.
+
+When the interpreter loads a model, it calls `init()` once for each node in the
+graph. A given `init()` will be called more than once if the op is used
+multiple times in the graph. For custom ops a configuration buffer will be
+provided, containing a flexbuffer that maps parameter names to their values.
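+
+For illustration only, such a configuration buffer could be read in `init()`
+roughly as follows. The `SinInit` and `OpData` names and the `"offset"` key are
+hypothetical, and the flexbuffer accessors come from the FlatBuffers library
+header `flatbuffers/flexbuffers.h`:
+
+```c++
+#include "flatbuffers/flexbuffers.h"
+
+struct OpData {
+  float offset = 0.0f;  // Hypothetical custom-op parameter.
+};
+
+void* SinInit(TfLiteContext* context, const char* buffer, size_t length) {
+  auto* data = new OpData;
+  if (buffer != nullptr && length > 0) {
+    // The buffer holds a flexbuffer map from parameter names to values.
+    const flexbuffers::Map params =
+        flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(buffer), length)
+            .AsMap();
+    data->offset = params["offset"].AsFloat();
+  }
+  return data;  // Ownership is transferred; released again in free().
+}
+```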
+The buffer is empty for builtin ops because the interpreter has already parsed +the op parameters. Kernel implementation that require state should initialize +it here and transfer ownership to the caller. For each `init()` call, there +will be a corresponding call to `free()`, allowing implementations to dispose +of the buffer they might have allocated in `init()`. + +Whenever the input tensors are resized the interpreter will go through the +graph notifying implementations of the change. This gives them the chance to +resize their internal buffer, check validity of input shapes and types, and +recalculate output shapes. This is all done through `prepare()` and +implementation can access their state using `node->user_data`. + +Finally, each time inference runs the interpreter traverses the graph calling +`invoke()`, and here too the state is available as `node->user_data`. + +Custom ops can be implemented in exactly the same way as builtin ops, by +defined those four functions and a global registration function that usually +looks like this: + +```c++ +namespace tflite { +namespace ops { +namespace custom { + TfLiteRegistration* Register_MY_CUSTOM_OP() { + static TfLiteRegistration r = {my_custom_op::Init, + my_custom_op::Free, + my_custom_op::Prepare, + my_custom_op::Eval}; + return &r; + } +} // namespace custom +} // namespace ops +} // namespace tflite +``` + +Note that registration is not automatic and an explicit call to +`Register_MY_CUSTOM_OP` should be made somewhere. While the standard +`:builtin_ops` takes care of the registration of builtins, custom ops will have +to be collected in separated custom libraries. + +### Customizing the kernel library + +Behind the scenes the interpreter will load a library of kernels which will be +assigned to execute each of the operators in the model. While the default +library only contains builtin kernels, it is possible to replace it with a +custom library. + +The interpreter uses an `OpResolver` to translate operator codes and names into +actual code: + +```c++ +class OpResolver { + virtual TfLiteRegistration* FindOp(tflite::BuiltinOperator op) const = 0; + virtual TfLiteRegistration* FindOp(const char* op) const = 0; + virtual void AddOp(tflite::BuiltinOperator op, TfLiteRegistration* registration) = 0; + virtual void AddOp(const char* op, TfLiteRegistration* registration) = 0; +}; +``` + +The regular usage will require the developer to use the `BuiltinOpResolver` and +write: + +```c++ +tflite::ops::builtin::BuiltinOpResolver resolver; +``` + +They can then optionally register custom ops: + +```c++ +resolver.AddOp("MY_CUSTOM_OP", Register_MY_CUSTOM_OP()); +``` + +before the resolver is passed to the `InterpreterBuilder`. + +If the set of builtin ops is deemed to be too large, a new `OpResolver` could +be code-generated based on a given subset of ops, possibly only the ones +contained in a given model. This is the equivalent of TensorFlow's selective +registration (and a simple version of it is available in the `tools` +directory). + +## Java + +TensorFlow Lite's Java API supports on-device inference and is provided as an +Android Studio Library that allows loading models, feeding inputs, and +retrieving inference outputs. + +The simplest usage of Tensorflow Lite Java API looks like this: + +```java +try (Interpreter interpreter = new Interpreter(file_of_a_tensorflowlite_model)) { + interpreter.run(input, output); +} +``` + +### Loading a Model + +The `Interpreter.java` class drives model inference with TensorFlow Lite. 
In
+most of the cases, this is the only class an app developer will need.
+
+#### Initializing an `Interpreter` With a Model File
+
+The `Interpreter` can be initialized with a model file using the constructor:
+
+```java
+public Interpreter(@NotNull File modelFile);
+```
+
+or with a `MappedByteBuffer`:
+
+```java
+public Interpreter(@NotNull MappedByteBuffer mappedByteBuffer);
+```
+
+In both cases, a valid TensorFlow Lite model must be provided or an
+`IllegalArgumentException` will be thrown. If a `MappedByteBuffer` is used to
+initialize an `Interpreter`, it should remain unchanged for the whole lifetime
+of the `Interpreter`.
+
+### Running a Model
+
+#### Supported Data Types
+
+To use TensorFlow Lite, the data types of the input and output tensors must be
+one of the following primitive types:
+
+* `float`
+* `int`
+* `long`
+* `byte`
+
+If other data types, including boxed types like `Integer` and `Float`, are used,
+an `IllegalArgumentException` will be thrown.
+
+#### Inputs
+
+Each input should be an array, a multi-dimensional array, or a `ByteBuffer` of
+the supported primitive types.
+
+The use of `ByteBuffer` is preferred since it allows the `Interpreter` to avoid
+unnecessary copies. Each `ByteBuffer` needs to be a direct byte buffer, and its
+order must be `ByteOrder.nativeOrder()`. After it is used for a model inference,
+it must remain unchanged until the model inference is finished.
+
+#### Outputs
+
+Each output should be an array or a multi-dimensional array of the supported
+primitive types.
+
+#### Running Model Inference
+
+If a model takes only one input and returns only one output, the following will
+trigger an inference run:
+
+```java
+interpreter.run(input, output);
+```
+
+For models with multiple inputs, or multiple outputs, use:
+
+```java
+interpreter.runForMultipleInputsOutputs(inputs, map_of_indices_to_outputs);
+```
+
+where each entry in `inputs` corresponds to an input tensor and
+`map_of_indices_to_outputs` maps indices of output tensors to the
+corresponding output data. In both cases the tensor indices should correspond to
+the values given to the TensorFlow Lite Optimizing Converter when the model was
+created. Be aware that the order of tensors in `inputs` must match the order
+given to the TensorFlow Lite Optimizing Converter.
+
+The Java API also provides convenient functions for app developers to get the
+index of any model input or output using a tensor name:
+
+```java
+public int getInputIndex(String tensorName);
+public int getOutputIndex(String tensorName);
+```
+
+If `tensorName` is not a valid name in the model, an `IllegalArgumentException`
+will be thrown.
+
+### Releasing Resources After Use
+
+An `Interpreter` owns resources. To avoid memory leaks, the resources must be
+released after use by:
+
+```java
+interpreter.close();
+```
diff --git a/tensorflow/contrib/lite/g3doc/custom_operators.md b/tensorflow/contrib/lite/g3doc/custom_operators.md
new file mode 100644
index 00000000000..204a489a935
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/custom_operators.md
@@ -0,0 +1,91 @@
+# How to use custom operators
+
+TensorFlow Lite currently supports a subset of TensorFlow operators. However, it
+does support the use of user-provided implementations (also known as custom
+implementations) if the model contains an operator that is not supported.
+
+Let's walk through this via an example. Assume we are using the `Sin` operator
+and that we are building a very simple model for a function `y = sin(x +
+offset)`, where `offset` is trainable.
+
+The code to train the TensorFlow model will be something like:
+
+```python
+offset = tf.get_variable("offset", [1,], tf.float32)
+x = tf.placeholder(tf.float32, shape=(None,))
+y = tf.sin(x + offset)
+y_ = tf.placeholder(tf.float32, shape=(None,))
+loss = tf.reduce_sum(tf.square(y - y_))
+optimizer = tf.train.GradientDescentOptimizer(0.001)
+train = optimizer.minimize(loss)
+```
+
+If you convert this model to TensorFlow Lite format using the TensorFlow Lite
+Optimizing Converter with the `--allow_custom_ops` argument, and run it with the
+default interpreter, the interpreter will raise the following error messages:
+
+```
+Didn't find custom op for name 'Sin'
+Registration failed.
+```
+
+All we need to do to use the op in TensorFlow Lite is define two functions
+(`Prepare` and `Eval`), and construct a `TfLiteRegistration`. This code would
+look something like this:
+
+```cpp
+TfLiteStatus SinPrepare(TfLiteContext* context, TfLiteNode* node) {
+  using namespace tflite;
+  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+  TfLiteTensor* input = GetInput(context, node, 0);
+  TfLiteTensor* output = GetOutput(context, node, 0);
+
+  int num_dims = NumDimensions(input);
+
+  TfLiteIntArray* output_size = TfLiteIntArrayCreate(num_dims);
+  for (int i = 0; i < num_dims; ++i) {
+    output_size->data[i] = input->dims->data[i];
+  }
+
+  return context->ResizeTensor(context, output, output_size);
+}
+
+TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) {
+  using namespace tflite;
+  TfLiteTensor* input = GetInput(context, node, 0);
+  TfLiteTensor* output = GetOutput(context, node, 0);
+
+  float* input_data = input->data.f;
+  float* output_data = output->data.f;
+
+  size_t count = 1;
+  int num_dims = NumDimensions(input);
+  for (int i = 0; i < num_dims; ++i) {
+    count *= input->dims->data[i];
+  }
+
+  for (size_t i = 0; i < count; ++i) {
+    output_data[i] = sin(input_data[i]);
+  }
+
+  return kTfLiteOk;
+}
+```
+
+ SDK Tools ->
+    Android Support Repository`.
+
+  2. [Edit your `WORKSPACE`](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#edit-workspace)
+    to add SDK and NDK targets.
+
+    - Make sure the `api_level` in `WORKSPACE` is set to an SDK version that
+      you have installed.
+    - By default, Android Studio will install the SDK to `~/Android/Sdk` and
+      the NDK to `~/Android/Sdk/ndk-bundle`.
+
+2. Build the app with Bazel. The demo needs C++11:
+
+  ```shell
+  bazel build -c opt --cxxopt='--std=c++11' \
+    //tensorflow/contrib/lite/java/demo/app/src/main:TfLiteCameraDemo
+  ```
+
+3. Install the demo on a
+  [debug-enabled device](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#install):
+
+  ```shell
+  adb install bazel-bin/tensorflow/contrib/lite/java/demo/app/src/main/TfLiteCameraDemo.apk
+  ```
diff --git a/tensorflow/contrib/lite/models/smartreply/g3doc/README.md b/tensorflow/contrib/lite/models/smartreply/g3doc/README.md
new file mode 100644
index 00000000000..cab5dcca43a
--- /dev/null
+++ b/tensorflow/contrib/lite/models/smartreply/g3doc/README.md
@@ -0,0 +1,146 @@
+# Smart Reply Model
+
+## What is the On-Device Smart Reply Model?
+
+Smart Replies are contextually relevant, one-touch responses that help the user
+to reply to an incoming text message (or email) efficiently and effortlessly.
+Smart Replies have been highly successful across several Google products +including +[Gmail](https://www.blog.google/products/gmail/save-time-with-smart-reply-in-gmail/), +[Inbox](https://www.blog.google/products/gmail/computer-respond-to-this-email/) +and +[Allo](https://blog.google/products/allo/google-allo-smarter-messaging-app/). + +The On-device Smart Reply model is targeted towards text chat use cases. It has +a completely different architecture from its cloud-based counterparts, and is +built specifically for memory constraints devices such as phones & watches. It +has been successfully used to provide [Smart Replies on Android +Wear](https://research.googleblog.com/2017/02/on-device-machine-intelligence.html) +to all first- & third-party apps. + +The on-device model comes with several benefits. It is: + +* **Faster**: The model resides on the device and does not require internet + connectivity. Thus, the inference is very fast and has an average latency of + only a few milliseconds. +* **Resource efficient**: The model has a small memory footprint on + the device. +* **Privacy-friendly**: The user data never leaves the device and this + eliminates any privacy restrictions. + +A caveat, though, is that the on-device model has lower triggering rate than its +cloud counterparts (triggering rate is the percentage of times the model +suggests a response for an incoming message). + +## When to use this Model? + +The On-Device Smart Reply model is aimed towards improving the messaging +experience for day-to-day conversational chat messages. We recommend using this +model for similar use cases. Some sample messages on which the model does well +are provided in this [tsv +file](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/testdata/smartreply_samples.tsv) +for reference. The file format is: + +``` + {incoming_message smart_reply1 [smart_reply2] [smart_reply3]} +``` + +For the current model, we see a triggering rate of about 30-40% for messages +which are similar to those provided in the tsv file above. + +In case the model does not trigger any response, the system falls back to +suggesting replies from a fixed back-off set that was compiled from popular +response intents observed in chat conversations. Some of the fallback responses +are `Ok, Yes, No, 👍, ☺`. + +The model can only be used for inference at this time (i.e. it cannot be custom +trained). If you are interested to know how the model was trained, please refer +to this [blog +post](https://research.googleblog.com/2017/02/on-device-machine-intelligence.html) +and [research paper](https://arxiv.org/pdf/1708.00630). + +## How to use this Model? + +We have provided a pre-built demo APK that you can download, install and test on +your phone ([demo APK +here](http://download.tensorflow.org/deps/tflite/SmartReplyDemo.apk)). + +The On-Device Smart Reply demo App works in the following way: + +1. Android app links to the JNI binary with a predictor library. + +2. In the predictor library, `GetSegmentPredictions` is called with a list of input + strings. + + 2.1 The input string can be 1-3 most recent messages of the conversations in + form of string vector. The model will run on these input sentences and + provide Smart Replies corresponding to them. + + 2.2 The function performs some preprocessing on input data which includes: + + * Sentence splitting: The input message will be split into sentences if + message has more than one sentence. Eg: a message like “How are you? 
+ Want to grab lunch?” will be broken down into 2 different sentences. + * Normalization: The individual sentences will be normalized by converting + them into lower cases, removing unnecessary punctuations, etc. Eg: “how + are you????” will be converted to “how are you?” (refer for NORMALIZE op + for more details). + + The input string content will be converted to tensors. + + 2.3 The function then runs the prediction model on the input tensors. + + 2.4 The function also performs some post-processing which includes + aggregating the model predictions for the input sentences from 2.2 and + returning the appropriate responses. + +3. Finally, it gets response(s) from `std::vector`, and + returns back to Android app. Responses are sorted in descending order of + confidence score. + +## Ops and Functionality Supported + +Following are the ops supported for using On-Device Smart Reply model: + +* **NORMALIZE** + + This is a custom op which normalizes the sentences by: + + * Converting all sentences into lower case. + * Removing unnecessary punctuations (eg: “how are you????” → “how are + you?”). + * Expanding sentences wherever necessary (eg: “ I’m home” → “I am home”). + +* **SKIP_GRAM** + + This is an op inside TensorFlow Lite that converts sentences into a list of + skip grams. The configurable parameters are `ngram_size` and + `max_skip_size`. For the model provided, the values for these parameters are + set to 3 & 2 respectively. + +* **EXTRACT_FEATURES** + + This is a custom op that hashes skip grams to features represented as + integers. Longer skip-grams are allocated higher weights. + +* **LSH_PROJECTION** + + This is an op inside TensorFlow Lite that projects input features to a + corresponding bit vector space using Locality Sensitive Hashing (LSH). + +* **PREDICT** + + This is a custom op that runs the input features through the projection + model (details [here](https://arxiv.org/pdf/1708.00630.pdf)), computes the + appropriate response labels along with weights for the projected features, + and aggregates the response labels and weights together. + +* **HASHTABLE_LOOKUP** + + This is a custom op that uses label id from predict op and looks up the + response text from the given label id. + +## Further Information + +* Open source code + [here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/smartreply/). diff --git a/tensorflow/contrib/lite/models/testdata/g3doc/README.md b/tensorflow/contrib/lite/models/testdata/g3doc/README.md new file mode 100644 index 00000000000..d0c21d28338 --- /dev/null +++ b/tensorflow/contrib/lite/models/testdata/g3doc/README.md @@ -0,0 +1,102 @@ +## Speech Model Tests + +Sample test data has been provided for speech related models in Tensorflow Lite +to help users working with speech models to verify and test their models. + +For the hotword, speaker-id and automatic speech recognition sample models, the +architecture assumes that the models receive their input from a speech +pre-processing module. The speech pre-processing module receives the audio +signal and produces features for the encoder neural network and uses some +typical signal processing algorithms, like FFT and spectral subtraction, and +ultimately produces a log-mel filterbank (the log of the triangular mel filters +applied to the power spectra). The text-to-speech model assumes that the inputs +are linguistic features describing characteristics of phonemes, syllables, +words, phrases, and sentence. 
The outputs are acoustic features including
+mel-cepstral coefficients, log fundamental frequency, and band aperiodicity.
+The pre-processing modules for these models are not provided in the open source
+version of TensorFlow Lite.
+
+The following sections describe the architecture of the sample models at a high
+level:
+
+### Hotword Model
+
+The hotword model is the neural network model we use for keyphrase/hotword
+spotting (i.e. "okgoogle" detection). It is the entry point for voice
+interaction (e.g. Google search app on Android devices or Google Home, etc.).
+The speech hotword model block diagram is shown in the figure below. It has an
+input size of 40 (float), an output size of 7 (float), one Svdf layer, and four
+fully connected layers with the corresponding parameters as shown in the figure.
+
+![hotword_model](hotword.svg "Hotword model")
+
+### Speaker-id Model
+
+The speaker-id model is the neural network model we use for speaker
+verification. It runs after the hotword triggers. The speech speaker-id model
+block diagram is shown in the figure below. It has an input size of 80 (float),
+an output size of 64 (float), three LSTM layers, and one fully connected layer
+with the corresponding parameters as shown in the figure.
+
+![speakerid_model](speakerid.svg "Speaker-id model")
+
+### Text-to-speech (TTS) Model
+
+The text-to-speech model is the neural network model used to generate speech
+from text. The speech text-to-speech model’s block diagram is shown
+in the figure below. It has an input size of 334 (float), an output size of 196
+(float), two fully connected layers, three LSTM layers, and one recurrent layer
+with the corresponding parameters as shown in the figure.
+
+![tts_model](tts.svg "TTS model")
+
+### Automatic Speech Recognizer (ASR) Acoustic Model (AM)
+
+The acoustic model for automatic speech recognition is the neural network model
+for matching phonemes to the input audio features. It generates posterior
+probabilities of phonemes from speech frontend features (log-mel filterbanks).
+It has an input size of 320 (float), an output size of 42 (float), five LSTM
+layers and one fully connected layer with a Softmax activation function, with
+the corresponding parameters as shown in the figure.
+
+![asr_am_model](asr_am.svg "ASR AM model")
+
+## Speech models test input/output generation
+
+As mentioned above, the inputs to the models are generated from a pre-processing
+module (the output of a log-mel filterbank, or linguistic features), and the
+outputs are generated by running the equivalent TensorFlow model with the
+same inputs.
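+
+For illustration, the sketch below shows how one of these test models could be
+fed a single frame of features through the C++ API described in `apis.md`
+above. The model path is a placeholder and the zero-valued features merely
+stand in for real log-mel filterbank output, so this is not a faithful
+reproduction of the released test benches:
+
+```c++
+// Run the rank-1 hotword test model on one frame of 40 log-mel features
+// (input size 40, output size 7, as described above). Error checking omitted.
+tflite::FlatBufferModel model("speech_hotword_model_rank1.tflite");
+tflite::ops::builtin::BuiltinOpResolver resolver;
+std::unique_ptr<tflite::Interpreter> interpreter;
+tflite::InterpreterBuilder(model, resolver)(&interpreter);
+interpreter->AllocateTensors();
+float* input = interpreter->typed_input_tensor<float>(0);
+for (int i = 0; i < 40; ++i) input[i] = 0.0f;  // Placeholder feature frame.
+interpreter->Invoke();
+const float* output = interpreter->typed_output_tensor<float>(0);  // 7 floats.
+```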
+ +## Link to the open source code + +### Models: + +[Speech hotword model (Svdf rank=1)] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/testdata/speech_hotword_model_rank1.tflite) + +[Speech hotword model (Svdf rank=2)] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/testdata/speech_hotword_model_rank2.tflite) + +[Speaker-id model] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/testdata/speech_speakerid_model.tflite) + +[TTS model] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/testdata/speech_tts_model.tflite) + +[ASR AM model] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/testdata/speech_terse_am_model.tflite) + +### Test benches + +[Speech hotword model test] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/speech_hotword_model_test.cc) + +[Speaker-id model test] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/speech_speakerid_model_test.cc) + +[TTS model test] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/speech_tts_model_test.cc) + +[ASR AM model test] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/models/speech_terse_am_model_test.cc) + +## Android Support +The models have been tested on Android phones, using the following tests: + +[Hotword] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/android/BUILD?rcl=172930882&l=25) + +[Speaker-id] (https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/android/BUILD?rcl=172930882&l=36) + + diff --git a/tensorflow/contrib/lite/models/testdata/g3doc/asr_am.svg b/tensorflow/contrib/lite/models/testdata/g3doc/asr_am.svg new file mode 100644 index 00000000000..ca965564221 --- /dev/null +++ b/tensorflow/contrib/lite/models/testdata/g3doc/asr_am.svg @@ -0,0 +1,4 @@ + + + + diff --git a/tensorflow/contrib/lite/models/testdata/g3doc/hotword.svg b/tensorflow/contrib/lite/models/testdata/g3doc/hotword.svg new file mode 100755 index 00000000000..36187aa3218 --- /dev/null +++ b/tensorflow/contrib/lite/models/testdata/g3doc/hotword.svg @@ -0,0 +1,4 @@ + + + + diff --git a/tensorflow/contrib/lite/models/testdata/g3doc/speakerid.svg b/tensorflow/contrib/lite/models/testdata/g3doc/speakerid.svg new file mode 100755 index 00000000000..dbe4312c464 --- /dev/null +++ b/tensorflow/contrib/lite/models/testdata/g3doc/speakerid.svg @@ -0,0 +1,4 @@ + + + + diff --git a/tensorflow/contrib/lite/models/testdata/g3doc/tts.svg b/tensorflow/contrib/lite/models/testdata/g3doc/tts.svg new file mode 100755 index 00000000000..9664b78f160 --- /dev/null +++ b/tensorflow/contrib/lite/models/testdata/g3doc/tts.svg @@ -0,0 +1,4 @@ + + + + diff --git a/tensorflow/contrib/lite/models/testdata/smartreply_samples.tsv b/tensorflow/contrib/lite/models/testdata/smartreply_samples.tsv new file mode 100644 index 00000000000..dfdc7831060 --- /dev/null +++ b/tensorflow/contrib/lite/models/testdata/smartreply_samples.tsv @@ -0,0 +1,50 @@ +any chance ur free tonight Maybe not +any updates? No update yet +anything i can do to help? No, but thanks No, but thank you No, but thanks for asking +be safe. 
I will be Will do my best Thanks, I will +congratulations Thanks thanks Congratulations +cool, let me know when you have time Cool Yes very cool Yeah, cool +drive safe Thank you, I will Home now I will thanks +hang in there, you'll be okay Doing my best Of course we will +happy birthday! Hey, thanks +happy new year! Wish you the same Thanks and same to you +have a safe flight Thanks, love you too Safe travels +hey What is up? How it going? Can I help you? +hey, got a sec? What is up? How it going? Can I help you? +how are you doing? Great and you? I am doing great +how are you feeling Feeling okay A little better Much much better +how was your weekend? It was real good +how you doing Okay and you +hugs. So sweet Thanks sweetie Take care of yourself +i'm bored Sorry to hear that Join the club No you are not +i'm planning on coming next week. let me know if that works. Works Perfect, thanks +i'm sick Sorry to hear that +i'm so happy for you Thanks me too +i'm so hungry Haha me too +i'm sorry No I am sorry Why sorry? No worries love +i'm sorry, i'm going to have to cancel. No I am sorry Why sorry? No worries love +is there anything i can do to help? No, but thanks No, but thanks for asking +lunch? Yes coming +okay. lemme know as soon as you find out. Any more questions? It is done +omg amazing So amazing +on my way Okay see you soon Cool, see you soon Oh wow, ok +oops, mistexted. Oops Haha, oh well That was funny +safe travels. Thanks, love you too Safe travels +so sorry So sorry +sorry, i can't. No worries at all Sorry what? +sorry, i can't do saturday No worries at all +thank you so much. You are so welcome You are so very welcome You are most welcome +thanks for coming It was my pleasure +thanks, this has been great. Glad to help So happy for you +tomorrow would be ideal. Yes it would +tried calling Try again? +ugh, my flight is delayed. Ugh indeed +what are you guys up to tonight? Nothing planned +what day works best for you Any day +what do you want for dinner Your call Whatever is fine +what time will you be home? Not sure why +where are you?!? At my house +wish you were here. I wish the same Me too honey +you're amazing You are too You are amazing I am +you're marvelous You are too +you're the best. I do my best You are the best Well, I try \ No newline at end of file diff --git a/tensorflow/contrib/lite/nnapi/README.md b/tensorflow/contrib/lite/nnapi/README.md new file mode 100644 index 00000000000..913467d1768 --- /dev/null +++ b/tensorflow/contrib/lite/nnapi/README.md @@ -0,0 +1,15 @@ +# Android Neural Network API + +The Android Neural Networks API (NNAPI) is an Android C API designed for running +computationally intensive operators for machine learning on mobile devices. +Tensorflow Lite is designed to use the NNAPI to perform hardware-accelerated +inference operators on supported devices. +Based on the app’s requirements and the hardware capabilities on a device, the +NNAPI can distribute the computation workload across available on-device +processors, including dedicated neural network hardware, graphics processing +units (GPUs), and digital signal processors (DSPs). +For devices that lack a specialized vendor driver, the NNAPI runtime relies on +optimized code to execute requests on the CPU. 
For more information about the +NNAPI, please refer to the [NNAPI documentation](https://developer.android.com/ndk/guides/neuralnetworks/index.html) + + diff --git a/tensorflow/contrib/lite/toco/README.md b/tensorflow/contrib/lite/toco/README.md new file mode 100644 index 00000000000..281b2ea5e4c --- /dev/null +++ b/tensorflow/contrib/lite/toco/README.md @@ -0,0 +1,26 @@ +# The TensorFlow Lite Optimizing Converter + +The TensorFlow Lite Optimizing Converter's most typical use is converting from the TensorFlow GraphDef to the TensorFlow Lite +format, but it supports much more than that. + +## Usage documentation + +Usage information is given in these documents: + +* [Command-line examples](g3doc/cmdline_examples.md) +* [Command-line reference](g3doc/cmdline_reference.md) +* [Python API](g3doc/python_api.md) + +## Design documentation + +Coming soon! + +## Where the converter fits in the TensorFlow landscape + +In the typical case, an application developer is using TensorFlow to design and +train models, then uses TensorFlow's freeze_graph.py to generate a frozen +inference graph, then uses the converter to convert that into a TensorFlow Lite flatbuffer file, +then ships that file to client devices where the TensorFlow Lite interpreter handles them +on-device. This is represented in the following diagram: + +![drawing](https://storage.googleapis.com/download.tensorflow.org/example_images/tensorflow_landscape.svg) diff --git a/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md new file mode 100644 index 00000000000..b9f8c8d152e --- /dev/null +++ b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md @@ -0,0 +1,509 @@ +# TensorFlow Lite Optimizing Converter command-line examples + +This page is a guide to using the TensorFlow Lite Optimizing Converter by +looking at some example command lines. It is complemented by the following other +documents: + +* [README](../README.md) +* [Command-line reference](cmdline_reference.md) + +Table of contents: + +[TOC] + +## Convert a TensorFlow GraphDef to TensorFlow Lite for float inference + +In this example, we look at the most common task: we have an ordinary TensorFlow +GraphDef and want to convert it to a TensorFlow Lite flatbuffer to perform +floating-point inference. + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.50_128_frozen.tgz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/mobilenet_v1_0.50_128/frozen_graph.pb \ + --output_file=/tmp/foo.lite \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TFLITE \ + --input_type=FLOAT \ + --inference_type=FLOAT \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 +``` + +To explain each of these flags: + +* `--input_format` and `--output_format` determine the formats of the input + and output files: here we are converting from `TENSORFLOW_GRAPHDEF` to + `TFLITE`. +* `--input_file` specifies the path of the input file, to be converted. When + `--input_format=TENSORFLOW_GRAPHDEF`, this file should be a + *[frozen](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py)* + *inference* graph. Being frozen means in particular that the input file is + self-contained, and does not reference any external "checkpoint" file. 
An + *inference* graph is a version of a graph meant to be used for inference, + typically not the same graph file as was used for training a given model. +* `--output_file` specifies the destination to write the converted file to. +* `--input_array` specifies the input activations, that is, the input "tensor" + in the input TensorFlow GraphDef file. The array designated by + `--input_array` is the one that the user will have to provide the contents + of as input to the runtime inference code. +* `--output_array` specifies the output activations, that is, the output + "tensor" in the input TensorFlow GraphDef file. The runtime inference code + will store its results in the array designated by `--output_array`. +* `--input_shape` specifies the shape of the input array. It is currently + required, but the plan is for a future version to no longer require it, + allowing to defer the specification of the input shape until runtime. The + format of `input_shape` is always a comma-separated list of dimensions, + always in TensorFlow convention. +* `--input_type` specifies what should be the type of the input arrays in the + **output** file. `--input_type` does not describe a property of the input + file: the type of input arrays is already encoded in the input graph. + Rather, `--input_type` is how you specify what should be the type of the + inputs to be provided to the output converted graph. This only affects + arrays of real numbers: this flag allows to quantized/dequantize + real-numbers inputs, switching between floating-point and quantized forms. + This flag has no incidence on all other types of input arrays, such as plain + integers or strings. +* `--inference_type` specifies what type of arithmetic the output file should + be relying on. It implies in particular the choice of type of the output + arrays in the output file. Like `--input_type`, `--inference_type` does not + describe a property of the input file. + +## Just optimize a TensorFlow GraphDef + +The converter accepts both TENSORFLOW_GRAPHDEF and TFLITE file formats as both +`--input_format` and `--output_format`. This means that conversion from and to +any supported format is possible, and in particular, same-format "conversions" +are possible, and effectively ask the converter to optimize and simplify a +graph. Example: + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.50_128_frozen.tgz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/mobilenet_v1_0.50_128/frozen_graph.pb \ + --output_file=/tmp/foo.pb \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TENSORFLOW_GRAPHDEF \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 +``` + +Here we did not pass `--input_type` and `--inference_type` because they are +considered not applicable to the TensorFlow GraphDef format (as far as we are +concerned, TensorFlow GraphDefs are technically always float, and the only +flavor of "quantized" GraphDef that the converter deals with is "FakeQuantized" +graphs that are still technically float graphs). + +Below in the section about passing arbitrary input/output arrays we give another +example, using the converter to extract just a sub-graph from a TensorFlow +GraphDef. 
+ +## Convert a TensorFlow Lite flatbuffer back into TensorFlow GraphDef format + +As we mentioned that the converter supports file format conversions in any +direction, let us just give an example of that: + +``` +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/foo.lite \ + --output_file=/tmp/foo.pb \ + --input_format=TFLITE \ + --output_format=TENSORFLOW_GRAPHDEF \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 +``` + +## Convert a TensorFlow GraphDef to TensorFlow Lite for quantized inference + +Let us now look at a quantized model. As mentioned above, the only flavor of +quantized TensorFlow GraphDefs that the converter is concerned with, is +"FakeQuantized" models. These are technically float models, but with special +`FakeQuant*` ops inserted at the boundaries of fused layers to record min-max +range information allowing to generate a quantized inference workload that is +able to reproduce exactly the specific quantization behavior that was used +during training. Indeed, the whole point of quantized training is to allow for +both training and inference to perform exactly the same arithmetic, so that the +way that the training process about around quantization inaccuracy is +effectively helping the quantized inference process to be more accurate. + +Given a quantized TensorFlow GraphDef, generating a quantized TensorFlow Lite +flatbuffer is done like this: + +``` +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/some_quantized_graph.pb \ + --output_file=/tmp/foo.lite \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TFLITE \ + --input_type=QUANTIZED_UINT8 \ + --inference_type=QUANTIZED_UINT8 \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 \ + --mean_value=128 \ + --std_value=127 +``` + +Here, besides changing `--input_file` to point to a (fake-)quantized GraphDef, +the only other changes are: + +* To change `--input_type` and `--inference_type` to `QUANTIZED_UINT8`. This + effectively tells the converter to generate an output file that can take a + quantized uint8 array as input (`--input_type=QUANTIZED_UINT8`), and have + quantized uint8 internal and output arrays as well + (`--inference_type=QUANTIZED_UINT8`). +* To pass `--mean_value` and `--std_value` flags to describe how the quantized + uint8 input array values are to be interpreted as the mathematical real + numbers that the graph is concerned with (keep in mind that even a + "fake-quantized" TensorFlow GraphDef is still technically a float graph). + The meaning of `--mean_value` and `--std_value` is explained in the + command-line reference; it suffices for now to say that they are a property + of each model. + +## Use dummy-quantization to try out quantized inference on a float graph + +Sometimes, one only has a plain float graph, and one is curious as to how much +faster inference might run if one could perform quantized inference instead of +float inference. Rather than requiring users to first invest in quantizing their +graphs before they can evaluate a possible benefit, the converter allows to +simply experiment with what we call "dummy quantization": provide some vaguely +plausible values for the min-max ranges of values in all arrays that do not have +min-max information, so that quantization can carry on, certainly producing +inaccurate results (do not use that in production!) 
but with performance +characteristics that should be identical to those of an actually quantized +flavor of the model. + +In the present example, we have a model using Relu6 activation functions almost +everywhere, so a reasonable guess is that most activation ranges should be +contained in [0, 6] and roughly comparable to it. + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.50_128_frozen.tgz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/mobilenet_v1_0.50_128/frozen_graph.pb \ + --output_file=/tmp/foo.cc \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TFLITE \ + --input_type=QUANTIZED_UINT8 \ + --inference_type=QUANTIZED_UINT8 \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 \ + --default_ranges_min=0 \ + --default_ranges_max=6 \ + --mean_value=127.5 \ + --std_value=127.5 +``` + +## Multiple output arrays + +Some models have multiple outputs. Even in a model with only one output, you may +want for the inference code to return the contents of other arrays as well, or +to perform inference on a subgraph with multiple outputs (see the section below +on specifying arbitrary arrays as input/output arrays). + +Either way, using `--output_arrays` instead of `--output_array` allows to +specify a comma-separated list of output arrays. + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/inception_v1_2016_08_28_frozen.pb \ + --output_file=/tmp/foo.lite \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TFLITE \ + --input_type=FLOAT \ + --inference_type=FLOAT \ + --input_shape=1,224,224,3 \ + --input_array=input \ + --output_arrays=InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/Relu +``` + +## Multiple input arrays + +Some models have multiple inputs; even in a model with a single input, you may +want for the inference code to implement only a subgraph with multiple inputs +(see the section below on specifying arbitrary arrays as input/output arrays). + +Either way, multiple input arrays are specified by using `--input_arrays` +instead of `--input_array` to specify a comma-separated list of input arrays. In +that case, one also needs to use `--input_shapes` instead of `--input_shape`. +The syntax for `--input_shapes` is a bit trickier, since already the singular +`--input_shape` was a comma-separated list of integers! Multiple input shapes +are delimited by a colon (`:`) in `--input_shapes`. 
+ +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/inception_v1_2016_08_28_frozen.pb \ + --output_file=/tmp/foo.lite \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TFLITE \ + --input_type=FLOAT \ + --inference_type=FLOAT \ + --input_shapes=1,28,28,96:1,28,28,16:1,28,28,192:1,28,28,64 \ + --input_arrays=InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_3/MaxPool_0a_3x3/MaxPool,InceptionV1/InceptionV1/Mixed_3b/Branch_0/Conv2d_0a_1x1/Relu \ + --output_array=InceptionV1/Logits/Predictions/Reshape_1 +``` + +## Specifying arbitrary arrays in a graph as input or output arrays + +Any array in the input file can be specified as an input or output array. This +allows to use the converter to extract a sub-graph out of the input graph file. +The converter then automatically discards any part of the graph that is not +needed for the subgraph identified by the specified input and output arrays. +Another use case for specifying multiple output arrays is to get inference code +to return the contents of some specified intermediate activations array, not +just the output activations. + +In order to know which array you want to pass as `--input_arrays` / +`--output_arrays`, it helps to have a visualization of the graph. See the +section below on graph visualization. When using graph visualization for that +purpose, make sure to use `--dump_graphviz=` to visualize exactly the graph as +it is in the actual final form being exported to the output file. + +Note that the final representation of an on-device inference workload (say, in +TensorFlow Lite flatbuffers format) tends to have coarser granularity than the +very fine granularity of the TensorFlow GraphDef representation. For example, +while a fully-connected layer is typically represented as at least four separate +ops in TensorFlow GraphDef (Reshape, MatMul, BiasAdd, Relu...), it is typically +represented as a single "fused" op (FullyConnected) in the converter's optimized +representation and in the final on-device representation (e.g. in TensorFlow +Lite flatbuffer format). As the level of granularity gets coarser, some +intermediate arrays (say, the array between the MatMul and the BiasAdd in the +TensorFlow GraphDef) are dropped. When specifying intermediate arrays as +`--input_arrays` / `--output_arrays`, it is generally at least desirable (and +often required) to specify arrays that are meant to survive in the final form of +the graph, after fusing. These are typically the outputs of activation functions +(since everything in each layer until the activation function tends to get +fused). 
+ +Here is an example of extracting just a sub-graph, namely just a single fused +layer, out of a TensorFlow GraphDef, and exporting a TensorFlow GraphDef +containing just that subgraph: + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/inception_v1_2016_08_28_frozen.pb \ + --output_file=/tmp/foo.pb \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TENSORFLOW_GRAPHDEF \ + --input_shapes=1,28,28,96:1,28,28,16:1,28,28,192:1,28,28,64 \ + --input_arrays=InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_2/Conv2d_0a_1x1/Relu,InceptionV1/InceptionV1/Mixed_3b/Branch_3/MaxPool_0a_3x3/MaxPool,InceptionV1/InceptionV1/Mixed_3b/Branch_0/Conv2d_0a_1x1/Relu \ + --output_array=InceptionV1/InceptionV1/Mixed_3b/concat_v2 +``` + +## Logging + +### Standard logging + +The converter generates some informative log messages during processing. The +easiest way to view them is to add `--logtostderr` to command lines. For the +previous example, that gives: + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.50_128_frozen.tgz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/mobilenet_v1_0.50_128/frozen_graph.pb \ + --output_file=/tmp/foo.lite \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TFLITE \ + --input_type=FLOAT \ + --inference_type=FLOAT \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 \ + --logtostderr +``` + +After some initialization messages, we get the following informative messages: + +``` +I1101 21:51:33.297475 5339 graph_transformations.cc:39] Before general graph transformations: 416 operators, 583 arrays (0 quantized) +I1101 21:51:33.308972 5339 graph_transformations.cc:39] After general graph transformations pass 1: 31 operators, 89 arrays (0 quantized) +I1101 21:51:33.309204 5339 graph_transformations.cc:39] Before dequantization graph transformations: 31 operators, 89 arrays (0 quantized) +I1101 21:51:33.309368 5339 allocate_transient_arrays.cc:312] Total transient array allocated size: 1048576 bytes, theoretical optimal value: 786432 bytes. +I1101 21:51:33.309484 5339 toco_tooling.cc:249] Estimated count of arithmetic ops: 0.099218 billion (note that a multiply-add is counted as 2 ops). +``` + +### Verbose logging + +For debugging purposes, the converter supports two levels of verbose logging, +which can be set by passing a `--v=` flag: + +* At `--v=1`, the converter generates text dumps of the graph at various + points during processing, as well as log messages about every graph + transformation that did take place, typically answering questions of the + form "why was my graph transformed in this way"? +* At `--v=2`, the converter additionally generates log messages about graph + transformations that were considered but not actually performed, typically + answering questions of the form "why was my graph NOT transformed when I + expected it would be?". + +### Graph "video" logging + +When `--dump_graphviz=` is used (see the section on Graph visualizations), one +may additionally pass `--dump_graphviz_video`, which causes a graph +visualization to be dumped after each individual graph transformations, often +resulting in thousands of files. 
Typically, one would then bisect into these +files to understand when a given change was introduced in the graph. + +## Graph visualizations + +The converter is able to export a graph to the GraphViz Dot format, for easy +visualization. Combined with the converter's ability to transform the graph into +a simpler, coarser-granularity representation, that makes it a very powerful +visualization tool. + +There are two ways to get the converter to export a GraphViz Dot file, +corresponding to two separate use cases. Understanding the difference between +them is key to getting useful graph visualizations. + +### Using `--output_format=GRAPHVIZ_DOT` + +The first way to get a graphviz rendering is to pass +`--output_format=GRAPHVIZ_DOT`, instead of the `--output_format` that you would +otherwise use. This says: "I just want to get a plausible visualization of that +graph". The upside is that it makes for very simple command lines, and makes the +converter very lax about aspects of the graph or the command line that it would +otherwise complain about. Example: + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.50_128_frozen.tgz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/mobilenet_v1_0.50_128/frozen_graph.pb \ + --output_file=/tmp/foo.dot \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=GRAPHVIZ_DOT \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 +``` + +The resulting `.dot` file can be rendered into a PDF as follows: + +``` +dot -Tpdf -O /tmp/foo.dot +``` + +And the resulting `.dot.pdf` can be viewed in any PDF viewer, but we suggest one +with a good ability to pan and zoom across a very large page; Google Chrome does +well in that respect. + +``` +google-chrome /tmp/foo.dot.pdf +``` + +Example PDF files are viewable online in the next section. + +### Using `--dump_graphviz=` + +The second way to get a graphviz rendering is to pass a `--dump_graphviz=` flag +specifying a destination directory to dump GraphViz rendering to. Unlike the +previous approach, this one allows you to keep your real command-line (with your +real `--output_format` and other flags) unchanged, just appending a +`--dump_graphviz=` flag to it. This says: "I want visualizations of the actual +graph during this specific conversion process". Example: + +``` +curl https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_0.50_128_frozen.tgz \ + | tar xzv -C /tmp +bazel run --config=opt \ + //tensorflow/contrib/lite/toco:toco -- \ + --input_file=/tmp/mobilenet_v1_0.50_128/frozen_graph.pb \ + --output_file=/tmp/foo.lite \ + --input_format=TENSORFLOW_GRAPHDEF \ + --output_format=TFLITE \ + --input_type=FLOAT \ + --inference_type=FLOAT \ + --input_shape=1,128,128,3 \ + --input_array=input \ + --output_array=MobilenetV1/Predictions/Reshape_1 \ + --dump_graphviz=/tmp +``` + +This generates a few files in the destination directory, here `/tmp`. Most +important are these two files: + +``` +/tmp/toco_AT_IMPORT.dot +/tmp/toco_AFTER_TRANSFORMATIONS.dot +``` + +`toco_AT_IMPORT.dot` represents the graph as it was imported from +`--input_file`, before any transformation was applied to it (besides some +transformations that are applied immediately while importing). 
This tends to be +a complex visualization with limited information, but is useful especially in +situations where a conversion command fails (this file is generated even if the +conversion subsequently fails). + +`toco_AFTER_TRANSFORMATIONS.dot` represents the graph after all transformations +were applied to it, just before it was exported to the `--output_file`. +Typically, this is a much smaller graph, and it conveys much more information +about each node. + +Again, these can be rendered to PDFs: + +``` +dot -Tpdf -O /tmp/toco_*.dot +``` + +The resulting files can be seen here: + +* [toco_AT_IMPORT.dot.pdf](https://storage.googleapis.com/download.tensorflow.org/example_images/toco_AT_IMPORT.dot.pdf) +* [toco_AFTER_TRANSFORMATIONS.dot.pdf](https://storage.googleapis.com/download.tensorflow.org/example_images/toco_AFTER_TRANSFORMATIONS.dot.pdf). + +### Legend for the graph visualizations + +* Operators are red square boxes with the following hues of red: + * Most operators are + bright + red. + * Some typically heavy operators (e.g. Conv) are rendered in a + darker + red. +* Arrays are octogons with the following colors: + * Constant arrays are + blue. + * Activation arrays are gray: + * Internal (intermediate) activation arrays are + light + gray. + * Those activation arrays that are designated as `--input_arrays` or + `--output_arrays` are + dark + gray. + * RNN state arrays are green. Because of the way that the converter + represents RNN back-edges explicitly, each RNN state is represented by a + pair of green arrays: + * The activation array that is the source of the RNN back-edge (i.e. + whose contents are copied into the RNN state array after having been + computed) is + light + green. + * The actual RNN state array is + dark + green. It is the destination of the RNN back-edge updating + it. diff --git a/tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md b/tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md new file mode 100644 index 00000000000..cc6d416959c --- /dev/null +++ b/tensorflow/contrib/lite/toco/g3doc/cmdline_reference.md @@ -0,0 +1,238 @@ +# TensorFlow Lite Optimizing Converter command-line reference + +This page is complete reference of command-line flags. It is complemented by the +following other documents: + +* [README](../README.md) +* [Command-line examples](cmdline_examples.md) + +Table of contents: + +[TOC] + +## High-level overview + +A full list and detailed specification of all flags is given in the next +section. For now we focus on a higher-level description of command lines: + +``` +toco \ + --input_format=... \ + --output_format=... \ + --input_file=... \ + --output_file=... \ + [model flags...] \ + [transformation flags...] \ + [logging flags...] +``` + +In other words, the converter requires at least the following mandatory flags: +`--input_format`, `--output_format`, `--input_file`, `--output_file`. Depending +on the input and output formats, additional flags may be allowed or mandatory: + +* *Model flags* provide additional information about the model stored in the + input file. + * `--output_array` or `--output_arrays` specify which arrays in the input + file are to be considered the output activations. + * `--input_array` or `--input_arrays` specify which arrays in the input + file are to be considered the input activations. + * `--input_shape` or `--input_shapes` specify the shapes of the input + arrays. 
+    * `--mean_value` or `--mean_values`, and `--std_value` or `--std_values`,
+      give the dequantization parameters of the input arrays, for the case
+      when the output file will accept quantized input arrays.
+* *Transformation flags* specify options of the transformations to be applied
+  to the graph, i.e. they specify requested properties that the output file
+  should have.
+    * `--input_type` specifies the type that the input arrays should have
+      after transformations, in the output file. This is where you choose
+      whether you want runtime inference code to accept float or quantized
+      inputs. This flag only applies to float or quantized inputs, and allows
+      you to convert between the two. This flag has no effect on all other
+      types of inputs, such as ordinary integer arrays.
+    * `--inference_type` or `--inference_types` specify the type that generic
+      intermediate and output activation arrays should have after
+      transformations, in the output file. This is where you choose whether
+      you want runtime inference code to perform float or quantized inference
+      arithmetic.
+    * Some transformation flags allow you to carry on with quantization when
+      the input graph is not properly quantized: `--default_ranges_min`,
+      `--default_ranges_max`, `--drop_fake_quant`,
+      `--reorder_across_fake_quant`.
+* *Logging flags* are described below.
+
+## Command-line flags complete reference
+
+### Mandatory flags
+
+* `--input_format`. Type: string. Specifies the format of the input file.
+  Allowed values:
+    * `TENSORFLOW_GRAPHDEF` — The TensorFlow GraphDef format. Both
+      binary and text proto formats are allowed.
+    * `TFLITE` — The TensorFlow Lite flatbuffers format.
+* `--output_format`. Type: string. Specifies the format of the output file.
+  Allowed values:
+    * `TENSORFLOW_GRAPHDEF` — The TensorFlow GraphDef format. Always
+      produces a file in binary (not text) proto format.
+    * `TFLITE` — The TensorFlow Lite flatbuffers format.
+        * Whether a float or quantized TensorFlow Lite file will be produced
+          depends on the `--inference_type` flag.
+        * Whether the produced TensorFlow Lite file will accept a float or
+          quantized input depends on the `--input_type` flag.
+    * `GRAPHVIZ_DOT` — The GraphViz `.dot` format. This asks the
+      converter to generate a reasonable graphical representation of the graph
+      after simplification by a generic set of transformations.
+        * A typical `dot` command line to view the resulting graph might look
+          like: `dot -Tpdf -O file.dot`.
+        * Note that since passing this `--output_format` means losing the
+          information of which output format you actually care about, and
+          since the converter's transformations depend on the specific output
+          format, the resulting visualization may not fully reflect what you
+          would get on the actual output format that you are using. To avoid
+          that concern, and generally to get a visualization of exactly what
+          you get in your actual output format as opposed to just a merely
+          plausible visualization of a model, consider using `--dump_graphviz`
+          instead and keeping your true `--output_format`.
+* `--input_file`. Type: string. Specifies the path of the input file. This may
+  be either an absolute or a relative path.
+* `--output_file`. Type: string. Specifies the path of the output file.
+
+### Model flags
+
+* `--output_array`. Type: string. Specifies a single array as the output
+  activations. Incompatible with `--output_arrays`.
+* `--output_arrays`. Type: comma-separated list of strings.
Specifies a list + of arrays as the output activations, for models with multiple outputs. + Incompatible with `--output_array`. +* `--input_array`. Type: string. Specifies a single array as the input + activations. Incompatible with `--input_arrays`. +* `--input_arrays`. Type: comma-separated list of strings. Specifies a list of + arrays as the input activations, for models with multiple inputs. + Incompatible with `--input_array`. + +When `--input_array` is used, the following flags are available to provide +additional information about the single input array: + +* `--input_shape`. Type: comma-separated list of integers. Specifies the shape + of the input array, in TensorFlow convention: starting with the outer-most + dimension (the dimension corresponding to the largest offset stride in the + array layout), ending with the inner-most dimension (the dimension along + which array entries are typically laid out contiguously in memory). + * For example, a typical vision model might pass + `--input_shape=1,60,80,3`, meaning a batch size of 1 (no batching), an + input image height of 60, an input image width of 80, and an input image + depth of 3, for the typical case where the input image is a RGB bitmap + (3 channels, depth=3) stored by horizontal scanlines (so 'width' is the + next innermost dimension after 'depth'). +* `--mean_value` and `--std_value`. Type: floating-point. The decimal point + character is always the dot (`.`) regardless of the locale. These specify + the (de-)quantization parameters of the input array, to use when the output + file will take a quantized input array (that is, when passing + `--input_type=QUANTIZED_UINT8`). + * The meaning of mean_value and std_value is as follows: each quantized + value in the quantized input array will be interpreted as a mathematical + real number (i.e. as an input activation value) according to the + following formula: + * `real_value = (quantized_input_value - mean_value) / std_value`. + * When performing float inference (`--inference_type=FLOAT`) on a + quantized input, the quantized input would be immediately dequantized by + the inference code according to the above formula, before proceeding + with float inference. + * When performing quantized inference + (`--inference_type=QUANTIZED_UINT8`), no dequantization is ever to be + performed by the inference code; however, the quantization parameters of + all arrays, including those of the input arrays as specified by + mean_value and std_value, all participate in the determination of the + fixed-point multipliers used in the quantized inference code. + +When `--input_arrays` is used, the following flags are available to provide +additional information about the multiple input arrays: + +* `--input_shapes`. Type: colon-separated list of comma-separated lists of + integers. Each comma-separated list of integer gives the shape of one of the + input arrays specified in `--input_arrays`, in the same order. See + `--input_shape` for details. + * Example: `--input_arrays=foo,bar --input_shapes=2,3:4,5,6` means that + there are two input arrays. The first one, "foo", has shape [2,3]. The + second one, "bar", has shape [4,5,6]. +* `--mean_values`, `--std_values`. Type: comma-separated lists of + floating-point numbers. Each number gives the corresponding value for one of + the input arrays specified in `--input_arrays`, in the same order. See + `--mean_value`, `--std_value` for details. + +### Transformation flags + +* `--input_type`. Type: string. 
Specifies what should be the type of the
+  entries in the input array(s) in the output file, after transformations, for
+  those input arrays that are originally either floating-point or quantized
+  real numbers in the input file. If there are multiple such input arrays,
+  then they all use this type. Input arrays of other types, such as arrays of
+  plain integers or strings, are not concerned with this flag. Allowed values:
+    * `FLOAT` — Keep floating-point input arrays as such, with "float32"
+      entries. Dequantize any quantized input arrays.
+    * `QUANTIZED_UINT8` — Quantize floating-point input arrays, to have
+      8-bit unsigned integer entries. The quantization parameters are
+      specified by the `--mean_value`, `--std_value` flags as explained in the
+      documentation of these flags.
+* `--inference_type`. Type: string. Specifies what to do with floating-point
+  arrays found in the input file, besides input arrays. In other words, this
+  controls the possible quantization of floating-point weights, intermediate
+  activations, and output activations. Has no effect on arrays that aren't
+  floating-point in the input file. Allowed values:
+    * `FLOAT` — Keep floating-point arrays as floating-point in the
+      output file. This corresponds to what is commonly called "floating-point
+      inference".
+    * `QUANTIZED_UINT8` — Quantize floating-point arrays, changing their
+      storage data type from float to some integer type:
+        * All float activations are quantized as `uint8`.
+        * Almost all float weights are quantized as `uint8`.
+            * A few exceptions exist. In particular, the bias-vectors in
+              "Conv" and "FullyConnected" layers are quantized as `int32`
+              instead for technical reasons.
+* `--default_ranges_min`, `--default_ranges_max`. Type: floating-point. The
+  decimal point character is always the dot (`.`) regardless of the locale.
+  These flags enable what is called "dummy quantization". If defined, their
+  effect is to define fallback (min, max) range values for all arrays that do
+  not have a properly specified (min, max) range in the input file, thus
+  allowing quantization to proceed on non-quantized or incorrectly-quantized
+  input files. This enables easy performance prototyping ("how fast would my
+  model run if I quantized it?") but should never be used in production as the
+  resulting quantized arithmetic is inaccurate.
+* `--drop_fake_quant`. Type: boolean. Default: false. Causes fake-quantization
+  nodes to be dropped from the graph. This may be used to recover a plain
+  float graph from a fake-quantized graph.
+* `--reorder_across_fake_quant`. Type: boolean. Default: false. Normally,
+  fake-quantization nodes must be strict boundaries for graph transformations,
+  in order to ensure that quantized inference has the exact same arithmetic
+  behavior as quantized training --- which is the whole point of quantized
+  training and of FakeQuant nodes in the first place. However, that entails
+  subtle requirements on where exactly FakeQuant nodes must be placed in the
+  graph. Some quantized graphs have FakeQuant nodes at unexpected locations
+  that prevent graph transformations that are necessary in order to generate a
+  well-formed quantized representation of these graphs. Such graphs should be
+  fixed, but as a temporary work-around, setting this
+  reorder_across_fake_quant flag allows the converter to perform necessary
+  graph transformations on them, at the cost of no longer faithfully matching
+  inference and training arithmetic.
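To make the interaction between `--input_type=QUANTIZED_UINT8` and the
`--mean_value`/`--std_value` flags (described under the model flags above)
concrete, here is a minimal Python sketch of the dequantization formula
`real_value = (quantized_input_value - mean_value) / std_value`. The specific
values used (mean_value=128, std_value=127) are illustrative assumptions, not
converter defaults.

```python
import numpy as np

# Hypothetical quantization parameters, chosen only to illustrate the formula;
# the converter does not impose these as defaults.
mean_value = 128.0
std_value = 127.0

# A quantized uint8 input array, as runtime inference code would receive it
# when the model was converted with --input_type=QUANTIZED_UINT8.
quantized_input = np.array([0, 128, 255], dtype=np.uint8)

# real_value = (quantized_input_value - mean_value) / std_value
real_input = (quantized_input.astype(np.float32) - mean_value) / std_value
print(real_input)  # approximately [-1.008, 0.0, 1.0]
```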
+
+### Logging flags
+
+The following are standard Google logging flags:
+
+* `--logtostderr` redirects Google logging to standard error, typically making
+  it visible in a terminal.
+* `--v` sets verbose logging levels (for debugging purposes). Defined levels:
+    * `--v=1`: log all graph transformations that did make a change on the
+      graph.
+    * `--v=2`: log all graph transformations that did *not* make a change on
+      the graph.
+
+The following flags allow you to generate graph visualizations of the actual
+graph at various points during transformations:
+
+* `--dump_graphviz=/path` enables dumping of the graphs at various stages of
+  processing as GraphViz `.dot` files. Generally preferred over
+  `--output_format=GRAPHVIZ_DOT` as this allows you to keep your actually
+  relevant `--output_format`.
+* `--dump_graphviz_video` enables dumping of the graph after every single
+  graph transformation (for debugging purposes).
diff --git a/tensorflow/contrib/lite/toco/g3doc/python_api.md b/tensorflow/contrib/lite/toco/g3doc/python_api.md
new file mode 100644
index 00000000000..440f9c367c2
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/g3doc/python_api.md
@@ -0,0 +1,62 @@
+# TensorFlow Lite Optimizing Converter (TOCO) Python API reference
+
+## High-level overview
+
+While the TensorFlow Lite Optimizing Converter can be used from the command
+line, it is often convenient to use it as part of a Python model build and
+training script, so that conversion can be part of your model development
+pipeline. This allows you to know early and often that you are designing a
+model that can be targeted to mobile devices.
+
+## API
+
+In Python you can run `help(tf.contrib.lite)` to get documentation on functions.
+In particular, `tf.contrib.lite.toco_convert` presents a simple API and
+`tf.contrib.lite.toco_from_protos` allows more detailed control of TOCO using
+the protobuf interface to TOCO.
+
+## Example
+
+Here we show how to create a simple model and convert it to a TensorFlow Lite
+model.
+
+```python
+import tensorflow as tf
+
+img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3))
+val = img + tf.constant([1., 2., 3.]) + tf.constant([1., 4., 4.])
+out = tf.identity(val, name="out")
+with tf.Session() as sess:
+  tflite_model = tf.contrib.lite.toco_convert(sess.graph_def, [img], [out])
+  open("test.tflite", "wb").write(tflite_model)
+```
+
+**NOTE** Currently, TOCO causes a fatal error in the Python interpreter when
+conversion fails. This will be remedied as soon as possible.
+
+## Example 2: Export with variables
+
+If a model has variables, they need to be turned into constants.
This process is +known as freezing, and it can actually be accomplished with + +```python +import tensorflow as tf + +img = tf.placeholder(name="img", dtype=tf.float32, shape=(1, 64, 64, 3)) +var = tf.get_variable("weights", dtype=tf.float32, shape=(1,64,64,3)) +val = img + var + +def canonical_name(x): + return x.name.split(":")[0] + +out = tf.identity(val, name="out") +with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + out_tensors = [out] + frozen_graphdef = tf.graph_util.convert_variables_to_constants( + sess, sess.graph_def, map(canonical_name, out_tensors)) + tflite_model = tf.contrib.lite.toco_convert( + frozen_graphdef, [img], out_tensors) + open("converted_model.tflite", "wb").write(tflite_model) +``` diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops.py b/tensorflow/contrib/metrics/python/ops/metric_ops.py index 24692ff12fb..6e2190cb7af 100644 --- a/tensorflow/contrib/metrics/python/ops/metric_ops.py +++ b/tensorflow/contrib/metrics/python/ops/metric_ops.py @@ -60,61 +60,6 @@ def _safe_div(numerator, denominator, name): name=name) -# TODO(ptucker): Move this somewhere common, to share with ops/losses/losses.py. -def _assert_weights_rank(weights, values): - """`weights` rank must be either `0`, or the same as 'values'.""" - return check_ops.assert_rank_in(weights, (0, array_ops.rank(values))) - - -def _count_condition(values, - weights=None, - metrics_collections=None, - updates_collections=None): - """Sums the weights of cases where the given values are True. - - If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. - - Args: - values: A `bool` `Tensor` of arbitrary size. - weights: Optional `Tensor` whose rank is either 0, or the same rank as - `values`, and must be broadcastable to `values` (i.e., all dimensions - must be either `1`, or the same as the corresponding `values` - dimension). - metrics_collections: An optional list of collections that the metric - value variable should be added to. - updates_collections: An optional list of collections that the metric update - ops should be added to. - - Returns: - value_tensor: A `Tensor` representing the current value of the metric. - update_op: An operation that accumulates the error from a batch of data. - - Raises: - ValueError: If `weights` is not `None` and its shape doesn't match `values`, - or if either `metrics_collections` or `updates_collections` are not a list - or tuple. - """ - check_ops.assert_type(values, dtypes.bool) - count_ = metrics_impl.metric_variable([], dtypes.float32, name='count') - - values = math_ops.to_float(values) - if weights is not None: - weights = math_ops.to_float(weights) - with ops.control_dependencies((_assert_weights_rank(weights, values),)): - values = math_ops.multiply(values, weights) - - value_tensor = array_ops.identity(count_) - update_op = state_ops.assign_add(count_, math_ops.reduce_sum(values)) - - if metrics_collections: - ops.add_to_collections(metrics_collections, value_tensor) - - if updates_collections: - ops.add_to_collections(updates_collections, update_op) - - return value_tensor, update_op - - def streaming_true_positives(predictions, labels, weights=None, @@ -194,17 +139,13 @@ def streaming_true_negatives(predictions, either `metrics_collections` or `updates_collections` are not a list or tuple. 
""" - with variable_scope.variable_scope(name, 'true_negatives', - (predictions, labels, weights)): - - predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access - predictions=math_ops.cast(predictions, dtype=dtypes.bool), - labels=math_ops.cast(labels, dtype=dtypes.bool), - weights=weights) - is_true_negative = math_ops.logical_and( - math_ops.equal(labels, False), math_ops.equal(predictions, False)) - return _count_condition(is_true_negative, weights, metrics_collections, - updates_collections) + return metrics.true_negatives( + predictions=predictions, + labels=labels, + weights=weights, + metrics_collections=metrics_collections, + updates_collections=updates_collections, + name=name) def streaming_false_positives(predictions, @@ -294,34 +235,6 @@ def streaming_false_negatives(predictions, name=name) -# TODO(ptucker): Move this somewhere common, to share with ops/losses/losses.py. -def _broadcast_weights(weights, values): - """Broadcast `weights` to the same shape as `values`. - - This returns a version of `weights` following the same broadcast rules as - `mul(weights, values)`. When computing a weighted average, use this function - to broadcast `weights` before summing them; e.g., - `reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`. - - Args: - weights: `Tensor` whose rank is either 0, or the same rank as `values`, and - must be broadcastable to `values` (i.e., all dimensions must be either - `1`, or the same as the corresponding `values` dimension). - values: `Tensor` of any shape. - - Returns: - `weights` broadcast to `values` shape. - """ - with ops.name_scope(None, 'broadcast_weights', (values, weights)) as scope: - weights_shape = weights.get_shape() - values_shape = values.get_shape() - if (weights_shape.is_fully_defined() and values_shape.is_fully_defined() and - weights_shape.is_compatible_with(values_shape)): - return weights - with ops.control_dependencies((_assert_weights_rank(weights, values),)): - return math_ops.multiply(weights, array_ops.ones_like(values), name=scope) - - def streaming_mean(values, weights=None, metrics_collections=None, @@ -423,8 +336,10 @@ def streaming_mean_tensor(values, updates_collections=updates_collections, name=name) -@deprecated(None, "Please switch to tf.metrics.accuracy. Note that the order " - "of the inputs of labels and predictions have been switched.") + +@deprecated( + None, 'Please switch to tf.metrics.accuracy. Note that the order of the ' + 'labels and predictions arguments has been switched.') def streaming_accuracy(predictions, labels, weights=None, @@ -592,53 +507,6 @@ def streaming_recall(predictions, name=name) -def _true_negatives(labels, - predictions, - weights=None, - metrics_collections=None, - updates_collections=None, - name=None): - """Sum the weights of true negatives. - - If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. - - Args: - labels: The ground truth values, a `Tensor` whose dimensions must match - `predictions`. Will be cast to `bool`. - predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will - be cast to `bool`. - weights: Optional `Tensor` whose rank is either 0, or the same rank as - `labels`, and must be broadcastable to `labels` (i.e., all dimensions must - be either `1`, or the same as the corresponding `labels` dimension). - metrics_collections: An optional list of collections that the metric - value variable should be added to. 
- updates_collections: An optional list of collections that the metric update - ops should be added to. - name: An optional variable_scope name. - - Returns: - value_tensor: A `Tensor` representing the current value of the metric. - update_op: An operation that accumulates the error from a batch of data. - - Raises: - ValueError: If `predictions` and `labels` have mismatched shapes, or if - `weights` is not `None` and its shape doesn't match `predictions`, or if - either `metrics_collections` or `updates_collections` are not a list or - tuple. - """ - with variable_scope.variable_scope(name, 'true_negatives', - (predictions, labels, weights)): - - predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access - predictions=math_ops.cast(predictions, dtype=dtypes.bool), - labels=math_ops.cast(labels, dtype=dtypes.bool), - weights=weights) - is_true_negative = math_ops.logical_and( - math_ops.equal(labels, False), math_ops.equal(predictions, False)) - return _count_condition(is_true_negative, weights, metrics_collections, - updates_collections) - - def streaming_false_positive_rate(predictions, labels, weights=None, @@ -696,16 +564,16 @@ def streaming_false_positive_rate(predictions, weights=weights) false_p, false_positives_update_op = metrics.false_positives( - labels, - predictions, - weights, + labels=labels, + predictions=predictions, + weights=weights, metrics_collections=None, updates_collections=None, name=None) - true_n, true_negatives_update_op = _true_negatives( - labels, - predictions, - weights, + true_n, true_negatives_update_op = metrics.true_negatives( + labels=labels, + predictions=predictions, + weights=weights, metrics_collections=None, updates_collections=None, name=None) @@ -1102,8 +970,10 @@ def streaming_curve_points(labels=None, return points, update_op -@deprecated(None, "Please switch to tf.metrics.auc. Note that the order of " - "the inputs of labels and predictions have been switched.") + +@deprecated( + None, 'Please switch to tf.metrics.auc. Note that the order of the ' + 'labels and predictions arguments has been switched.') def streaming_auc(predictions, labels, weights=None, @@ -1636,9 +1506,10 @@ def streaming_sensitivity_at_specificity(predictions, updates_collections=updates_collections, name=name) + @deprecated( - None, "Please switch to tf.metrics.precision_at_thresholds. Note that the " - "order of of the inputs of labels and predictions have been switched.") + None, 'Please switch to tf.metrics.precision_at_thresholds. Note that the ' + 'order of the labels and predictions arguments has been switched.') def streaming_precision_at_thresholds(predictions, labels, thresholds, @@ -1697,9 +1568,10 @@ def streaming_precision_at_thresholds(predictions, updates_collections=updates_collections, name=name) + @deprecated( - None, "Please switch to tf.metrics.recall_at_thresholds. Note that the " - "order of of the inputs of labels and predictions have been switched.") + None, 'Please switch to tf.metrics.recall_at_thresholds. 
Note that the ' + 'order of the labels and predictions arguments has been switched.') def streaming_recall_at_thresholds(predictions, labels, thresholds, @@ -1909,8 +1781,8 @@ def _at_k_name(name, k=None, class_id=None): return name -@deprecated("2016-11-08", "Please use `streaming_sparse_recall_at_k`, " - "and reshape labels from [batch_size] to [batch_size, 1].") +@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, ' + 'and reshape labels from [batch_size] to [batch_size, 1].') def streaming_recall_at_k(predictions, labels, k, @@ -2543,7 +2415,8 @@ def streaming_sparse_average_precision_at_top_k(top_k_predictions, updates_collections=updates_collections, name=name) -@deprecated(None, "Please switch to tf.metrics.mean.") + +@deprecated(None, 'Please switch to tf.metrics.mean.') def streaming_mean_absolute_error(predictions, labels, weights=None, diff --git a/tensorflow/python/feature_column/feature_column.py b/tensorflow/python/feature_column/feature_column.py index 190a25d4d79..5ff75162468 100644 --- a/tensorflow/python/feature_column/feature_column.py +++ b/tensorflow/python/feature_column/feature_column.py @@ -233,6 +233,8 @@ def input_layer(features, ordered_columns = [] for column in sorted(feature_columns, key=lambda x: x.name): ordered_columns.append(column) + # TODO(b/67952670): Implement a column._var_scope_name property and use + # that instead of column.name. with variable_scope.variable_scope(None, default_name=column.name): tensor = column._get_dense_tensor( # pylint: disable=protected-access builder, @@ -340,6 +342,8 @@ def linear_model(features, ordered_columns = [] builder = _LazyBuilder(features) for column in sorted(feature_columns, key=lambda x: x.name): + # TODO(b/67952670): Implement a column._var_scope_name property and use + # that instead of column.name. with variable_scope.variable_scope(None, default_name=column.name): ordered_columns.append(column) if isinstance(column, _CategoricalColumn): @@ -489,15 +493,36 @@ def embedding_column( representation (e.g., to feed to a DNN). Inputs must be a `_CategoricalColumn` created by any of the - `categorical_column_*` function. Here is an example embedding of an identity - column for a DNN model: + `categorical_column_*` function. Here is an example of using + `embedding_column` with `DNNClassifier`: ```python video_id = categorical_column_with_identity( key='video_id', num_buckets=1000000, default_value=0) columns = [embedding_column(video_id, 9),...] - features = tf.parse_example(..., features=make_parse_example_spec(columns)) - dense_tensor = input_layer(features, columns) + + estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) + + label_column = ... + def input_fn(): + features = tf.parse_example( + ..., features=make_parse_example_spec(columns + [label_column])) + labels = features.pop(label_column.name) + return features, labels + + estimator.train(input_fn=input_fn, steps=100) + ``` + + Here is an example using `embedding_column` with model_fn: + + ```python + def model_fn(features, ...): + video_id = categorical_column_with_identity( + key='video_id', num_buckets=1000000, default_value=0) + columns = [embedding_column(video_id, 9),...] + dense_tensor = input_layer(features, columns) + # Form DNN layers, calculate loss, and return EstimatorSpec. + ... 
``` Args: @@ -551,12 +576,144 @@ def embedding_column( dimension=dimension, combiner=combiner, initializer=initializer, + shared_embedding_collection_name=None, ckpt_to_load_from=ckpt_to_load_from, tensor_name_in_ckpt=tensor_name_in_ckpt, max_norm=max_norm, trainable=trainable) +def _shared_embedding_columns( + categorical_columns, dimension, combiner='mean', initializer=None, + shared_embedding_collection_name=None, ckpt_to_load_from=None, + tensor_name_in_ckpt=None, max_norm=None, trainable=True): + """List of `_DenseColumn`s that convert from sparse, categorical input. + + This is similar to `embedding_column`, except that that it produces a list of + embedding columns that share the same embedding weights. + + Use this when your inputs are sparse and of the same type (e.g. watched and + impression video IDs that share the same vocabulary), and you want to convert + them to a dense representation (e.g., to feed to a DNN). + + Inputs must be a list of `_CategoricalColumn` created by any of the + `categorical_column_*` function. They must all be of the same type and have + the same arguments except `key`. E.g. they can be + categorical_column_with_vocabulary_file with the same vocabulary_file. Some or + all columns could also be weighted_categorical_column. + + Here is an example embedding of two features for a DNNClassifier model: + + ```python + watched_video_id = categorical_column_with_vocabulary_file( + 'watched_video_id', video_vocabulary_file, video_vocabulary_size) + impression_video_id = categorical_column_with_vocabulary_file( + 'impression_video_id', video_vocabulary_file, video_vocabulary_size) + columns = shared_embedding_columns( + [watched_video_id, impression_video_id], dimension=10) + + estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...) + + label_column = ... + def input_fn(): + features = tf.parse_example( + ..., features=make_parse_example_spec(columns + [label_column])) + labels = features.pop(label_column.name) + return features, labels + + estimator.train(input_fn=input_fn, steps=100) + ``` + + Here is an example using `shared_embedding_columns` with model_fn: + + ```python + def model_fn(features, ...): + watched_video_id = categorical_column_with_vocabulary_file( + 'watched_video_id', video_vocabulary_file, video_vocabulary_size) + impression_video_id = categorical_column_with_vocabulary_file( + 'impression_video_id', video_vocabulary_file, video_vocabulary_size) + columns = shared_embedding_columns( + [watched_video_id, impression_video_id], dimension=10) + dense_tensor = input_layer(features, columns) + # Form DNN layers, calculate loss, and return EstimatorSpec. + ... + ``` + + Args: + categorical_columns: List of `_CategoricalColumn`s created by a + `categorical_column_with_*` function. These columns produce the sparse IDs + that are inputs to the embedding lookup. All columns must be of the same + type and have the same arguments except `key`. E.g. they can be + categorical_column_with_vocabulary_file with the same vocabulary_file. + Some or all columns could also be weighted_categorical_column. + dimension: An integer specifying dimension of the embedding, must be > 0. + combiner: A string specifying how to reduce if there are multiple entries + in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with + 'mean' the default. 'sqrtn' often achieves good accuracy, in particular + with bag-of-words columns. Each of this can be thought as example level + normalizations on the column. 
For more information, see + `tf.embedding_lookup_sparse`. + initializer: A variable initializer function to be used in embedding + variable initialization. If not specified, defaults to + `tf.truncated_normal_initializer` with mean `0.0` and standard deviation + `1/sqrt(dimension)`. + shared_embedding_collection_name: Optional name of the collection where + shared embedding weights are added. If not given, a reasonable name will + be chosen based on the names of `categorical_columns`. + ckpt_to_load_from: String representing checkpoint name/pattern from which to + restore column weights. Required if `tensor_name_in_ckpt` is not `None`. + tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from + which to restore the column weights. Required if `ckpt_to_load_from` is + not `None`. + max_norm: If not `None`, embedding values are l2-normalized to this value. + trainable: Whether or not the embedding is trainable. Default is True. + + Returns: + A list of `_DenseColumn`s that converts from sparse input. The order of + results follows the ordering of `categorical_columns`. + + Raises: + ValueError: if `dimension` not > 0. + ValueError: if any of the given `categorical_columns` is of different type + or has different arguments than the others. + ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt` + is specified. + ValueError: if `initializer` is specified and is not callable. + """ + if (dimension is None) or (dimension < 1): + raise ValueError('Invalid dimension {}.'.format(dimension)) + if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None): + raise ValueError('Must specify both `ckpt_to_load_from` and ' + '`tensor_name_in_ckpt` or none of them.') + + if (initializer is not None) and (not callable(initializer)): + raise ValueError('initializer must be callable if specified.') + if initializer is None: + initializer = init_ops.truncated_normal_initializer( + mean=0.0, stddev=1 / math.sqrt(dimension)) + # TODO(b/67952670): Validate categorical_columns. + if not shared_embedding_collection_name: + # Sort the columns so the name is deterministic even if the user passes + # columns from an unsorted collection, such as dict.values(). 
+ sorted_columns = sorted(categorical_columns, key=lambda x: x.name) + shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns) + shared_embedding_collection_name += '_shared_embedding' + + result = [] + for column in categorical_columns: + result.append(_EmbeddingColumn( + categorical_column=column, + dimension=dimension, + combiner=combiner, + initializer=initializer, + shared_embedding_collection_name=shared_embedding_collection_name, + ckpt_to_load_from=ckpt_to_load_from, + tensor_name_in_ckpt=tensor_name_in_ckpt, + max_norm=max_norm, + trainable=trainable)) + return result + + def numeric_column(key, shape=(1,), default_value=None, @@ -1847,14 +2004,18 @@ class _EmbeddingColumn( _DenseColumn, collections.namedtuple('_EmbeddingColumn', ( 'categorical_column', 'dimension', 'combiner', 'initializer', - 'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable' + 'shared_embedding_collection_name', 'ckpt_to_load_from', + 'tensor_name_in_ckpt', 'max_norm', 'trainable' ))): - """See `_embedding_column`.""" + """See `embedding_column`.""" @property def name(self): if not hasattr(self, '_name'): - self._name = '{}_embedding'.format(self.categorical_column.name) + if self.shared_embedding_collection_name: + self._name = '{}_shared_embedding'.format(self.categorical_column.name) + else: + self._name = '{}_embedding'.format(self.categorical_column.name) return self._name @property @@ -1877,14 +2038,47 @@ class _EmbeddingColumn( sparse_ids = sparse_tensors.id_tensor sparse_weights = sparse_tensors.weight_tensor - # Create embedding weight, and restore from checkpoint if necessary. - embedding_weights = variable_scope.get_variable( - name='embedding_weights', - shape=(self.categorical_column._num_buckets, self.dimension), # pylint: disable=protected-access - dtype=dtypes.float32, - initializer=self.initializer, - trainable=self.trainable and trainable, - collections=weight_collections) + embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access + if self.shared_embedding_collection_name: + shared_embedding_collection = ops.get_collection( + self.shared_embedding_collection_name) + if shared_embedding_collection: + if len(shared_embedding_collection) > 1: + raise ValueError( + 'Collection {} can only contain one variable. ' + 'Suggested fix A: Choose a unique name for this collection. ' + 'Suggested fix B: Do not add any variables to this collection. ' + 'The feature_column library already adds a variable under the ' + 'hood.'.format(shared_embedding_collection)) + embedding_weights = shared_embedding_collection[0] + if embedding_weights.shape != embedding_shape: + raise ValueError( + 'Shared embedding collection {} contains variable {} of ' + 'unexpected shape {}. Expected shape is {}. ' + 'Suggested fix A: Choose a unique name for this collection. ' + 'Suggested fix B: Do not add any variables to this collection. 
' + 'The feature_column library already adds a variable under the ' + 'hood.'.format( + self.shared_embedding_collection_name, embedding_weights.name, + embedding_weights.shape, embedding_shape)) + else: + embedding_weights = variable_scope.get_variable( + name=self.shared_embedding_collection_name + '_weights', + shape=embedding_shape, + dtype=dtypes.float32, + initializer=self.initializer, + trainable=self.trainable and trainable, + collections=weight_collections) + ops.add_to_collection( + self.shared_embedding_collection_name, embedding_weights) + else: + embedding_weights = variable_scope.get_variable( + name='embedding_weights', + shape=embedding_shape, + dtype=dtypes.float32, + initializer=self.initializer, + trainable=self.trainable and trainable, + collections=weight_collections) if self.ckpt_to_load_from is not None: to_restore = embedding_weights if isinstance(to_restore, variables.PartitionedVariable): diff --git a/tensorflow/python/feature_column/feature_column_test.py b/tensorflow/python/feature_column/feature_column_test.py index e57e9a9836c..4b06a85ad34 100644 --- a/tensorflow/python/feature_column/feature_column_test.py +++ b/tensorflow/python/feature_column/feature_column_test.py @@ -27,6 +27,7 @@ from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.client import session from tensorflow.python.estimator.inputs import numpy_io +from tensorflow.python.feature_column import feature_column as fc_lib from tensorflow.python.feature_column import feature_column_lib as fc from tensorflow.python.feature_column.feature_column import _CategoricalColumn from tensorflow.python.feature_column.feature_column import _DenseColumn @@ -3403,6 +3404,7 @@ class EmbeddingColumnTest(test.TestCase): self.assertEqual('mean', embedding_column.combiner) self.assertIsNotNone(embedding_column.initializer) self.assertIsNone(embedding_column.ckpt_to_load_from) + self.assertIsNone(embedding_column.shared_embedding_collection_name) self.assertIsNone(embedding_column.tensor_name_in_ckpt) self.assertIsNone(embedding_column.max_norm) self.assertTrue(embedding_column.trainable) @@ -3426,6 +3428,7 @@ class EmbeddingColumnTest(test.TestCase): self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('my_combiner', embedding_column.combiner) self.assertEqual('my_initializer', embedding_column.initializer()) + self.assertIsNone(embedding_column.shared_embedding_collection_name) self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from) self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt) self.assertEqual(42., embedding_column.max_norm) @@ -3456,6 +3459,7 @@ class EmbeddingColumnTest(test.TestCase): self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('my_combiner', embedding_column.combiner) self.assertEqual('my_initializer', embedding_column.initializer()) + self.assertIsNone(embedding_column.shared_embedding_collection_name) self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from) self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt) self.assertEqual(42., embedding_column.max_norm) @@ -3979,6 +3983,269 @@ class EmbeddingColumnTest(test.TestCase): self.assertAllEqual(expected_lookups, input_layer.eval()) +class SharedEmbeddingColumnTest(test.TestCase): + + def test_defaults(self): + categorical_column_a = fc.categorical_column_with_identity( + key='aaa', num_buckets=3) + categorical_column_b = fc.categorical_column_with_identity( + key='bbb', 
num_buckets=3) + embedding_dimension = 2 + embedding_column_b, embedding_column_a = fc_lib._shared_embedding_columns( + [categorical_column_b, categorical_column_a], + dimension=embedding_dimension) + self.assertIs(categorical_column_a, embedding_column_a.categorical_column) + self.assertIs(categorical_column_b, embedding_column_b.categorical_column) + self.assertEqual(embedding_dimension, embedding_column_a.dimension) + self.assertEqual(embedding_dimension, embedding_column_b.dimension) + self.assertEqual('mean', embedding_column_a.combiner) + self.assertEqual('mean', embedding_column_b.combiner) + self.assertIsNotNone(embedding_column_a.initializer) + self.assertIsNotNone(embedding_column_b.initializer) + self.assertIsNone(embedding_column_a.ckpt_to_load_from) + self.assertIsNone(embedding_column_b.ckpt_to_load_from) + self.assertEqual('aaa_bbb_shared_embedding', + embedding_column_a.shared_embedding_collection_name) + self.assertEqual('aaa_bbb_shared_embedding', + embedding_column_b.shared_embedding_collection_name) + self.assertIsNone(embedding_column_a.tensor_name_in_ckpt) + self.assertIsNone(embedding_column_b.tensor_name_in_ckpt) + self.assertIsNone(embedding_column_a.max_norm) + self.assertIsNone(embedding_column_b.max_norm) + self.assertTrue(embedding_column_a.trainable) + self.assertTrue(embedding_column_b.trainable) + self.assertEqual('aaa_shared_embedding', embedding_column_a.name) + self.assertEqual('bbb_shared_embedding', embedding_column_b.name) + self.assertEqual( + (embedding_dimension,), embedding_column_a._variable_shape) + self.assertEqual( + (embedding_dimension,), embedding_column_b._variable_shape) + self.assertEqual({ + 'aaa': parsing_ops.VarLenFeature(dtypes.int64) + }, embedding_column_a._parse_example_spec) + self.assertEqual({ + 'bbb': parsing_ops.VarLenFeature(dtypes.int64) + }, embedding_column_b._parse_example_spec) + + def test_all_constructor_args(self): + categorical_column_a = fc.categorical_column_with_identity( + key='aaa', num_buckets=3) + categorical_column_b = fc.categorical_column_with_identity( + key='bbb', num_buckets=3) + embedding_dimension = 2 + embedding_column_a, embedding_column_b = fc_lib._shared_embedding_columns( + [categorical_column_a, categorical_column_b], + dimension=embedding_dimension, + combiner='my_combiner', + initializer=lambda: 'my_initializer', + shared_embedding_collection_name='shared_embedding_collection_name', + ckpt_to_load_from='my_ckpt', + tensor_name_in_ckpt='my_ckpt_tensor', + max_norm=42., + trainable=False) + self.assertIs(categorical_column_a, embedding_column_a.categorical_column) + self.assertIs(categorical_column_b, embedding_column_b.categorical_column) + self.assertEqual(embedding_dimension, embedding_column_a.dimension) + self.assertEqual(embedding_dimension, embedding_column_b.dimension) + self.assertEqual('my_combiner', embedding_column_a.combiner) + self.assertEqual('my_combiner', embedding_column_b.combiner) + self.assertEqual('my_initializer', embedding_column_a.initializer()) + self.assertEqual('my_initializer', embedding_column_b.initializer()) + self.assertEqual('shared_embedding_collection_name', + embedding_column_a.shared_embedding_collection_name) + self.assertEqual('shared_embedding_collection_name', + embedding_column_b.shared_embedding_collection_name) + self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from) + self.assertEqual('my_ckpt', embedding_column_b.ckpt_to_load_from) + self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt) + 
self.assertEqual('my_ckpt_tensor', embedding_column_b.tensor_name_in_ckpt) + self.assertEqual(42., embedding_column_a.max_norm) + self.assertEqual(42., embedding_column_b.max_norm) + self.assertFalse(embedding_column_a.trainable) + self.assertFalse(embedding_column_b.trainable) + self.assertEqual('aaa_shared_embedding', embedding_column_a.name) + self.assertEqual('bbb_shared_embedding', embedding_column_b.name) + self.assertEqual( + (embedding_dimension,), embedding_column_a._variable_shape) + self.assertEqual( + (embedding_dimension,), embedding_column_b._variable_shape) + self.assertEqual({ + 'aaa': parsing_ops.VarLenFeature(dtypes.int64) + }, embedding_column_a._parse_example_spec) + self.assertEqual({ + 'bbb': parsing_ops.VarLenFeature(dtypes.int64) + }, embedding_column_b._parse_example_spec) + + def test_deep_copy(self): + categorical_column_a = fc.categorical_column_with_identity( + key='aaa', num_buckets=3) + categorical_column_b = fc.categorical_column_with_identity( + key='bbb', num_buckets=3) + embedding_dimension = 2 + original_a, _ = fc_lib._shared_embedding_columns( + [categorical_column_a, categorical_column_b], + dimension=embedding_dimension, + combiner='my_combiner', + initializer=lambda: 'my_initializer', + shared_embedding_collection_name='shared_embedding_collection_name', + ckpt_to_load_from='my_ckpt', + tensor_name_in_ckpt='my_ckpt_tensor', + max_norm=42., trainable=False) + for embedding_column_a in (original_a, copy.deepcopy(original_a)): + self.assertEqual('aaa', embedding_column_a.categorical_column.name) + self.assertEqual(3, embedding_column_a.categorical_column._num_buckets) + self.assertEqual({ + 'aaa': parsing_ops.VarLenFeature(dtypes.int64) + }, embedding_column_a.categorical_column._parse_example_spec) + + self.assertEqual(embedding_dimension, embedding_column_a.dimension) + self.assertEqual('my_combiner', embedding_column_a.combiner) + self.assertEqual('my_initializer', embedding_column_a.initializer()) + self.assertEqual('shared_embedding_collection_name', + embedding_column_a.shared_embedding_collection_name) + self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from) + self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt) + self.assertEqual(42., embedding_column_a.max_norm) + self.assertFalse(embedding_column_a.trainable) + self.assertEqual('aaa_shared_embedding', embedding_column_a.name) + self.assertEqual( + (embedding_dimension,), embedding_column_a._variable_shape) + self.assertEqual({ + 'aaa': parsing_ops.VarLenFeature(dtypes.int64) + }, embedding_column_a._parse_example_spec) + + def test_invalid_initializer(self): + categorical_column_a = fc.categorical_column_with_identity( + key='aaa', num_buckets=3) + categorical_column_b = fc.categorical_column_with_identity( + key='bbb', num_buckets=3) + with self.assertRaisesRegexp(ValueError, 'initializer must be callable'): + fc_lib._shared_embedding_columns( + [categorical_column_a, categorical_column_b], dimension=2, + initializer='not_fn') + + def test_parse_example(self): + a = fc.categorical_column_with_vocabulary_list( + key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) + b = fc.categorical_column_with_vocabulary_list( + key='bbb', vocabulary_list=('omar', 'stringer', 'marlo')) + a_embedded, b_embedded = fc_lib._shared_embedding_columns( + [a, b], dimension=2) + data = example_pb2.Example(features=feature_pb2.Features( + feature={ + 'aaa': + feature_pb2.Feature(bytes_list=feature_pb2.BytesList( + value=[b'omar', b'stringer'])), + 'bbb': + 
feature_pb2.Feature(bytes_list=feature_pb2.BytesList( + value=[b'stringer', b'marlo'])), + })) + features = parsing_ops.parse_example( + serialized=[data.SerializeToString()], + features=fc.make_parse_example_spec([a_embedded, b_embedded])) + self.assertIn('aaa', features) + self.assertIn('bbb', features) + with self.test_session(): + _assert_sparse_tensor_value( + self, + sparse_tensor.SparseTensorValue( + indices=[[0, 0], [0, 1]], + values=np.array([b'omar', b'stringer'], dtype=np.object_), + dense_shape=[1, 2]), + features['aaa'].eval()) + _assert_sparse_tensor_value( + self, + sparse_tensor.SparseTensorValue( + indices=[[0, 0], [0, 1]], + values=np.array([b'stringer', b'marlo'], dtype=np.object_), + dense_shape=[1, 2]), + features['bbb'].eval()) + + def test_input_layer(self): + # Inputs. + vocabulary_size = 3 + sparse_input_a = sparse_tensor.SparseTensorValue( + # example 0, ids [2] + # example 1, ids [0, 1] + # example 2, ids [] + # example 3, ids [1] + indices=((0, 0), (1, 0), (1, 4), (3, 0)), + values=(2, 0, 1, 1), + dense_shape=(4, 5)) + sparse_input_b = sparse_tensor.SparseTensorValue( + # example 0, ids [0] + # example 1, ids [] + # example 2, ids [] + # example 3, ids [1] + indices=((0, 0), (3, 0)), + values=(0, 1), + dense_shape=(4, 5)) + + # Embedding variable. + embedding_dimension = 2 + embedding_values = ( + (1., 2.), # id 0 + (3., 5.), # id 1 + (7., 11.) # id 2 + ) + def _initializer(shape, dtype, partition_info): + self.assertAllEqual((vocabulary_size, embedding_dimension), shape) + self.assertEqual(dtypes.float32, dtype) + self.assertIsNone(partition_info) + return embedding_values + + # Expected lookup result, using combiner='mean'. + expected_lookups = ( + # example 0: + # A ids [2], embedding = [7, 11] + # B ids [0], embedding = [1, 2] + (7., 11., 1., 2.), + # example 1: + # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] + # B ids [], embedding = [0, 0] + (2., 3.5, 0., 0.), + # example 2: + # A ids [], embedding = [0, 0] + # B ids [], embedding = [0, 0] + (0., 0., 0., 0.), + # example 3: + # A ids [1], embedding = [3, 5] + # B ids [1], embedding = [3, 5] + (3., 5., 3., 5.), + ) + + # Build columns. + categorical_column_a = fc.categorical_column_with_identity( + key='aaa', num_buckets=vocabulary_size) + categorical_column_b = fc.categorical_column_with_identity( + key='bbb', num_buckets=vocabulary_size) + embedding_column_a, embedding_column_b = fc_lib._shared_embedding_columns( + [categorical_column_a, categorical_column_b], + dimension=embedding_dimension, initializer=_initializer) + + # Provide sparse input and get dense result. + input_layer = fc.input_layer( + features={'aaa': sparse_input_a, 'bbb': sparse_input_b}, + feature_columns=(embedding_column_b, embedding_column_a)) + + # Assert expected embedding variable and lookups. 
+ global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) + self.assertItemsEqual( + ['input_layer/aaa_shared_embedding/aaa_bbb_shared_embedding_weights:0'], + tuple([v.name for v in global_vars])) + trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) + self.assertItemsEqual( + ['input_layer/aaa_shared_embedding/aaa_bbb_shared_embedding_weights:0'], + tuple([v.name for v in trainable_vars])) + shared_embedding_vars = ops.get_collection('aaa_bbb_shared_embedding') + self.assertItemsEqual( + ['input_layer/aaa_shared_embedding/aaa_bbb_shared_embedding_weights:0'], + tuple([v.name for v in shared_embedding_vars])) + with _initialized_session(): + self.assertAllEqual(embedding_values, trainable_vars[0].eval()) + self.assertAllEqual(expected_lookups, input_layer.eval()) + + class WeightedCategoricalColumnTest(test.TestCase): def test_defaults(self): diff --git a/tensorflow/python/kernel_tests/metrics_test.py b/tensorflow/python/kernel_tests/metrics_test.py index 971dc9d5530..3358b78efd2 100644 --- a/tensorflow/python/kernel_tests/metrics_test.py +++ b/tensorflow/python/kernel_tests/metrics_test.py @@ -3857,6 +3857,56 @@ class MeanPerClassAccuracyTest(test.TestCase): self.assertAlmostEqual(desired_mean_accuracy, mean_accuracy.eval()) +class FalseNegativesTest(test.TestCase): + + def setUp(self): + np.random.seed(1) + ops.reset_default_graph() + + def testVars(self): + metrics.false_negatives( + labels=(0, 1, 0, 1), + predictions=(0, 0, 1, 1)) + _assert_metric_variables(self, ('false_negatives/count:0',)) + + def testUnweighted(self): + labels = constant_op.constant(((0, 1, 0, 1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + tn, tn_update_op = metrics.false_negatives( + labels=labels, predictions=predictions) + + with self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(3., tn_update_op.eval()) + self.assertAllClose(3., tn.eval()) + + def testWeighted(self): + labels = constant_op.constant(((0, 1, 0, 1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + weights = constant_op.constant((1., 1.5, 2., 2.5)) + tn, tn_update_op = metrics.false_negatives( + labels=labels, predictions=predictions, weights=weights) + + with self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(5., tn_update_op.eval()) + self.assertAllClose(5., tn.eval()) + + class FalseNegativesAtThresholdsTest(test.TestCase): def setUp(self): @@ -3906,6 +3956,56 @@ class FalseNegativesAtThresholdsTest(test.TestCase): self.assertAllEqual((0.0, 8.0, 11.0), fn.eval()) +class FalsePositivesTest(test.TestCase): + + def setUp(self): + np.random.seed(1) + ops.reset_default_graph() + + def testVars(self): + metrics.false_positives( + labels=(0, 1, 0, 1), + predictions=(0, 0, 1, 1)) + _assert_metric_variables(self, ('false_positives/count:0',)) + + def testUnweighted(self): + labels = constant_op.constant(((0, 1, 0, 1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + tn, tn_update_op = metrics.false_positives( + labels=labels, predictions=predictions) + + with 
self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(7., tn_update_op.eval()) + self.assertAllClose(7., tn.eval()) + + def testWeighted(self): + labels = constant_op.constant(((0, 1, 0, 1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + weights = constant_op.constant((1., 1.5, 2., 2.5)) + tn, tn_update_op = metrics.false_positives( + labels=labels, predictions=predictions, weights=weights) + + with self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(14., tn_update_op.eval()) + self.assertAllClose(14., tn.eval()) + + class FalsePositivesAtThresholdsTest(test.TestCase): def setUp(self): @@ -3957,6 +4057,56 @@ class FalsePositivesAtThresholdsTest(test.TestCase): self.assertAllEqual((125.0, 42.0, 12.0), fp.eval()) +class TrueNegativesTest(test.TestCase): + + def setUp(self): + np.random.seed(1) + ops.reset_default_graph() + + def testVars(self): + metrics.true_negatives( + labels=(0, 1, 0, 1), + predictions=(0, 0, 1, 1)) + _assert_metric_variables(self, ('true_negatives/count:0',)) + + def testUnweighted(self): + labels = constant_op.constant(((0, 1, 0, 1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + tn, tn_update_op = metrics.true_negatives( + labels=labels, predictions=predictions) + + with self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(3., tn_update_op.eval()) + self.assertAllClose(3., tn.eval()) + + def testWeighted(self): + labels = constant_op.constant(((0, 1, 0, 1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + weights = constant_op.constant((1., 1.5, 2., 2.5)) + tn, tn_update_op = metrics.true_negatives( + labels=labels, predictions=predictions, weights=weights) + + with self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(4., tn_update_op.eval()) + self.assertAllClose(4., tn.eval()) + + class TrueNegativesAtThresholdsTest(test.TestCase): def setUp(self): @@ -4006,6 +4156,56 @@ class TrueNegativesAtThresholdsTest(test.TestCase): self.assertAllEqual((5.0, 15.0, 23.0), tn.eval()) +class TruePositivesTest(test.TestCase): + + def setUp(self): + np.random.seed(1) + ops.reset_default_graph() + + def testVars(self): + metrics.true_positives( + labels=(0, 1, 0, 1), + predictions=(0, 0, 1, 1)) + _assert_metric_variables(self, ('true_positives/count:0',)) + + def testUnweighted(self): + labels = constant_op.constant(((0, 1, 0, 1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + tn, tn_update_op = metrics.true_positives( + labels=labels, predictions=predictions) + + with self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(7., tn_update_op.eval()) + self.assertAllClose(7., tn.eval()) + + def testWeighted(self): + labels = constant_op.constant(((0, 1, 0, 
1, 0), + (0, 0, 1, 1, 1), + (1, 1, 1, 1, 0), + (0, 0, 0, 0, 1))) + predictions = constant_op.constant(((0, 0, 1, 1, 0), + (1, 1, 1, 1, 1), + (0, 1, 0, 1, 0), + (1, 1, 1, 1, 1))) + weights = constant_op.constant((1., 1.5, 2., 2.5)) + tn, tn_update_op = metrics.true_positives( + labels=labels, predictions=predictions, weights=weights) + + with self.test_session() as sess: + sess.run(variables.local_variables_initializer()) + self.assertAllClose(0., tn.eval()) + self.assertAllClose(12., tn_update_op.eval()) + self.assertAllClose(12., tn.eval()) + + class TruePositivesAtThresholdsTest(test.TestCase): def setUp(self): diff --git a/tensorflow/python/ops/metrics_impl.py b/tensorflow/python/ops/metrics_impl.py index 67caf726211..717ee1254f3 100644 --- a/tensorflow/python/ops/metrics_impl.py +++ b/tensorflow/python/ops/metrics_impl.py @@ -1511,6 +1511,56 @@ def false_positives_at_thresholds(labels, predictions, thresholds, weights=None, return values['fp'], update_ops['fp'] +def true_negatives(labels, predictions, weights=None, + metrics_collections=None, + updates_collections=None, + name=None): + """Sum the weights of true_negatives. + + If `weights` is `None`, weights default to 1. Use weights of 0 to mask values. + + Args: + labels: The ground truth values, a `Tensor` whose dimensions must match + `predictions`. Will be cast to `bool`. + predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will + be cast to `bool`. + weights: Optional `Tensor` whose rank is either 0, or the same rank as + `labels`, and must be broadcastable to `labels` (i.e., all dimensions must + be either `1`, or the same as the corresponding `labels` dimension). + metrics_collections: An optional list of collections that the metric + value variable should be added to. + updates_collections: An optional list of collections that the metric update + ops should be added to. + name: An optional variable_scope name. + + Returns: + value_tensor: A `Tensor` representing the current value of the metric. + update_op: An operation that accumulates the error from a batch of data. + + Raises: + ValueError: If `predictions` and `labels` have mismatched shapes, or if + `weights` is not `None` and its shape doesn't match `predictions`, or if + either `metrics_collections` or `updates_collections` are not a list or + tuple. + RuntimeError: If eager execution is enabled. 
+ """ + if context.in_eager_mode(): + raise RuntimeError('tf.metrics.true_negatives is not ' + 'supported when eager execution is enabled.') + + with variable_scope.variable_scope( + name, 'true_negatives', (predictions, labels, weights)): + + predictions, labels, weights = _remove_squeezable_dimensions( + predictions=math_ops.cast(predictions, dtype=dtypes.bool), + labels=math_ops.cast(labels, dtype=dtypes.bool), + weights=weights) + is_true_negative = math_ops.logical_and(math_ops.equal(labels, False), + math_ops.equal(predictions, False)) + return _count_condition(is_true_negative, weights, metrics_collections, + updates_collections) + + def true_negatives_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, diff --git a/tensorflow/tools/api/golden/tensorflow.metrics.pbtxt b/tensorflow/tools/api/golden/tensorflow.metrics.pbtxt index 85088834b79..e9b996c9f53 100644 --- a/tensorflow/tools/api/golden/tensorflow.metrics.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.metrics.pbtxt @@ -116,6 +116,10 @@ tf_module { name: "specificity_at_sensitivity" argspec: "args=[\'labels\', \'predictions\', \'sensitivity\', \'weights\', \'num_thresholds\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'200\', \'None\', \'None\', \'None\'], " } + member_method { + name: "true_negatives" + argspec: "args=[\'labels\', \'predictions\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], " + } member_method { name: "true_negatives_at_thresholds" argspec: "args=[\'labels\', \'predictions\', \'thresholds\', \'weights\', \'metrics_collections\', \'updates_collections\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], " From a2c3dab386857cd4fe63990c6bb3aa791e3fcaf3 Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Tue, 14 Nov 2017 11:01:11 -0800 Subject: [PATCH 003/104] Tape stack in C++ instead of python. PiperOrigin-RevId: 175704617 --- tensorflow/python/eager/pywrap_tfe.h | 47 +++++---- tensorflow/python/eager/pywrap_tfe_src.cc | 121 ++++++++++++++++++---- tensorflow/python/eager/tape.py | 121 +++------------------- tensorflow/python/pywrap_tfe.i | 15 +-- 4 files changed, 152 insertions(+), 152 deletions(-) diff --git a/tensorflow/python/eager/pywrap_tfe.h b/tensorflow/python/eager/pywrap_tfe.h index a67519f9a22..f96245f7a53 100644 --- a/tensorflow/python/eager/pywrap_tfe.h +++ b/tensorflow/python/eager/pywrap_tfe.h @@ -87,22 +87,36 @@ TFE_TensorHandle* EagerTensor_Handle(const PyObject* o); // newly created type, or nullptr on error. PyObject* TFE_Py_InitEagerTensor(PyObject* base_class); -PyObject* TFE_Py_NewTape(); -PyObject* TFE_Py_TapeShouldRecord(PyObject* py_tape, PyObject* tensors); -void TFE_Py_TapeWatch(PyObject* tape, tensorflow::int64 tensor_id); -void TFE_Py_TapeDeleteTrace(PyObject* tape, tensorflow::int64 tensor_id); +// Pushes a new tape into the thread-local stack. +void TFE_Py_TapeStackPushNew(); -// Records an operation in the gradient tape. `tape` should point to an object -// returned by TFE_Py_NewTape. op_type is a string for the operation type, used -// in the backprop code. output_tensors should be a list of python ops.Tensor -// objects. input_tensor_ids should be a list of python integers with the ids of -// the input tensors of the recorded operation. 
backward_function should be the -// function to be called during backprop to, given the gradients of the output -// tensors, produce the gradients of the input tensors. -void TFE_Py_TapeRecordOperation(PyObject* tape, PyObject* op_type, - PyObject* output_tensors, - PyObject* input_tensor_ids, - PyObject* backward_function); +// Pops the tape from the top of the stack and returns it. +PyObject* TFE_Py_TapeStackPop(); + +// Pushes an existing tape onto the stack. +void TFE_Py_TapeStackPush(PyObject* tape); + +// Returns true if the tape stack is empty. +PyObject* TFE_Py_TapeStackIsEmpty(); + +PyObject* TFE_Py_TapeStackShouldRecord(PyObject* tensors); +void TFE_Py_TapeStackWatch(PyObject* tensor); +void TFE_Py_TapeStackDeleteTrace(tensorflow::int64 tensor_id); + +// Records an operation in the gradient tape stack.type is a string for the +// operation type, used in the backprop code. output_tensors should be a list of +// python ops.Tensor objects. input_tensor_ids should be a list of python +// integers with the ids of the input tensors of the recorded +// operation. backward_function should be the function to be called during +// backprop to, given the gradients of the output tensors, produce the gradients +// of the input tensors. +void TFE_Py_TapeStackRecordOperation(PyObject* op_type, + PyObject* output_tensors, + PyObject* input_tensor_ids, + PyObject* backward_function); + +// Watches the given variable object on the given tape. +void TFE_Py_TapeStackWatchVariable(PyObject* variable); // Computes a gradient based on information recorded on the tape.`tape` must // have been produced by TFE_Py_NewTape. `vspace` must be a @@ -114,9 +128,6 @@ PyObject* TFE_Py_TapeGradient(PyObject* tape, PyObject* vspace, PyObject* target, PyObject* sources, PyObject* output_gradients, TF_Status* status); -// Watches the given variable object on the given tape. -void TFE_Py_TapeWatchVariable(PyObject* tape, PyObject* variable); - // Returns the set of variables watched by the given tape. PyObject* TFE_Py_TapeWatchedVariables(PyObject* tape); diff --git a/tensorflow/python/eager/pywrap_tfe_src.cc b/tensorflow/python/eager/pywrap_tfe_src.cc index 5cb1313c4b0..387eec13584 100644 --- a/tensorflow/python/eager/pywrap_tfe_src.cc +++ b/tensorflow/python/eager/pywrap_tfe_src.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include + #include "tensorflow/python/eager/pywrap_tfe.h" #include "tensorflow/c/c_api.h" @@ -525,12 +527,65 @@ static PyTypeObject TFE_Py_Tape_Type = { "TFE_Py_Tape objects", /* tp_doc */ }; -PyObject* TFE_Py_NewTape() { +// xcode 7 doesn't define thread_local, so for compatibility we implement our +// own. TODO(apassos) remove once we can deprecate xcode 7. 
+#ifndef __APPLE__ +thread_local std::vector* tape_stack = nullptr; +std::vector* GetTapeStack() { + if (tape_stack == nullptr) { + tape_stack = new std::vector; + } + return tape_stack; +} +#else +static tensorflow::mutex stack_mu(tensorflow::LINKER_INITIALIZED); +static std::unordered_map*>* + tape_stack GUARDED_BY(stack_mu) = nullptr; +std::vector* GetTapeStack() { + tensorflow::mutex_lock ml(stack_mu); + if (tape_stack == nullptr) { + tape_stack = + new std::unordered_map*>; + } + auto it = tape_stack->find(std::this_thread::get_id()); + if (it != tape_stack->end()) { + return it->second; + } + return tape_stack + ->emplace(std::this_thread::get_id(), new std::vector) + .first->second; +} +#endif + +void TFE_Py_TapeStackPushNew() { TFE_Py_Tape_Type.tp_new = PyType_GenericNew; - if (PyType_Ready(&TFE_Py_Tape_Type) < 0) return nullptr; + if (PyType_Ready(&TFE_Py_Tape_Type) < 0) return; TFE_Py_Tape* tape = PyObject_NEW(TFE_Py_Tape, &TFE_Py_Tape_Type); tape->tape = new GradientTape(); - return reinterpret_cast(tape); + GetTapeStack()->push_back(tape); +} + +void TFE_Py_TapeStackPush(PyObject* tape) { + Py_INCREF(tape); + GetTapeStack()->push_back(reinterpret_cast(tape)); +} + +PyObject* TFE_Py_TapeStackIsEmpty() { + if (GetTapeStack()->empty()) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; +} + +PyObject* TFE_Py_TapeStackPop() { + auto* stack = GetTapeStack(); + if (stack->empty()) { + PyErr_SetString(PyExc_RuntimeError, "tape stack is empty."); + return nullptr; + } + TFE_Py_Tape* top = stack->back(); + stack->pop_back(); + return reinterpret_cast(top); } static std::vector MakeIntList(PyObject* list) { @@ -557,10 +612,14 @@ static std::vector MakeIntList(PyObject* list) { return tensor_ids; } -PyObject* TFE_Py_TapeShouldRecord(PyObject* py_tape, PyObject* tensors) { +PyObject* TFE_Py_TapeStackShouldRecord(PyObject* tensors) { if (tensors == Py_None) { Py_RETURN_FALSE; } + auto* stack = GetTapeStack(); + if (stack->empty()) { + Py_RETURN_FALSE; + } PyObject* seq = PySequence_Fast(tensors, "expected a sequence"); if (seq == nullptr) { return nullptr; @@ -575,16 +634,22 @@ PyObject* TFE_Py_TapeShouldRecord(PyObject* py_tape, PyObject* tensors) { tensor_ids.push_back(FastTensorId(item)); } Py_DECREF(seq); - TFE_Py_Tape* tape = reinterpret_cast(py_tape); - if (tape->tape->ShouldRecord(tensor_ids)) { - Py_RETURN_TRUE; - } else { - Py_RETURN_FALSE; + for (TFE_Py_Tape* tape : *stack) { + if (tape->tape->ShouldRecord(tensor_ids)) { + Py_RETURN_TRUE; + } } + Py_RETURN_FALSE; } -void TFE_Py_TapeWatch(PyObject* tape, tensorflow::int64 tensor_id) { - reinterpret_cast(tape)->tape->Watch(tensor_id); +void TFE_Py_TapeStackWatch(PyObject* tensor) { + tensorflow::int64 tensor_id = FastTensorId(tensor); + if (PyErr_Occurred()) { + return; + } + for (TFE_Py_Tape* tape : *GetTapeStack()) { + tape->tape->Watch(tensor_id); + } } static tensorflow::eager::TapeTensor TapeTensorFromTensor(PyObject* tensor) { @@ -646,8 +711,10 @@ std::vector MakeTensorIDList(PyObject* tensors) { return list; } -void TFE_Py_TapeWatchVariable(PyObject* tape, PyObject* variable) { - reinterpret_cast(tape)->tape->WatchVariable(variable); +void TFE_Py_TapeStackWatchVariable(PyObject* variable) { + for (TFE_Py_Tape* tape : *GetTapeStack()) { + tape->tape->WatchVariable(variable); + } } PyObject* TFE_Py_TapeWatchedVariables(PyObject* tape) { @@ -661,10 +728,14 @@ PyObject* TFE_Py_TapeWatchedVariables(PyObject* tape) { return result; } -void TFE_Py_TapeRecordOperation(PyObject* tape, PyObject* op_type, - PyObject* output_tensors, - PyObject* 
input_tensors, - PyObject* backward_function) { +void TFE_Py_TapeStackRecordOperation(PyObject* op_type, + PyObject* output_tensors, + PyObject* input_tensors, + PyObject* backward_function) { + auto* stack = GetTapeStack(); + if (stack->empty()) { + return; + } std::vector input_ids = MakeTensorIDList(input_tensors); std::vector output_info; PyObject* seq = PySequence_Fast(output_tensors, @@ -697,14 +768,18 @@ void TFE_Py_TapeRecordOperation(PyObject* tape, PyObject* op_type, return; } - Py_INCREF(backward_function); - reinterpret_cast(tape)->tape->RecordOperation( - op_type_str, output_info, input_ids, backward_function, - [backward_function]() { Py_DECREF(backward_function); }); + for (TFE_Py_Tape* tape : *stack) { + Py_INCREF(backward_function); + tape->tape->RecordOperation( + op_type_str, output_info, input_ids, backward_function, + [backward_function]() { Py_DECREF(backward_function); }); + } } -void TFE_Py_TapeDeleteTrace(PyObject* tape, tensorflow::int64 tensor_id) { - reinterpret_cast(tape)->tape->DeleteTrace(tensor_id); +void TFE_Py_TapeStackDeleteTrace(tensorflow::int64 tensor_id) { + for (TFE_Py_Tape* tape : *GetTapeStack()) { + tape->tape->DeleteTrace(tensor_id); + } } class PyVSpace : public tensorflow::eager::VSpace { diff --git a/tensorflow/python/eager/tape.py b/tensorflow/python/eager/tape.py index fb6b62a3e09..440c84b7ea9 100644 --- a/tensorflow/python/eager/tape.py +++ b/tensorflow/python/eager/tape.py @@ -18,106 +18,24 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import collections import contextlib -import threading from tensorflow.python import pywrap_tensorflow -def tid(tensor): - return tensor._id # pylint: disable=protected-access - - -class TapeEntry( - collections.namedtuple("TapeEntry", [ - "op_type", - "output_ids", "input_ids", "backward_function", - "output_shape_and_dtype", - ])): - """Entry in the gradient tape. - - Represents the execution of one op or function, with instructions for doing - its backward pass and useful information for it. - - Args: - output_ids: tensor_id(t) for each output tensor T - input_ids: tensor_id(t) for each input tensor T - backward_function: function to be called with the downstream gradients and - side outputs as arguments which computes the backward pass. - output_shape_and_dtype: a list of (shape_tuple, dtype) for every output - tensor_id - """ - - -def _tensor_shape(t): - return t._shape_tuple() # pylint: disable=protected-access - - class Tape(object): """Represents a gradient propagation trace.""" - def __init__(self): - self._tape = pywrap_tensorflow.TFE_Py_NewTape() - - def should_record(self, tensors): - """Returns true if any tensor should be recorded. - - Args: - tensors: some tensors. - - Returns: - True if any of the tensors is in the tape. 
- """ - return pywrap_tensorflow.TFE_Py_TapeShouldRecord( - self._tape, tensors) - - def watch(self, tensor): - """Adds a tensor to the tape.""" - pywrap_tensorflow.TFE_Py_TapeWatch(self._tape, tid(tensor)) - - def watch_variable(self, v): - pywrap_tensorflow.TFE_Py_TapeWatchVariable(self._tape, v) + def __init__(self, tape): + self._tape = tape def watched_variables(self): return pywrap_tensorflow.TFE_Py_TapeWatchedVariables(self._tape) - def record_operation(self, op_type, output_tensors, input_tensors, - backward_function): - """Records an operation in the tape.""" - pywrap_tensorflow.TFE_Py_TapeRecordOperation( - self._tape, - op_type, - output_tensors, - input_tensors, - backward_function) - - def _delete_tensor_id(self, i): - pywrap_tensorflow.TFE_Py_TapeDeleteTrace(self._tape, i) - - def delete_trace(self, tensor_id): - """Deletes any trace we have for this tensor.""" - self._delete_tensor_id(tensor_id) - - -class _TapeStack(threading.local): - - def __init__(self): - super(_TapeStack, self).__init__() - self._stack = [] - - @property - def stack(self): - return self._stack - - -# The global tape stack. -_tape_stack = _TapeStack() - def push_new_tape(): """Pushes a new tape onto the tape stack.""" - _tape_stack.stack.append(Tape()) + pywrap_tensorflow.TFE_Py_TapeStackPushNew() def watch(tensor): @@ -126,8 +44,7 @@ def watch(tensor): Args: tensor: tensor to be watched. """ - for t in _tape_stack.stack: - t.watch(tensor) + pywrap_tensorflow.TFE_Py_TapeStackWatch(tensor) def watch_variable(variable): @@ -136,48 +53,42 @@ def watch_variable(variable): Args: variable: variable to be watched. """ - for t in _tape_stack.stack: - t.watch_variable(variable) + pywrap_tensorflow.TFE_Py_TapeStackWatchVariable(variable) def pop_tape(): """Pops the top tape in the stack, if any.""" - if _tape_stack.stack: - return _tape_stack.stack.pop() - return None + return Tape(pywrap_tensorflow.TFE_Py_TapeStackPop()) @contextlib.contextmanager def stop_recording(): - old = _tape_stack.stack - _tape_stack._stack = [] # pylint: disable=protected-access + stack = [] + while not pywrap_tensorflow.TFE_Py_TapeStackIsEmpty(): + stack.append(pop_tape()._tape) # pylint: disable=protected-access try: yield finally: - _tape_stack._stack = old # pylint: disable=protected-access + for tape in reversed(stack): + pywrap_tensorflow.TFE_Py_TapeStackPush(tape) def should_record(tensors): """Returns true if any tape in the stack watches any of these tensors.""" - if not _tape_stack.stack: - return False - return any(x.should_record(tensors) for x in _tape_stack.stack) + return pywrap_tensorflow.TFE_Py_TapeStackShouldRecord(tensors) def record_operation(op_type, output_tensors, input_tensors, backward_function): """Records the operation on all tapes in the stack.""" - for t in _tape_stack.stack: - t.record_operation(op_type, output_tensors, - input_tensors, - backward_function) + pywrap_tensorflow.TFE_Py_TapeStackRecordOperation( + op_type, output_tensors, input_tensors, backward_function) def delete_trace(tensor_id): """Deletes traces for this Tensor from all tapes in the stack.""" - for t in _tape_stack.stack: - t.delete_trace(tensor_id) + pywrap_tensorflow.TFE_Py_TapeStackDeleteTrace(tensor_id) def could_possibly_record(): """Returns True if any tape is active.""" - return len(_tape_stack.stack) > 0 # pylint: disable=g-explicit-length-test + return not pywrap_tensorflow.TFE_Py_TapeStackIsEmpty() diff --git a/tensorflow/python/pywrap_tfe.i b/tensorflow/python/pywrap_tfe.i index 5ca0e572869..82b154164e8 100644 --- 
a/tensorflow/python/pywrap_tfe.i +++ b/tensorflow/python/pywrap_tfe.i @@ -24,13 +24,16 @@ limitations under the License. %rename("%s") TFE_Py_RegisterExceptionClass; %rename("%s") TFE_Py_Execute; %rename("%s") TFE_Py_UID; -%rename("%s") TFE_Py_NewTape; -%rename("%s") TFE_Py_TapeShouldRecord; -%rename("%s") TFE_Py_TapeWatch; -%rename("%s") TFE_Py_TapeDeleteTrace; -%rename("%s") TFE_Py_TapeRecordOperation; +%rename("%s") TFE_Py_TapeStackPushNew; +%rename("%s") TFE_Py_TapeStackPush; +%rename("%s") TFE_Py_TapeStackPop; +%rename("%s") TFE_Py_TapeStackIsEmpty; +%rename("%s") TFE_Py_TapeStackShouldRecord; +%rename("%s") TFE_Py_TapeStackWatch; +%rename("%s") TFE_Py_TapeStackDeleteTrace; +%rename("%s") TFE_Py_TapeStackRecordOperation; +%rename("%s") TFE_Py_TapeStackWatchVariable; %rename("%s") TFE_Py_TapeGradient; -%rename("%s") TFE_Py_TapeWatchVariable; %rename("%s") TFE_Py_TapeWatchedVariables; %rename("%s") TFE_NewContextOptions; %rename("%s") TFE_ContextOptionsSetConfig; From 6408777b225b742720fb6575addd3643fc57f0b1 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 11:57:40 -0800 Subject: [PATCH 004/104] [tpu:profiler] Add matrix unit utilization to TfOpStats. PiperOrigin-RevId: 175713542 --- tensorflow/contrib/tpu/profiler/tf_op_stats.proto | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/contrib/tpu/profiler/tf_op_stats.proto b/tensorflow/contrib/tpu/profiler/tf_op_stats.proto index d8ee2437909..2d2207a43fe 100644 --- a/tensorflow/contrib/tpu/profiler/tf_op_stats.proto +++ b/tensorflow/contrib/tpu/profiler/tf_op_stats.proto @@ -124,4 +124,6 @@ message TfOpStats { optional LoopingResult looping = 4; // The result for the HloExtraInfoMap. optional HloExtraInfoMapResult hlo_extrainfo_map = 5; + // Overall matrix unit utilization in percentage. + optional double matrix_unit_utilization_percent = 6; } From 7a346347ee5a4078e2bd1cca00247c4219af326c Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 12:02:21 -0800 Subject: [PATCH 005/104] Adding an optimizer module for preconditioned stochastic gradient langevin dynamics. 
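
As an illustrative usage sketch only (not part of this change): the new `SGLDOptimizer` follows the standard `tf.train.Optimizer` interface, so a hypothetical training loop could look like the snippet below. The weight, loss, and hyperparameter values are made up for illustration; only the import path and constructor arguments come from the files added in this patch.

    import tensorflow as tf
    from tensorflow.contrib.bayesflow.python.ops.optimizers import SGLDOptimizer

    # Hypothetical model: a single weight vector and a quadratic loss.
    w = tf.Variable([1.0, 2.0])
    loss = tf.reduce_sum(tf.square(w - 3.0))

    # preconditioner_decay_rate is the RMSProp-style decay ("alpha" in the
    # referenced paper); minimize() is inherited from tf.train.Optimizer.
    train_op = SGLDOptimizer(
        learning_rate=0.01, preconditioner_decay_rate=0.95).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(100):
            sess.run(train_op)

Because noise is injected after the burn-in period, the variable is expected to fluctuate around the minimum rather than converge exactly, which is the intended sampling behavior.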
PiperOrigin-RevId: 175714379 --- tensorflow/contrib/bayesflow/BUILD | 21 ++ tensorflow/contrib/bayesflow/__init__.py | 3 +- .../kernel_tests/sgld_optimizer_test.py | 209 +++++++++++++++++ .../bayesflow/python/ops/optimizers.py | 34 +++ .../bayesflow/python/ops/sgld_optimizer.py | 216 ++++++++++++++++++ 5 files changed, 482 insertions(+), 1 deletion(-) create mode 100644 tensorflow/contrib/bayesflow/python/kernel_tests/sgld_optimizer_test.py create mode 100644 tensorflow/contrib/bayesflow/python/ops/optimizers.py create mode 100644 tensorflow/contrib/bayesflow/python/ops/sgld_optimizer.py diff --git a/tensorflow/contrib/bayesflow/BUILD b/tensorflow/contrib/bayesflow/BUILD index f92b57869ed..9f3650e8f9c 100644 --- a/tensorflow/contrib/bayesflow/BUILD +++ b/tensorflow/contrib/bayesflow/BUILD @@ -160,6 +160,27 @@ cuda_py_test( ], ) +cuda_py_test( + name = "sgld_optimizer_test", + size = "small", + srcs = ["python/kernel_tests/sgld_optimizer_test.py"], + additional_deps = [ + ":bayesflow_py", + "//third_party/py/numpy", + "//tensorflow/contrib/distributions:distributions_py", + "//tensorflow/contrib/layers:layers_py", + "//tensorflow/python/ops/distributions", + "//tensorflow/python:client_testlib", + "//tensorflow/python:framework", + "//tensorflow/python:framework_for_generated_wrappers", + "//tensorflow/python:framework_test_lib", + "//tensorflow/python:gradients", + "//tensorflow/python:math_ops", + "//tensorflow/python:platform_test", + "//tensorflow/python:random_seed", + ], +) + filegroup( name = "all_files", srcs = glob( diff --git a/tensorflow/contrib/bayesflow/__init__.py b/tensorflow/contrib/bayesflow/__init__.py index beaf6f1854d..a638753f2f0 100644 --- a/tensorflow/contrib/bayesflow/__init__.py +++ b/tensorflow/contrib/bayesflow/__init__.py @@ -27,6 +27,7 @@ from tensorflow.contrib.bayesflow.python.ops import halton_sequence from tensorflow.contrib.bayesflow.python.ops import hmc from tensorflow.contrib.bayesflow.python.ops import metropolis_hastings from tensorflow.contrib.bayesflow.python.ops import monte_carlo +from tensorflow.contrib.bayesflow.python.ops import optimizers # pylint: enable=unused-import,line-too-long from tensorflow.python.util.all_util import remove_undocumented @@ -34,7 +35,7 @@ from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = ['csiszar_divergence', 'custom_grad', 'entropy', 'metropolis_hastings', 'monte_carlo', 'halton_sequence', - 'hmc', 'special_math', 'stochastic_variables', + 'hmc', 'optimizers', 'special_math', 'stochastic_variables', 'variational_inference'] remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/bayesflow/python/kernel_tests/sgld_optimizer_test.py b/tensorflow/contrib/bayesflow/python/kernel_tests/sgld_optimizer_test.py new file mode 100644 index 00000000000..66793383fdd --- /dev/null +++ b/tensorflow/contrib/bayesflow/python/kernel_tests/sgld_optimizer_test.py @@ -0,0 +1,209 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Functional test for GradientDescent.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +from tensorflow.contrib.bayesflow.python.ops.optimizers import SGLDOptimizer +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import variables +from tensorflow.python.platform import test + + +class SGLDOptimizerTest(test.TestCase): + + def testBasic(self): + for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: + with self.test_session(): + var0 = variables.Variable([1.1, 2.1], dtype=dtype) + var1 = variables.Variable([3.0, 4.0], dtype=dtype) + grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) + grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) + decay_rate = 0.53 + sgd_op = SGLDOptimizer( + 3.0, preconditioner_decay_rate=decay_rate).apply_gradients( + zip([grads0, grads1], [var0, var1])) + variables.global_variables_initializer().run() + # Fetch params to validate initial values + self.assertAllCloseAccordingToType([1.1, 2.1], var0.eval()) + self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval()) + # Run 1 step of sgd + sgd_op.run() + # Validate updated params + grads_scaled = (0.5 * 0.1 / math.sqrt(decay_rate + + (1 - decay_rate) * 0.1**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [1.1 - 3.0 * grads_scaled, 2.1 - 3.0 * grads_scaled], var0.eval()) + grads_scaled = (0.5 * 0.01 / math.sqrt( + decay_rate + (1 - decay_rate) * 0.01**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [3.0 - 3.0 * grads_scaled, 4.0 - 3.0 * grads_scaled], var1.eval()) + + def testBasicMultiInstance(self): + for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: + with self.test_session(): + var0 = variables.Variable([1.1, 2.1], dtype=dtype) + var1 = variables.Variable([3.0, 4.0], dtype=dtype) + grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) + grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) + vara = variables.Variable([1.1, 2.1], dtype=dtype) + varb = variables.Variable([3.0, 4.0], dtype=dtype) + gradsa = constant_op.constant([0.1, 0.1], dtype=dtype) + gradsb = constant_op.constant([0.01, 0.01], dtype=dtype) + decay_rate = 0.5 + sgd_optimizer = SGLDOptimizer(3.0, preconditioner_decay_rate=decay_rate) + sgd_op = sgd_optimizer.apply_gradients( + zip([grads0, grads1], [var0, var1])) + sgd_optimizer2 = SGLDOptimizer( + 3.0, preconditioner_decay_rate=decay_rate) + sgd_op2 = sgd_optimizer2.apply_gradients( + zip([gradsa, gradsb], [vara, varb])) + variables.global_variables_initializer().run() + # Fetch params to validate initial values + self.assertAllCloseAccordingToType([1.1, 2.1], var0.eval()) + self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval()) + self.assertAllCloseAccordingToType([1.1, 2.1], vara.eval()) + self.assertAllCloseAccordingToType([3.0, 4.0], varb.eval()) + + # Run 1 step of sgd + sgd_op.run() + sgd_op2.run() + # Validate updated params + grads_scaled = (0.5 * 0.1 / math.sqrt(decay_rate + + (1 - decay_rate) * 0.1**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [1.1 - 3.0 * grads_scaled, 2.1 - 3.0 * grads_scaled], var0.eval()) + self.assertAllCloseAccordingToType( + [1.1 - 3.0 * grads_scaled, 2.1 - 3.0 * grads_scaled], vara.eval()) + + grads_scaled = (0.5 * 0.01 / 
math.sqrt( + decay_rate + (1 - decay_rate) * 0.01**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [3.0 - 3.0 * grads_scaled, 4.0 - 3.0 * grads_scaled], var1.eval()) + self.assertAllCloseAccordingToType( + [3.0 - 3.0 * grads_scaled, 4.0 - 3.0 * grads_scaled], varb.eval()) + self.assertNotEqual(sgd_optimizer.variable_scope, + sgd_optimizer2.variable_scope) + self.assertNotEqual(sgd_optimizer.variable_scope.name, + sgd_optimizer2.variable_scope.name) + + def testTensorLearningRate(self): + for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: + with self.test_session(): + var0 = variables.Variable([1.1, 2.1], dtype=dtype) + var1 = variables.Variable([3.0, 4.0], dtype=dtype) + grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) + grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) + lrate = constant_op.constant(3.0) + decay_rate = 0.5 + sgd_op = SGLDOptimizer( + lrate, preconditioner_decay_rate=constant_op.constant( + decay_rate)).apply_gradients( + zip([grads0, grads1], [var0, var1])) + variables.global_variables_initializer().run() + # Fetch params to validate initial values + self.assertAllCloseAccordingToType([1.1, 2.1], var0.eval()) + self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval()) + # Run 1 step of sgd + sgd_op.run() + # Validate updated params + grads_scaled = (0.5 * 0.1 / math.sqrt(decay_rate + + (1 - decay_rate) * 0.1**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [1.1 - 3.0 * grads_scaled, 2.1 - 3.0 * grads_scaled], var0.eval()) + grads_scaled = (0.5 * 0.01 / math.sqrt( + decay_rate + (1 - decay_rate) * 0.01**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [3.0 - 3.0 * grads_scaled, 4.0 - 3.0 * grads_scaled], var1.eval()) + + def testGradWrtRef(self): + for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: + with self.test_session(): + opt = SGLDOptimizer(3.0) + values = [1.0, 3.0] + vars_ = [variables.Variable([v], dtype=dtype) for v in values] + grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_) + variables.global_variables_initializer().run() + for grad, _ in grads_and_vars: + self.assertAllCloseAccordingToType([1.0], grad.eval()) + + def testWithGlobalStep(self): + for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: + with self.test_session(): + global_step = variables.Variable(0, trainable=False) + var0 = variables.Variable([1.1, 2.1], dtype=dtype) + var1 = variables.Variable([3.0, 4.0], dtype=dtype) + grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) + grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) + decay_rate = 0.1 + sgd_op = SGLDOptimizer( + 3.0, preconditioner_decay_rate=decay_rate).apply_gradients( + zip([grads0, grads1], [var0, var1]), global_step=global_step) + variables.global_variables_initializer().run() + # Fetch params to validate initial values + self.assertAllCloseAccordingToType([1.1, 2.1], var0.eval()) + self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval()) + # Run 1 step of sgd + sgd_op.run() + + # Validate updated params and global_step + grads_scaled = (0.5 * 0.1 / math.sqrt(decay_rate + + (1 - decay_rate) * 0.1**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [1.1 - 3.0 * grads_scaled, 2.1 - 3.0 * grads_scaled], var0.eval()) + grads_scaled = (0.5 * 0.01 / math.sqrt( + decay_rate + (1 - decay_rate) * 0.01**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [3.0 - 3.0 * grads_scaled, 4.0 - 3.0 * grads_scaled], var1.eval()) + self.assertAllCloseAccordingToType(1, global_step.eval()) + + def testSparseBasic(self): + for dtype in [dtypes.half, dtypes.float32, 
dtypes.float64]: + with self.test_session(): + var0 = variables.Variable([[1.1], [2.1]], dtype=dtype) + var1 = variables.Variable([[3.0], [4.0]], dtype=dtype) + grads0 = ops.IndexedSlices( + constant_op.constant([0.1], shape=[1, 1], dtype=dtype), + constant_op.constant([0]), constant_op.constant([2, 1])) + grads1 = ops.IndexedSlices( + constant_op.constant([0.01], shape=[1, 1], dtype=dtype), + constant_op.constant([1]), constant_op.constant([2, 1])) + decay_rate = 0.9 + sgd_op = SGLDOptimizer( + 3.0, preconditioner_decay_rate=decay_rate).apply_gradients( + zip([grads0, grads1], [var0, var1])) + variables.global_variables_initializer().run() + # Fetch params to validate initial values + self.assertAllCloseAccordingToType([[1.1], [2.1]], var0.eval()) + self.assertAllCloseAccordingToType([[3.0], [4.0]], var1.eval()) + # Run 1 step of sgd + sgd_op.run() + # Validate updated params + grads_scaled = (0.5 * 0.1 / math.sqrt(decay_rate + + (1 - decay_rate) * 0.1**2 + 1e-8)) + self.assertAllCloseAccordingToType([[1.1 - 3.0 * grads_scaled], [2.1]], + var0.eval()) + grads_scaled = (0.5 * 0.01 / math.sqrt( + decay_rate + (1 - decay_rate) * 0.01**2 + 1e-8)) + self.assertAllCloseAccordingToType( + [[3.0 - 3.0 * 0], [4.0 - 3.0 * grads_scaled]], var1.eval()) + + +if __name__ == "__main__": + test.main() diff --git a/tensorflow/contrib/bayesflow/python/ops/optimizers.py b/tensorflow/contrib/bayesflow/python/ops/optimizers.py new file mode 100644 index 00000000000..ee32e6b5c3d --- /dev/null +++ b/tensorflow/contrib/bayesflow/python/ops/optimizers.py @@ -0,0 +1,34 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Probabilistic optimizer modules. + +See ${python/contrib.bayesflow.optimizers}. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.contrib.bayesflow.python.ops.sgld_optimizer import * +# pylint: enable=wildcard-import +from tensorflow.python.util.all_util import remove_undocumented + +_allowed_symbols = [ + 'SGLDOptimizer', +] + +remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/bayesflow/python/ops/sgld_optimizer.py b/tensorflow/contrib/bayesflow/python/ops/sgld_optimizer.py new file mode 100644 index 00000000000..5d36ea7a2b5 --- /dev/null +++ b/tensorflow/contrib/bayesflow/python/ops/sgld_optimizer.py @@ -0,0 +1,216 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""An optimizer module for stochastic gradient Langevin dynamics.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import check_ops +from tensorflow.python.ops import control_flow_ops +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import variable_scope as varscope_ops +from tensorflow.python.training import optimizer +from tensorflow.python.training import training_ops + + +class SGLDOptimizer(optimizer.Optimizer): + """An optimizer module for stochastic gradient Langevin dynamics. + + This implements the preconditioned Stochastic Gradient Langevin Dynamics + optimizer [1]. The optimization variable is regarded as a sample from the + posterior under Stochastic Gradient Langevin Dynamics with noise rescaled in + each dimension according to RMSProp [2]. + + Note: If a prior is included in the loss, it should be scaled by + `1/num_pseudo_batches`, where num_pseudo_batches is the number of minibatches + in the data. I.e., it should be divided by the `num_pseudo_batches` term + described below. + + [1]: "Preconditioned Stochastic Gradient Langevin Dynamics for Deep Neural + Networks." Chunyuan Li, Changyou Chen, David Carlson, Lawrence Carin. + ArXiv:1512.07666, 2015. https://arxiv.org/abs/1512.07666 + [2]: http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + + Args: + learning_rate: Scalar `float`-like `Tensor`. The base learning rate for the + optimizer. Must be tuned to the specific function being minimized. + preconditioner_decay_rate: Scalar `float`-like `Tensor`. The exponential + decay rate of the rescaling of the preconditioner (RMSprop). (This is + "alpha" in [1]). Should be smaller than but nearly `1` to approximate + sampling from the posterior. (Default: `0.95`) + num_pseudo_batches: Scalar `int`-like `Tensor`. The effective number of + minibatches in the data set. Trades off noise and prior with the SGD + likelihood term. Note: Assumes the loss is taken as the mean over a + minibatch. Otherwise if the sum was taken, divide this number by the + batch size. (Default: `1`) + burnin: Scalar `int`-like `Tensor`. The number of iterations to collect + gradient statistics to update the preconditioner before starting to draw + noisy samples. (Default: `25`) + diagonal_bias: Scalar `float`-like `Tensor`. Term added to the diagonal of + the preconditioner to prevent the preconditioner from degenerating. + (Default: `1e-8`) + name: Python `str` describing ops managed by this function. + (Default: `"SGLDOptimizer"`) + variable_scope: Variable scope used for calls to `tf.get_variable`. + If `None`, a new variable scope is created using name + `ops.get_default_graph().unique_name(name or default_name)`. 
+ + Raises: + InvalidArgumentError: If preconditioner_decay_rate is a `Tensor` not in + `(0,1]`. + """ + + def __init__(self, + learning_rate, + preconditioner_decay_rate=0.95, + num_pseudo_batches=1, + burnin=25, + diagonal_bias=1e-8, + name=None, + variable_scope=None): + default_name = 'SGLDOptimizer' + with ops.name_scope(name, default_name, [ + learning_rate, preconditioner_decay_rate, num_pseudo_batches, burnin, + diagonal_bias + ]): + if variable_scope is None: + var_scope_name = ops.get_default_graph().unique_name( + name or default_name) + with varscope_ops.variable_scope(var_scope_name) as scope: + self._variable_scope = scope + else: + self._variable_scope = variable_scope + + self._preconditioner_decay_rate = ops.convert_to_tensor( + preconditioner_decay_rate, name='preconditioner_decay_rate') + self._num_pseudo_batches = ops.convert_to_tensor( + num_pseudo_batches, name='num_pseudo_batches') + self._burnin = ops.convert_to_tensor(burnin, name='burnin') + self._diagonal_bias = ops.convert_to_tensor( + diagonal_bias, name='diagonal_bias') + self._learning_rate = ops.convert_to_tensor( + learning_rate, name='learning_rate') + + with varscope_ops.variable_scope(self._variable_scope): + self._counter = varscope_ops.get_variable( + 'counter', initializer=0, trainable=False) + + self._preconditioner_decay_rate = control_flow_ops.with_dependencies([ + check_ops.assert_non_negative( + self._preconditioner_decay_rate, + message='`preconditioner_decay_rate` must be non-negative'), + check_ops.assert_less_equal( + self._preconditioner_decay_rate, + 1., + message='`preconditioner_decay_rate` must be at most 1.'), + ], self._preconditioner_decay_rate) + + self._num_pseudo_batches = control_flow_ops.with_dependencies([ + check_ops.assert_greater( + self._num_pseudo_batches, + 0, + message='`num_pseudo_batches` must be greater than zero') + ], self._num_pseudo_batches) + + self._burnin = control_flow_ops.with_dependencies([ + check_ops.assert_non_negative( + self._burnin, message='`burnin` must be non-negative'), + check_ops.assert_integer( + self._burnin, message='`burnin` must be an integer') + ], self._burnin) + + self._diagonal_bias = control_flow_ops.with_dependencies([ + check_ops.assert_non_negative( + self._diagonal_bias, + message='`diagonal_bias` must be non-negative') + ], self._diagonal_bias) + + super(SGLDOptimizer, self).__init__(use_locking=False, + name=name or default_name) + + def _create_slots(self, var_list): + for v in var_list: + init_rms = init_ops.ones_initializer(dtype=v.dtype) + self._get_or_make_slot_with_initializer(v, init_rms, v.get_shape(), + v.dtype, 'rms', self._name) + + def _prepare(self): + # We need to put the conversion and check here because a user will likely + # want to decay the learning rate dynamically. 
+ self._learning_rate_tensor = control_flow_ops.with_dependencies([ + check_ops.assert_non_negative( + self._learning_rate, message='`learning_rate` must be non-negative') + ], ops.convert_to_tensor(self._learning_rate, name='learning_rate_tensor')) + self._decay_tensor = ops.convert_to_tensor( + self._preconditioner_decay_rate, name='preconditioner_decay_rate') + + super(SGLDOptimizer, self)._prepare() + + def _apply_dense(self, grad, var): + rms = self.get_slot(var, 'rms') + + with ops.control_dependencies([ + self._update_momentum(rms, grad, math_ops.cast(self._decay_tensor, + var.dtype.base_dtype))]): + new_grad = self._apply_noisy_update(rms, grad) + + return training_ops.apply_gradient_descent( + var, + math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype), + new_grad, + use_locking=self._use_locking).op + + def _apply_sparse(self, grad, var): + rms = self.get_slot(var, 'rms') + + with ops.control_dependencies([ + self._update_momentum(rms, grad, math_ops.cast(self._decay_tensor, + var.dtype.base_dtype))]): + new_grad = self._apply_noisy_update(rms, grad) + + return training_ops.apply_gradient_descent( + var, + math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype), + new_grad, + use_locking=self._use_locking).op + + @property + def variable_scope(self): + """Variable scope of all calls to `tf.get_variable`.""" + return self._variable_scope + + def _apply_noisy_update(self, mom, grad): + # Compute and apply the gradient update following + # preconditioned Langevin dynamics + stddev = array_ops.where( + array_ops.squeeze(self._counter > self._burnin), + math_ops.cast(math_ops.rsqrt(self._learning_rate), grad.dtype), + array_ops.zeros([], grad.dtype)) + + preconditioner = math_ops.rsqrt( + mom + math_ops.cast(self._diagonal_bias, grad.dtype)) + return ( + 0.5 * preconditioner * grad * math_ops.cast(self._num_pseudo_batches, + grad.dtype) + + random_ops.random_normal(array_ops.shape(grad), 1.0, dtype=grad.dtype) * + stddev * math_ops.sqrt(preconditioner)) + + def _update_momentum(self, mom, grad, decay): + # Keep an exponentially weighted moving average of squared gradients. + # Not thread safe + return mom.assign_add((1.0 - decay) * (math_ops.square(grad) - mom)) From 317c011b19c90c8aeed4ce200d33f68b56311150 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 12:02:25 -0800 Subject: [PATCH 006/104] Fixed a bug in tensorflow::str_util::CUnescape. Added a str util test that failed without this change. The CUnescape did a const_cast to the result string's buffer, which made it write the same buffer without copying. 
PiperOrigin-RevId: 175714391 --- .../xla/tools/parser/hlo_parser_test.cc | 2 +- tensorflow/core/lib/strings/str_util.cc | 28 +++++++++++++++---- tensorflow/core/lib/strings/str_util_test.cc | 13 +++++++++ 3 files changed, 36 insertions(+), 7 deletions(-) diff --git a/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc b/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc index 8eeed339b87..29ae3296ca2 100644 --- a/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc +++ b/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc @@ -866,7 +866,7 @@ TEST_F(HloParserTest, CommaBetweenSubAttributes) { const string original = R"(HloModule test_comma_module: ENTRY %test_comma.v4 () -> f32[] { - ROOT %constant = f32[] constant(-4.2), metadata={source_line=5, op_type="const"} + ROOT %constant = f32[] constant(-4.2), metadata={source_line=5, op_type="::const"} } )"; diff --git a/tensorflow/core/lib/strings/str_util.cc b/tensorflow/core/lib/strings/str_util.cc index 240e1454e58..d28857803d7 100644 --- a/tensorflow/core/lib/strings/str_util.cc +++ b/tensorflow/core/lib/strings/str_util.cc @@ -84,15 +84,32 @@ inline int hex_digit_to_int(char c) { return x & 0xf; } -bool CUnescapeInternal(StringPiece source, char* dest, +bool CUnescapeInternal(StringPiece source, string* dest, string::size_type* dest_len, string* error) { - char* d = dest; const char* p = source.data(); const char* end = source.end(); const char* last_byte = end - 1; + // We are going to write the result to dest with its iterator. If our string + // implementation uses copy-on-write, this will trigger a copy-on-write of + // dest's buffer; that is, dest will be assigned a new buffer. + // + // Note that the following way is NOT a legal way to modify a string's + // content: + // + // char* d = const_cast(dest->data()); + // + // This won't trigger copy-on-write of the string, and so is dangerous when + // the buffer is shared. + auto d = dest->begin(); + // Small optimization for case where source = dest and there's no escaping - while (p == d && p < end && *p != '\\') p++, d++; + if (source.data() == dest->data()) { + while (p < end && *p != '\\') { + p++; + d++; + } + } while (p < end) { if (*p != '\\') { @@ -192,7 +209,7 @@ bool CUnescapeInternal(StringPiece source, char* dest, p++; // read past letter we escaped } } - *dest_len = d - dest; + *dest_len = d - dest->begin(); return true; } @@ -215,8 +232,7 @@ bool SplitAndParseAsInts(StringPiece text, char delim, bool CUnescape(StringPiece source, string* dest, string* error) { dest->resize(source.size()); string::size_type dest_size; - if (!CUnescapeInternal(source, const_cast(dest->data()), &dest_size, - error)) { + if (!CUnescapeInternal(source, dest, &dest_size, error)) { return false; } dest->erase(dest_size); diff --git a/tensorflow/core/lib/strings/str_util_test.cc b/tensorflow/core/lib/strings/str_util_test.cc index 5c735a87a39..d5909d17aaa 100644 --- a/tensorflow/core/lib/strings/str_util_test.cc +++ b/tensorflow/core/lib/strings/str_util_test.cc @@ -43,6 +43,19 @@ TEST(CUnescape, Basic) { EXPECT_EQ("\320hi\200", ExpectCUnescapeSuccess("\\320hi\\200")); } +TEST(CUnescape, HandlesCopyOnWriteStrings) { + string dest = "hello"; + string read = dest; + // For std::string, read and dest now share the same buffer. + + string error; + StringPiece source = "llohe"; + // CUnescape is going to write "llohe" to dest, so dest's buffer will be + // reallocated, and read's buffer remains untouched. 
+ EXPECT_TRUE(str_util::CUnescape(source, &dest, &error)); + EXPECT_EQ("hello", read); +} + TEST(StripTrailingWhitespace, Basic) { string test; test = "hello"; From a6a562f1174159143c5fcf85f494eaf511bcf168 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 12:06:05 -0800 Subject: [PATCH 007/104] Added support for multi-column vocab files to tf.contrib.lookup.index_table_from_file PiperOrigin-RevId: 175715120 --- .../python/kernel_tests/lookup_ops_test.py | 31 +++++++++++++++++ tensorflow/python/ops/lookup_ops.py | 33 +++++++++++++++---- 2 files changed, 58 insertions(+), 6 deletions(-) diff --git a/tensorflow/python/kernel_tests/lookup_ops_test.py b/tensorflow/python/kernel_tests/lookup_ops_test.py index 76c790a0a20..9944b5929fc 100644 --- a/tensorflow/python/kernel_tests/lookup_ops_test.py +++ b/tensorflow/python/kernel_tests/lookup_ops_test.py @@ -281,6 +281,37 @@ class IndexTableFromFile(test.TestCase): lookup_ops.tables_initializer().run() self.assertAllEqual((1, 2, 3), ids.eval()) + def test_string_index_table_from_multicolumn_file(self): + vocabulary_file = self._createVocabFile( + "f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1")) + with self.test_session(): + table = lookup_ops.index_table_from_file( + vocabulary_file=vocabulary_file, + num_oov_buckets=1, + key_column_index=0, + value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER) + ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) + + self.assertRaises(errors_impl.OpError, ids.eval) + lookup_ops.tables_initializer().run() + self.assertAllEqual((1, 2, 3), ids.eval()) + + def test_string_index_table_from_multicolumn_file_custom_delimiter(self): + vocabulary_file = self._createVocabFile( + "f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1")) + with self.test_session(): + table = lookup_ops.index_table_from_file( + vocabulary_file=vocabulary_file, + num_oov_buckets=1, + key_column_index=0, + value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER, + delimiter=" ") + ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) + + self.assertRaises(errors_impl.OpError, ids.eval) + lookup_ops.tables_initializer().run() + self.assertAllEqual((1, 2, 3), ids.eval()) + def test_string_index_table_from_file_tensor_filename(self): vocabulary_file = self._createVocabFile("f2i_vocab1.txt") with self.test_session(): diff --git a/tensorflow/python/ops/lookup_ops.py b/tensorflow/python/ops/lookup_ops.py index fa58ffc37e2..f28eadf2489 100644 --- a/tensorflow/python/ops/lookup_ops.py +++ b/tensorflow/python/ops/lookup_ops.py @@ -864,7 +864,10 @@ def index_table_from_file(vocabulary_file=None, default_value=-1, hasher_spec=FastHashSpec, key_dtype=dtypes.string, - name=None): + name=None, + key_column_index=TextFileIndex.WHOLE_LINE, + value_column_index=TextFileIndex.LINE_NUMBER, + delimiter="\t"): """Returns a lookup table that converts a string tensor into int64 IDs. This operation constructs a lookup table to convert tensor of strings into @@ -881,6 +884,16 @@ def index_table_from_file(vocabulary_file=None, The underlying table must be initialized by calling `tf.tables_initializer.run()` or `table.init.run()` once. + To specify multi-column vocabulary files, use key_column_index and + value_column_index and delimiter. + + - TextFileIndex.LINE_NUMBER means use the line number starting from zero, + expects data type int64. + - TextFileIndex.WHOLE_LINE means use the whole line content, expects data + type string. 
+ - A value >=0 means use the index (starting at zero) of the split line based + on `delimiter`. + Sample Usages: If we have a vocabulary file "test.txt" with the following content: @@ -912,6 +925,11 @@ def index_table_from_file(vocabulary_file=None, assignation of out-of-vocabulary buckets. key_dtype: The `key` data type. name: A name for this op (optional). + key_column_index: The column index from the text file to get the `key` + values from. The default is to use the line number, starting from zero. + value_column_index: The column index from the text file ro get the `value` + values from. The default is 0 that represents the whole line content. + delimiter: The delimiter to separate fields in a line. Returns: The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`. @@ -944,19 +962,22 @@ def index_table_from_file(vocabulary_file=None, # Keep the shared_name: # ____ shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size, - TextFileIndex.WHOLE_LINE, - TextFileIndex.LINE_NUMBER) + key_column_index, + value_column_index) else: # Keep the shared_name # ___ shared_name = "hash_table_%s_%s_%s" % (vocabulary_file, - TextFileIndex.WHOLE_LINE, - TextFileIndex.LINE_NUMBER) + key_column_index, + value_column_index) init = TextFileIdTableInitializer( vocabulary_file, vocab_size=vocab_size, key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype, - name="table_init") + name="table_init", + key_column_index=key_column_index, + value_column_index=value_column_index, + delimiter=delimiter) table = HashTable( init, default_value, shared_name=shared_name, name=hash_table_scope) From e0b662c1f6ba378e5c0d0da011d6f789ab6606b3 Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Tue, 14 Nov 2017 12:10:00 -0800 Subject: [PATCH 008/104] Removing generator expression in args_to_matching_eager PiperOrigin-RevId: 175715743 --- tensorflow/python/eager/execute.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensorflow/python/eager/execute.py b/tensorflow/python/eager/execute.py index c6457232e91..0316f33d7e8 100644 --- a/tensorflow/python/eager/execute.py +++ b/tensorflow/python/eager/execute.py @@ -168,7 +168,10 @@ def make_tensor(v, arg_name): def args_to_matching_eager(l, ctx, default_dtype=None): """Convert sequence `l` to eager same-type Tensors.""" EagerTensor = ops.EagerTensor # pylint: disable=invalid-name - if all(isinstance(x, EagerTensor) for x in l): + for x in l: + if not isinstance(x, EagerTensor): + break + else: # note: intentional for-else return l[0].dtype, l # TODO(josh11b): Could we do a better job if we also passed in the # allowed dtypes when that was known? From 301a6c41cbb111fae89657a49775920aa70525fd Mon Sep 17 00:00:00 2001 From: Derek Murray Date: Tue, 14 Nov 2017 12:13:19 -0800 Subject: [PATCH 009/104] Do not log a warning when `Rendezvous::Send()` fails in the Send op kernel. A failing call to `Send()` indicates that the step has been aborted by a corresponding call to `Rendezvous::StartAbort()`. As a result, the error logged by `Send()` is not particularly informative, and creates a non-deterministic amount of extra log spam for each step that fails as `Send()` calls are being issued. The failure that causes the step to be aborted is logged separately by the kernel that failed, unless that kernel deliberately does not log on failure. In particular, this change reduces log spam when using `Iterator.get_next()` in a multi-device setting. 
The `Iterator.get_next()` op deliberately does not log when an `OutOfRange` error (indicating the end of the dataset) is raised, because this is common and expected behavior, especially when using an initializable iterator that is reinitialized at the end of an epoch.
PiperOrigin-RevId: 175716707 --- .../tools/ci_build/ci_parameterized_build.sh | 9 ++- .../install/install_python3.5_pip_packages.sh | 27 +------ .../install/install_python3.6_pip_packages.sh | 75 +++++++++++++++++++ 3 files changed, 84 insertions(+), 27 deletions(-) create mode 100755 tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh index db02f6ef10c..5f791d7bc70 100755 --- a/tensorflow/tools/ci_build/ci_parameterized_build.sh +++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh @@ -546,8 +546,9 @@ echo "" TMP_DIR="" DOCKERFILE_FLAG="" -if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ]]; then - # Modify Dockerfile for Python3.5 build +if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ] || + ["${TF_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then + # Modify Dockerfile for Python3.5 | Python3.6 build TMP_DIR=$(mktemp -d) echo "Docker build will occur in temporary directory: ${TMP_DIR}" @@ -563,10 +564,10 @@ if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ]]; then # Replace a line in the Dockerfile if sed -i \ - 's/RUN \/install\/install_pip_packages.sh/RUN \/install\/install_python3.5_pip_packages.sh/g' \ + "s/RUN \/install\/install_pip_packages.sh/RUN \/install\/install_${TF_BUILD_PYTHON_VERSION}_pip_packages.sh/g" \ "${DOCKERFILE}" then - echo "Copied and modified Dockerfile for Python 3.5 build: ${DOCKERFILE}" + echo "Copied and modified Dockerfile for ${TF_BUILD_PYTHON_VERSION} build: ${DOCKERFILE}" else die "ERROR: Faild to copy and modify Dockerfile: ${DOCKERFILE}" fi diff --git a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh index 81bce95d543..479242aa437 100755 --- a/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh +++ b/tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh @@ -18,33 +18,12 @@ # TODO(cais): Remove this file once we upgrade to ubuntu:16.04 docker images for # Python 3.5 builds. 
+# LINT.IfChange + # fkrull/deadsnakes is for Python3.5 add-apt-repository -y ppa:fkrull/deadsnakes apt-get update -set +e -# Upgrade swig to 3.0.8 -SWIG_VERSION="3.0.8" -swig_ver_flat=$(echo $SWIG_VERSION | sed 's/\.//g' | sed 's/^0*//g') -local_swig_ver=$(swig -version | grep -i version | awk '{print $3}') -local_swig_ver_flat=$(echo $local_swig_ver | sed 's/\.//g' | sed 's/^0*//g') -if [[ -z $local_swig_ver_flat ]]; then - local_swig_ver_flat=0 -fi -if (( $local_swig_ver_flat < $swig_ver_flat )); then - set -e - wget -q http://downloads.sourceforge.net/swig/swig-3.0.8.tar.gz - tar xzf swig-3.0.8.tar.gz - pushd swig-3.0.8 - apt-get install -y --no-install-recommends libpcre3-dev - ./configure - make - make install - rm -f /usr/bin/swig - ln -s /usr/local/bin/swig /usr/bin/swig - popd - rm -rf swig-3.0.8 swig-3.0.8.tar.gz -fi set -e # Install Python 3.5 and dev library apt-get install -y --no-install-recommends python3.5 libpython3.5-dev @@ -92,3 +71,5 @@ pip3.5 install portpicker pip3.5 install werkzeug pip3.5 install grpcio + +# LINT.ThenChange(//tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh) diff --git a/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh b/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh new file mode 100755 index 00000000000..c354aaa154e --- /dev/null +++ b/tensorflow/tools/ci_build/install/install_python3.6_pip_packages.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# Install packages required by Python3.6 build + +# TODO(amitpatankar): Remove this file once we upgrade to ubuntu:16.04 +# docker images for Python 3.6 builds. + +# LINT.IfChange + +# fkrull/deadsnakes is for Python3.6 +add-apt-repository -y ppa:fkrull/deadsnakes +apt-get update + +set -e +# Install Python 3.6 and dev library +apt-get install -y --no-install-recommends python3.6 libpython3.6-dev + +# Install pip3.6 +set +e +pip35_version=$(pip3.6 --version | grep "python 3.6") +if [[ -z $pip35_version ]]; then + set -e + wget -q https://bootstrap.pypa.io/get-pip.py + python3.6 get-pip.py + rm -f get-pip.py +fi + +set -e +# Install six. +pip3.6 install --upgrade absl-py +pip3.6 install --upgrade six==1.10.0 + +# Install protobuf. +pip3.6 install --upgrade protobuf==3.3.0 + +# Remove obsolete version of six, which can sometimes confuse virtualenv. +rm -rf /usr/lib/python3/dist-packages/six* + +# Install numpy, scipy and scikit-learn required by the builds + +# numpy needs to be installed from source to fix segfaults. See: +# https://github.com/tensorflow/tensorflow/issues/6968 +# This workaround isn't needed for Ubuntu 16.04 or later. 
+pip3.6 install --no-binary=:all: --upgrade numpy==1.12.0 + +pip3.6 install scipy==0.18.1 + +pip3.6 install scikit-learn==0.18.1 + +# pandas required by `inflow` +pip3 install pandas==0.19.2 + +# Install recent-enough version of wheel for Python 3.6 wheel builds +pip3.6 install wheel==0.29.0 + +pip3.6 install portpicker + +pip3.6 install werkzeug + +pip3.6 install grpcio + +# LINT.ThenChange(//tensorflow/tools/ci_build/install/install_python3.5_pip_packages.sh) From 79cbc15a815d8a3e6f3a76df5f419c25301be4d6 Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Tue, 14 Nov 2017 12:21:22 -0800 Subject: [PATCH 012/104] Build the function graph only once when inferring shapes for function. This speeds things up quite a bit when ShapeRefiner::RunShapeFn needs more than one pass to compute the shapes. PiperOrigin-RevId: 175717420 --- .../core/common_runtime/shape_refiner.cc | 87 ++++++++++--------- .../core/common_runtime/shape_refiner.h | 19 ++-- 2 files changed, 53 insertions(+), 53 deletions(-) diff --git a/tensorflow/core/common_runtime/shape_refiner.cc b/tensorflow/core/common_runtime/shape_refiner.cc index 8e314c7ea57..10901da192f 100644 --- a/tensorflow/core/common_runtime/shape_refiner.cc +++ b/tensorflow/core/common_runtime/shape_refiner.cc @@ -129,80 +129,82 @@ Status InferShapesForFunctionSubNode(const Node* node, ShapeRefiner* refiner, // Maybe we won't support recursive functions at all in TF, because of // other maintanabilty issues. Status ShapeRefiner::InferShapesForFunction( - const tensorflow::FunctionLibraryDefinition& function_library, - const tensorflow::FunctionDef& function_def, bool keep_nested_shapes, + const tensorflow::FunctionDef* function_def, bool keep_nested_shapes, ExtendedInferenceContext* outer_context) { - InstantiationResult result; - TF_RETURN_IF_ERROR(InstantiateFunction( - function_def, outer_context->get_context()->attrs(), - [&function_library](const string& op, const OpDef** sig) { - return function_library.LookUpOpDef(op, sig); - }, - &result)); + const Graph* graph; + auto it = functions_.find(function_def); + if (it != functions_.end()) { + graph = it->second.get(); + } else { + InstantiationResult result; + TF_RETURN_IF_ERROR(InstantiateFunction( + *function_def, outer_context->get_context()->attrs(), + [this](const string& op, const OpDef** sig) { + return this->function_library_->LookUpOpDef(op, sig); + }, + &result)); - Graph graph(&function_library); - { + Graph* new_graph = new Graph(function_library_); GraphConstructorOptions options; options.allow_internal_ops = true; - TF_RETURN_IF_ERROR(ConvertNodeDefsToGraph(options, result.nodes, &graph)); + TF_RETURN_IF_ERROR( + ConvertNodeDefsToGraph(options, result.nodes, new_graph)); + functions_[function_def].reset(new_graph); + graph = new_graph; } - ShapeRefiner refiner(graph.versions().producer(), &function_library); - refiner.set_disable_constant_propagation(disable_constant_propagation_); - refiner.set_function_library_for_shape_inference(&function_library); - if (keep_nested_shapes) refiner.set_keep_nested_shape_inferences(); - + std::unordered_set function_nodes; + Status inference_status = Status::OK(); { - Status inference_status = Status::OK(); - auto node_shape_inference_lambda = [&refiner, &outer_context, + auto node_shape_inference_lambda = [this, &outer_context, &function_nodes, &inference_status](const Node* node) { if (!inference_status.ok()) return; inference_status = InferShapesForFunctionSubNode( - node, &refiner, outer_context->get_context()); + node, this, 
outer_context->get_context()); + function_nodes.insert(node); }; // Calls inference lambda for each node after visiting all predecessors. // Ensures that we are adding nodes to ShapeRefiner in the topological // order. - ReverseDFS(graph, {}, node_shape_inference_lambda); - - TF_RETURN_IF_ERROR(inference_status); + ReverseDFS(*graph, {}, node_shape_inference_lambda); } - if (keep_nested_shapes) { + if (keep_nested_shapes && inference_status.ok()) { // Fill the nested inferences map. // // The materialized function graph has extra nodes for arguments and // return values, which are not explicitly listed in the FunctionDef, // we filter out these special nodes here to not expose the implementation // details and keep only inferences for the nodes listed in the FunctionDef. - - auto stolen_contexts = refiner.StealInferenceContexts(); - std::unordered_map user_defined_nodes; - for (const auto& node_def : function_def.node_def()) { + for (const auto& node_def : function_def->node_def()) { user_defined_nodes[node_def.name()] = &node_def; } std::unordered_map> nested_inferences; - for (auto& stolen_kv : stolen_contexts) { - auto& stolen_name = stolen_kv.first->name(); - if (user_defined_nodes.find(stolen_name) != user_defined_nodes.end()) { - nested_inferences[stolen_name] = std::move(stolen_kv.second); - - // By default InferenceContext refers to a NodeDef from Graph, - // we have to change it to a NodeDef with longer lifetime, - // because the Graph is a temporary in this function. - nested_inferences[stolen_name]->get_context()->node_def_ = - user_defined_nodes[stolen_name]; + for (const Node* node : function_nodes) { + const string& node_name = node->name(); + if (user_defined_nodes.find(node_name) != user_defined_nodes.end()) { + nested_inferences[node_name] = std::move(node_to_context_[node]); + node_to_context_.erase(node); + // By default InferenceContext refers to a NodeDef from Graph. + // Change it to the publicly accessible NodeDef of the function + // definition. + nested_inferences[node_name]->get_context()->node_def_ = + user_defined_nodes[node_name]; } } - outer_context->set_nested_inferences(std::move(nested_inferences)); + } else { + // Delete the contexts created for the functions nodes to save memory. + for (const Node* node : function_nodes) { + node_to_context_.erase(node); + } } - return Status::OK(); + return inference_status; } Status ShapeRefiner::AddNode(const Node* node) { @@ -781,9 +783,8 @@ Status ShapeRefiner::RunShapeFn(const Node* node, auto* func_def = function_library_->Find(op_reg_data->op_def.name()); if (func_def) { - TF_RETURN_IF_ERROR(InferShapesForFunction( - *function_library_, *func_def, keep_nested_shape_inferences_, ec)); - return Status::OK(); + return InferShapesForFunction(func_def, keep_nested_shape_inferences_, + ec); } } diff --git a/tensorflow/core/common_runtime/shape_refiner.h b/tensorflow/core/common_runtime/shape_refiner.h index 570b4db1635..da42c30ce94 100644 --- a/tensorflow/core/common_runtime/shape_refiner.h +++ b/tensorflow/core/common_runtime/shape_refiner.h @@ -159,6 +159,7 @@ class ShapeRefiner { // With this enabled, shape inference can take more time since it descends // into all function calls. It doesn't do inference once for each function // definition, but once for each function call. + // The function library must outlive the shape refiner. 
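The speed-up in this patch comes from memoizing the instantiated function graph: the first call to InferShapesForFunction builds the Graph for a FunctionDef and stores it in the new functions_ map, and later shape-inference passes reuse it instead of instantiating and converting the function again. The same caching pattern as a minimal Python sketch (illustrative only; instantiate_graph and the cache key are hypothetical stand-ins for the C++ members above):

    _graph_cache = {}  # FunctionDef key -> instantiated graph, like `functions_` above

    def infer_shapes_for_function(fdef_key, instantiate_graph):
        graph = _graph_cache.get(fdef_key)
        if graph is None:
            # Expensive step: done once per function definition, not once per pass.
            graph = instantiate_graph(fdef_key)
            _graph_cache[fdef_key] = graph
        # ... run shape inference over `graph` here ...
        return graph

The lifetime comment just above ("The function library must outlive the shape refiner") is the flip side of the cache: the stored graphs are built against that function library, so it has to stay alive as long as the refiner does.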
void set_function_library_for_shape_inference( const tensorflow::FunctionLibraryDefinition* lib) { function_library_ = lib; @@ -210,10 +211,9 @@ class ShapeRefiner { // - outer_context will contain output shapes inferred from input shapes // - outer_context will contain nested inferences collection, iff // keep_nested_shapes is true - Status InferShapesForFunction( - const tensorflow::FunctionLibraryDefinition& function_library, - const tensorflow::FunctionDef& function_def, bool keep_nested_shapes, - ExtendedInferenceContext* outer_context); + Status InferShapesForFunction(const tensorflow::FunctionDef* function_def, + bool keep_nested_shapes, + ExtendedInferenceContext* outer_context); // Tries to infer tensor output based on the input shapes of the node. In some // cases, the shapes of the inputs are sufficient for inferring the contents @@ -260,12 +260,6 @@ class ShapeRefiner { Status RunShapeFn(const Node* node, const OpRegistrationData* op_reg_data, ExtendedInferenceContext* ec); - // Destructive operation, which steals ownership of inference contexts map. - std::unordered_map> - StealInferenceContexts() { - return std::move(node_to_context_); - } - int32 graph_def_version_; const OpRegistryInterface* const ops_registry_; @@ -299,6 +293,11 @@ class ShapeRefiner { // defined functions. By default that info is discarded to save memory. bool keep_nested_shape_inferences_ = false; + // Cache the graph corresponding to each functin definition for which shapes + // are refined. + std::unordered_map> + functions_; + TF_DISALLOW_COPY_AND_ASSIGN(ShapeRefiner); }; From 6a4391c19bc8346df45862865cb4db3ba231bd86 Mon Sep 17 00:00:00 2001 From: Peter Hawkins Date: Tue, 14 Nov 2017 12:28:23 -0800 Subject: [PATCH 013/104] Remove experimental tpu.outside_all_rewrites() API. PiperOrigin-RevId: 175718301 --- tensorflow/contrib/tpu/__init__.py | 1 - tensorflow/contrib/tpu/python/tpu/tpu.py | 21 --------------------- 2 files changed, 22 deletions(-) diff --git a/tensorflow/contrib/tpu/__init__.py b/tensorflow/contrib/tpu/__init__.py index 6a5fe06ff07..ec4c4e1be6f 100644 --- a/tensorflow/contrib/tpu/__init__.py +++ b/tensorflow/contrib/tpu/__init__.py @@ -24,7 +24,6 @@ @@initialize_system @@shutdown_system @@core -@@outside_all_rewrites @@replicate @@shard @@batch_parallel diff --git a/tensorflow/contrib/tpu/python/tpu/tpu.py b/tensorflow/contrib/tpu/python/tpu/tpu.py index d521297d994..bc3c888b1fc 100644 --- a/tensorflow/contrib/tpu/python/tpu/tpu.py +++ b/tensorflow/contrib/tpu/python/tpu/tpu.py @@ -19,7 +19,6 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import contextlib from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.tpu.python.ops import tpu_ops @@ -81,26 +80,6 @@ def core(num): return "device:TPU_REPLICATED_CORE:{}".format(num) -# Experimental API to 'break out' of a tpu.rewrite() (or shard(), etc.) context. -# In -# -# XXX -# with tpu.rewrite(...): -# YYY -# with tpu.outside_all_rewrites(): -# ZZZ -# -# the Ops in ZZZ are added outside the scope of the rewrite(). -# TODO(phawkins): currently outside_all_rewrites() pops out of all nested -# control flow scopes, for example loops. It would make more sense if it only -# popped out of a single scope. 
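For call sites that relied on the experimental helper removed here, the body shown just below is all it did: clear the control-dependency scope so that newly created ops escape the enclosing context. A minimal inline equivalent (assuming that escaping the control-dependency scope really is all the caller needed; the function name is hypothetical):

    from tensorflow.python.framework import ops

    def build_ops_outside_rewrite():
        # Same effect as the removed tpu.outside_all_rewrites() wrapper: ops
        # created in this block do not inherit the surrounding control dependencies.
        with ops.control_dependencies(None):
            # ... create the ops that should live outside the rewrite here ...
            pass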
-@contextlib.contextmanager -def outside_all_rewrites(): - """Experimental API to 'break out' of a tpu.rewrite() (or shard(), etc.).""" - with ops.control_dependencies(None): - yield - - class TPUReplicateContext(control_flow_ops.ControlFlowContext): """A ControlFlowContext for nodes inside a TPU computation. From 5ae3049e4b8317ee5247dc5ebc3f2bbc0a5b3869 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 12:36:22 -0800 Subject: [PATCH 014/104] Enabling default extend_export_strategy call to use the base_export_strategy name as this is a common case. PiperOrigin-RevId: 175719299 --- .../learn/utils/saved_model_export_utils.py | 41 +++++++--- .../utils/saved_model_export_utils_test.py | 81 +++++++++++++++++-- 2 files changed, 102 insertions(+), 20 deletions(-) diff --git a/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py b/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py index 49413092a6b..6ffd2a13399 100644 --- a/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py +++ b/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py @@ -33,6 +33,7 @@ from __future__ import division from __future__ import print_function import os +import tempfile import time from tensorflow.contrib.layers.python.layers import feature_column @@ -644,18 +645,22 @@ def make_best_model_export_strategy(serving_input_fn, # TODO(b/67013778): Revisit this approach when corresponding changes to # TF Core are finalized. -def extend_export_strategy(base_export_strategy, post_export_fn, - post_export_name): +def extend_export_strategy(base_export_strategy, + post_export_fn, + post_export_name=None): """Extend ExportStrategy, calling post_export_fn after export. Args: base_export_strategy: An ExportStrategy that can be passed to the Experiment constructor. post_export_fn: A user-specified function to call after exporting the - SavedModel. Takes the export directory as an argument, and returns - a string path to a (potentially different) SavedModel. + SavedModel. Takes two arguments - the path to the SavedModel exported by + base_export_strategy and the directory where to export the SavedModel + modified by the post_export_fn. Returns the path to the exported + SavedModel. post_export_name: The directory name under the export base directory where - SavedModels generated by the post_export_fn will be written. + SavedModels generated by the post_export_fn will be written. If None, the + directory name of base_export_strategy is used. Returns: An ExportStrategy that can be passed to the Experiment constructor. @@ -675,12 +680,24 @@ def extend_export_strategy(base_export_strategy, post_export_fn, Raises: ValueError: If `estimator` is a ${tf.estimator.Estimator} instance - and `default_output_alternative_key` was specified. + and `default_output_alternative_key` was specified or if post_export_fn + does not return a valid directory. 
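Before the rewritten implementation and tests that follow, the updated contract in one place: the post-export function now receives both the SavedModel path produced by the base strategy and a scratch directory, it must return a sub-directory of that scratch directory, and the result is then renamed under the caller's export_dir_base. A usage sketch (the two imports match files touched by this patch; the export functions and directory names are hypothetical):

    import os
    from tensorflow.contrib.learn.python.learn import export_strategy
    from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils

    def _base_export_fn(estimator, export_dir_base, checkpoint_path=None):
        # Stand-in for a real export_fn: writes a SavedModel under export_dir_base
        # and returns that path.
        path = os.path.join(export_dir_base, 'base')
        os.makedirs(path)
        return path

    def _post_export_fn(base_export_path, post_export_dir):
        # Must return a sub-directory of post_export_dir; the new code checks this.
        rewritten = os.path.join(post_export_dir, 'rewrite')
        os.makedirs(rewritten)
        # ... copy or transform the SavedModel from base_export_path here ...
        return rewritten

    base = export_strategy.ExportStrategy('Servo', _base_export_fn)
    final = saved_model_export_utils.extend_export_strategy(base, _post_export_fn)
    # final.name == 'Servo': with post_export_name=None the base strategy's name is reused.

The tests added later in this patch exercise the same two-argument pattern, including the ValueError raised when the returned path is not under the scratch directory.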
""" - export_dir = base_export_strategy.export(estimator, export_dir_base, - checkpoint_path) - if post_export_fn: - export_dir = post_export_fn(export_dir) - return export_dir + tmp_base_export_dir = tempfile.mkdtemp() + tmp_base_export = base_export_strategy.export( + estimator, tmp_base_export_dir, checkpoint_path) + tmp_post_export_dir = tempfile.mkdtemp() + tmp_post_export = post_export_fn(tmp_base_export, tmp_post_export_dir) - return export_strategy.ExportStrategy(post_export_name, export_fn) + if not tmp_post_export.startswith(tmp_post_export_dir): + raise ValueError('post_export_fn must return a sub-directory of {}' + .format(tmp_post_export_dir)) + export_relpath = os.path.relpath(tmp_post_export, tmp_post_export_dir) + + gfile.Rename( + os.path.join(tmp_post_export_dir, export_relpath), + os.path.join(export_dir_base, export_relpath)) + return os.path.join(export_dir_base, export_relpath) + + name = post_export_name if post_export_name else base_export_strategy.name + return export_strategy.ExportStrategy(name, export_fn) diff --git a/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py b/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py index 27f17b54221..ec3a88003f0 100644 --- a/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py +++ b/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils_test.py @@ -743,12 +743,19 @@ class SavedModelExportUtilsTest(test.TestCase): None) def test_extend_export_strategy(self): - def _base_export_fn(unused_estimator, export_dir_base, - unused_checkpoint_path=None): - return export_dir_base + "/e1" - def _post_export_fn(orig_path): - return orig_path + "/rewrite" + def _base_export_fn(unused_estimator, + export_dir_base, + unused_checkpoint_path=None): + base_path = os.path.join(export_dir_base, "e1") + gfile.MkDir(base_path) + return base_path + + def _post_export_fn(orig_path, new_path): + assert orig_path.endswith("/e1") + post_export_path = os.path.join(new_path, "rewrite") + gfile.MkDir(post_export_path) + return post_export_path base_export_strategy = export_strategy_lib.ExportStrategy( "Servo", _base_export_fn) @@ -758,9 +765,67 @@ class SavedModelExportUtilsTest(test.TestCase): self.assertEqual(final_export_strategy.name, "Servo2") test_estimator = TestEstimator() - final_path = final_export_strategy.export(test_estimator, "/path/to/orig", - "/path/to/checkpoint") - self.assertEqual("/path/to/orig/e1/rewrite", final_path) + tmpdir = tempfile.mkdtemp() + final_path = final_export_strategy.export(test_estimator, tmpdir, + os.path.join( + tmpdir, "checkpoint")) + self.assertEqual(os.path.join(tmpdir, "rewrite"), final_path) + + def test_extend_export_strategy_same_name(self): + + def _base_export_fn(unused_estimator, + export_dir_base, + unused_checkpoint_path=None): + base_path = os.path.join(export_dir_base, "e1") + gfile.MkDir(base_path) + return base_path + + def _post_export_fn(orig_path, new_path): + assert orig_path.endswith("/e1") + post_export_path = os.path.join(new_path, "rewrite") + gfile.MkDir(post_export_path) + return post_export_path + + base_export_strategy = export_strategy_lib.ExportStrategy( + "Servo", _base_export_fn) + + final_export_strategy = saved_model_export_utils.extend_export_strategy( + base_export_strategy, _post_export_fn) + self.assertEqual(final_export_strategy.name, "Servo") + + test_estimator = TestEstimator() + tmpdir = tempfile.mkdtemp() + final_path = final_export_strategy.export(test_estimator, tmpdir, + 
os.path.join( + tmpdir, "checkpoint")) + self.assertEqual(os.path.join(tmpdir, "rewrite"), final_path) + + def test_extend_export_strategy_raises_error(self): + + def _base_export_fn(unused_estimator, + export_dir_base, + unused_checkpoint_path=None): + base_path = os.path.join(export_dir_base, "e1") + gfile.MkDir(base_path) + return base_path + + def _post_export_fn(unused_orig_path, unused_new_path): + return tempfile.mkdtemp() + + base_export_strategy = export_strategy_lib.ExportStrategy( + "Servo", _base_export_fn) + + final_export_strategy = saved_model_export_utils.extend_export_strategy( + base_export_strategy, _post_export_fn) + + test_estimator = TestEstimator() + tmpdir = tempfile.mkdtemp() + with self.assertRaises(ValueError) as ve: + final_export_strategy.export(test_estimator, tmpdir, + os.path.join(tmpdir, "checkpoint")) + + self.assertTrue( + "post_export_fn must return a sub-directory" in str(ve.exception)) def _create_test_export_dir(export_dir_base): From 6d5793853cfdd27fe806bca4fad0f4e3c3a32b73 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 14 Nov 2017 12:53:47 -0800 Subject: [PATCH 015/104] The link fixer needs these to be all on one line. PiperOrigin-RevId: 175721637 --- tensorflow/docs_src/mobile/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/docs_src/mobile/index.md b/tensorflow/docs_src/mobile/index.md index a10db74364b..6bcd7d09d9c 100644 --- a/tensorflow/docs_src/mobile/index.md +++ b/tensorflow/docs_src/mobile/index.md @@ -2,8 +2,8 @@ TensorFlow was designed to be a good deep learning solution for mobile platforms. Currently we have two solutions for deploying machine learning -applications on mobile and embedded devices: @{$mobile/mobile_intro$TensorFlow -for Mobile} and @{$mobile/tflite$TensorFlow Lite}. +applications on mobile and embedded devices: +@{$mobile/mobile_intro$TensorFlow for Mobile} and @{$mobile/tflite$TensorFlow Lite}. ## TensorFlow Lite versus TensorFlow Mobile From d045a51072eed09b3fcb990ccd3ad4872ce0ada3 Mon Sep 17 00:00:00 2001 From: Rohan Jain Date: Tue, 14 Nov 2017 13:03:31 -0800 Subject: [PATCH 016/104] Enable prefetching on the resnet50 benchmark for eager. 
PiperOrigin-RevId: 175722984 --- tensorflow/contrib/eager/python/BUILD | 7 +- tensorflow/contrib/eager/python/datasets.py | 72 +++++++++++++++------ 2 files changed, 57 insertions(+), 22 deletions(-) diff --git a/tensorflow/contrib/eager/python/BUILD b/tensorflow/contrib/eager/python/BUILD index 6783f7beb08..92746b866af 100644 --- a/tensorflow/contrib/eager/python/BUILD +++ b/tensorflow/contrib/eager/python/BUILD @@ -50,21 +50,22 @@ py_library( srcs_version = "PY2AND3", visibility = ["//tensorflow:internal"], deps = [ + "//tensorflow/contrib/data/python/ops:prefetching_py", "//tensorflow/python:array_ops", "//tensorflow/python:dataset_ops_gen", "//tensorflow/python:errors", "//tensorflow/python:framework_ops", "//tensorflow/python:resource_variable_ops", + "//tensorflow/python/data/ops:iterator_ops", "//tensorflow/python/data/util:nest", "//tensorflow/python/eager:context", ], ) -py_test( +cuda_py_test( name = "datasets_test", srcs = ["datasets_test.py"], - srcs_version = "PY2AND3", - deps = [ + additional_deps = [ ":datasets", "//tensorflow/python:dtypes", "//tensorflow/python:framework_ops", diff --git a/tensorflow/contrib/eager/python/datasets.py b/tensorflow/contrib/eager/python/datasets.py index 98e6983658a..b559cce6b12 100644 --- a/tensorflow/contrib/eager/python/datasets.py +++ b/tensorflow/contrib/eager/python/datasets.py @@ -20,11 +20,15 @@ from __future__ import print_function import threading +from tensorflow.contrib.data.python.ops import prefetching_ops +from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.util import nest from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors +from tensorflow.python.framework import function from tensorflow.python.framework import ops -from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.ops import resource_variable_ops @@ -32,12 +36,12 @@ _uid_counter = 0 _uid_lock = threading.Lock() -def _iterator_shared_name(): +def _generate_shared_name(prefix): with _uid_lock: global _uid_counter uid = _uid_counter _uid_counter += 1 - return "eager_iterator_{}".format(uid) + return "{}_{}".format(prefix, uid) class Iterator(object): @@ -72,11 +76,12 @@ class Iterator(object): with ops.device("/device:CPU:0"): ds_variant = dataset._as_variant_tensor() # pylint: disable=protected-access self._output_types = dataset.output_types + self._output_shapes = dataset.output_shapes self._flat_output_types = nest.flatten(dataset.output_types) self._flat_output_shapes = nest.flatten(dataset.output_shapes) self._resource = gen_dataset_ops.iterator( container="", - shared_name=_iterator_shared_name(), + shared_name=_generate_shared_name("eager_iterator"), output_types=self._flat_output_types, output_shapes=self._flat_output_shapes) gen_dataset_ops.make_iterator(ds_variant, self._resource) @@ -84,6 +89,35 @@ class Iterator(object): self._resource_deleter = resource_variable_ops.EagerResourceDeleter( handle=self._resource, handle_device="/device:CPU:0") self._device = context.context().device_name + self._buffer_resource_handle = None + if not context.context().device_spec.device_type: + is_remote_device = False + else: + is_remote_device = context.context().device_spec.device_type != "CPU" + if is_remote_device: + with ops.device("/device:CPU:0"): + iter_string_handle = gen_dataset_ops.iterator_to_string_handle( + self._resource) + + 
@function.Defun(dtypes.string) + def remote_fn(h): + remote_iterator = iterator_ops.Iterator.from_string_handle( + h, self._output_types, self._output_shapes) + return remote_iterator.get_next() + + remote_fn.add_to_graph(None) + target = constant_op.constant("/device:CPU:0") + with ops.device(self._device): + self._buffer_resource_handle = prefetching_ops.function_buffering_resource( + string_arg=iter_string_handle, + f=remote_fn, + target_device=target, + buffer_size=10, + thread_pool_size=1, + container="", + shared_name=_generate_shared_name("function_buffer_resource")) + self._buffer_resource_deleter = resource_variable_ops.EagerResourceDeleter( + handle=self._buffer_resource_handle, handle_device=self._device) def __iter__(self): return self @@ -93,20 +127,20 @@ class Iterator(object): def next(self): """Return the next tf.Tensor from the dataset.""" - try: - # TODO(ashankar): Consider removing this ops.device() contextmanager - # and instead mimic ops placement in graphs: Operations on resource - # handles execute on the same device as where the resource is placed. - with ops.device("/device:CPU:0"): - ret = gen_dataset_ops.iterator_get_next( - self._resource, - output_types=self._flat_output_types, - output_shapes=self._flat_output_shapes) - except errors.OutOfRangeError: - raise StopIteration - # Copies tensors from CPU to the current device if necessary. - # TODO(rohanj): This should be replaced by the mechanism to have the - # runtime's threads copy tensors to the destination device. with ops.device(self._device): - ret = [array_ops.identity(x) for x in ret] + try: + if self._buffer_resource_handle is not None: + ret = prefetching_ops.function_buffering_resource_get_next( + function_buffer_resource=self._buffer_resource_handle, + output_types=self._flat_output_types) + else: + # TODO(ashankar): Consider removing this ops.device() contextmanager + # and instead mimic ops placement in graphs: Operations on resource + # handles execute on the same device as where the resource is placed. + ret = gen_dataset_ops.iterator_get_next( + self._resource, + output_types=self._flat_output_types, + output_shapes=self._flat_output_shapes) + except errors.OutOfRangeError: + raise StopIteration return nest.pack_sequence_as(self._output_types, ret) From f4025a66715a027592d45c435f95cdbe467608f1 Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Tue, 14 Nov 2017 13:09:50 -0800 Subject: [PATCH 017/104] Clear trace in C instead of python. PiperOrigin-RevId: 175723846 --- tensorflow/python/eager/pywrap_tensor.cc | 19 ++----------------- tensorflow/python/framework/ops.py | 5 ----- 2 files changed, 2 insertions(+), 22 deletions(-) diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc index 653f3ef84e3..91192fea62d 100644 --- a/tensorflow/python/eager/pywrap_tensor.cc +++ b/tensorflow/python/eager/pywrap_tensor.cc @@ -330,24 +330,9 @@ void EagerTensor_dealloc(EagerTensor* self) { // We have the global interpreter lock, so use this chance to perform delayed // refcount decrements. tensorflow::ClearDecrefCache(); - PyObject* id = PyLong_FromLongLong(self->id); - PyObject* func = PyObject_GetAttrString(reinterpret_cast(self), - "_delete_trace"); + auto id = self->id; Py_TYPE(self)->tp_free(self); - self = nullptr; - // Note that we run `func` after calling `tp_free`. Otherwise calling that - // function can potentially trigger garbage collection that observes `self` - // in this half deleted state and crashes. 
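The datasets.py change above routes the eager Iterator's next() through a function_buffering_resource whenever the current device is not a CPU, so a small buffer of elements is staged on the accelerator while the Python loop body runs. Nothing changes on the caller's side; a minimal sketch of eager iteration with this class (assuming the tf.contrib.eager namespace re-exports it, as the tfe alias conventionally did at this point, and that eager execution is enabled at program start):

    import tensorflow as tf
    import tensorflow.contrib.eager as tfe

    tfe.enable_eager_execution()

    dataset = tf.data.Dataset.range(8).map(lambda x: x * x)
    for element in tfe.Iterator(dataset):
        # On a GPU host the buffering resource added above prefetches the next
        # elements to the device while this body executes.
        print(element)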
- // Note that `func` is a staticmethod and does not need `self` to be around - // for running. - // We clear (and later restore) any errors that have already been set. Else - // these erorrs may appear randomly as part of the function execution. - PyObject *a, *b, *c; - PyErr_Fetch(&a, &b, &c); - PyObject_CallFunctionObjArgs(func, id, nullptr); - PyErr_Restore(a, b, c); - Py_DECREF(func); - Py_DECREF(id); + TFE_Py_TapeStackDeleteTrace(id); } // Getter for `_id`. diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py index b0abbfc7dcc..09e0a83c760 100644 --- a/tensorflow/python/framework/ops.py +++ b/tensorflow/python/framework/ops.py @@ -605,11 +605,6 @@ class Tensor(_TensorLike): class _EagerTensorBase(Tensor): """Base class for EagerTensor.""" - @staticmethod - def _delete_trace(tid): - """Helper function to be called by __del__ of the subclass.""" - tape.delete_trace(tid) - @property def dtype(self): # Note: using the intern table directly here as this is From 7ad948134fc7fa376c4cea909316561d6f98ef96 Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Tue, 14 Nov 2017 13:12:58 -0800 Subject: [PATCH 018/104] Do not create dtype objects in the eager hot path for execution, use enums PiperOrigin-RevId: 175724282 --- tensorflow/python/eager/execute.py | 8 ++++---- tensorflow/python/eager/python_eager_op_gen.cc | 5 ----- tensorflow/python/framework/constant_op.py | 4 ++-- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/tensorflow/python/eager/execute.py b/tensorflow/python/eager/execute.py index 0316f33d7e8..1b5f3f7f9d1 100644 --- a/tensorflow/python/eager/execute.py +++ b/tensorflow/python/eager/execute.py @@ -172,7 +172,7 @@ def args_to_matching_eager(l, ctx, default_dtype=None): if not isinstance(x, EagerTensor): break else: # note: intentional for-else - return l[0].dtype, l + return l[0]._datatype_enum(), l # pylint: disable=protected-access # TODO(josh11b): Could we do a better job if we also passed in the # allowed dtypes when that was known? 
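This patch keeps only the integer datatype enum on the eager hot path (see the args_to_matching_eager change above and the conversions that follow), instead of building DType wrapper objects only to turn them back into enums in the generated op code. The two representations are interchangeable outside the hot path; a small illustration, independent of the patch itself:

    from tensorflow.python.framework import dtypes

    enum_value = dtypes.float32.as_datatype_enum  # plain int, what execute() now carries
    dtype_obj = dtypes.as_dtype(enum_value)       # DType object, built only where needed
    assert dtype_obj == dtypes.float32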
@@ -196,7 +196,7 @@ def args_to_matching_eager(l, ctx, default_dtype=None): else: ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l] - return dtype, ret + return dtype.as_datatype_enum, ret def convert_to_mixed_eager_tensors(values, ctx): @@ -205,7 +205,7 @@ def convert_to_mixed_eager_tensors(values, ctx): t, context=ctx._handle, device=ctx.device_name) # pylint: disable=protected-access for t in values ] - types = [t.dtype for t in v] + types = [t._datatype_enum() for t in v] # pylint: disable=protected-access return types, v @@ -243,5 +243,5 @@ def args_to_mixed_eager_tensors(lists, ctx): for j in range(len(lists)): lists_ret[j].append( ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx)) - types.append(dtype) + types.append(dtype.as_datatype_enum) return types, lists_ret diff --git a/tensorflow/python/eager/python_eager_op_gen.cc b/tensorflow/python/eager/python_eager_op_gen.cc index e57488cb640..371df563bbb 100644 --- a/tensorflow/python/eager/python_eager_op_gen.cc +++ b/tensorflow/python/eager/python_eager_op_gen.cc @@ -589,8 +589,6 @@ void GenEagerPythonOp::AddEagerInferredAttrs() { strings::StrAppend(&result_, " ", VectorToTuple(p), " = ", inputs_var, "\n"); } - strings::StrAppend(&result_, " ", var_name, " = ", var_name, - ".as_datatype_enum\n"); } else if (attr.type() == "list(type)") { // NOTE: We ignore default values for these attrs, since it is // unclear how you would use it, and the one use case is @@ -617,9 +615,6 @@ void GenEagerPythonOp::AddEagerInferredAttrs() { } strings::StrAppend(&result_, " ", var_name, ", ", inputs_var, " = ", conversion, "(", inputs_var, ", _ctx)\n"); - strings::StrAppend(&result_, " ", var_name, - " = [_t.as_datatype_enum for _t in ", var_name, - "]\n"); } } } diff --git a/tensorflow/python/framework/constant_op.py b/tensorflow/python/framework/constant_op.py index d51e142da19..bf3be34d851 100644 --- a/tensorflow/python/framework/constant_op.py +++ b/tensorflow/python/framework/constant_op.py @@ -55,10 +55,10 @@ from tensorflow.python.framework import tensor_util def _eager_reshape(tensor, shape, ctx): """Eager-only version of Reshape op; requires tensor is an eager Tensor.""" - attr_t = tensor.dtype.as_datatype_enum + attr_t = tensor._datatype_enum() # pylint: disable=protected-access attr_tshape, (shape,) = execute.args_to_matching_eager( [shape], ctx, dtypes.int32) - attr_tshape = attr_tshape.as_datatype_enum + attr_tshape = attr_tshape inputs_flat = [tensor, shape] attrs = ("T", attr_t, "Tshape", attr_tshape) result, = execute.execute( From 144eaa8e273da43b7ca881d7dcac98b65f698f11 Mon Sep 17 00:00:00 2001 From: Anjali Sridhar Date: Tue, 14 Nov 2017 13:29:14 -0800 Subject: [PATCH 019/104] Update tf.keras Dataset, Engine and Layers to the Keras 2.0.9 API. 
PiperOrigin-RevId: 175726451 --- tensorflow/contrib/cmake/tf_python.cmake | 1 + tensorflow/python/keras/BUILD | 2 + .../python/keras/_impl/keras/callbacks.py | 15 ++-- .../keras/_impl/keras/callbacks_test.py | 12 ++-- .../keras/_impl/keras/datasets/__init__.py | 5 +- .../_impl/keras/datasets/boston_housing.py | 7 +- .../keras/_impl/keras/datasets/cifar10.py | 2 +- .../keras/_impl/keras/datasets/cifar100.py | 2 +- .../_impl/keras/datasets/fashion_mnist.py | 59 ++++++++++++++++ .../python/keras/_impl/keras/datasets/imdb.py | 27 ++++---- .../keras/_impl/keras/datasets/mnist.py | 4 +- .../keras/_impl/keras/datasets/reuters.py | 14 ++-- .../keras/_impl/keras/engine/topology.py | 25 +++++++ .../keras/_impl/keras/engine/training.py | 68 ++++++++++++++----- .../keras/_impl/keras/engine/training_test.py | 13 ++++ .../keras/_impl/keras/layers/convolutional.py | 58 ++++++++++------ .../keras/layers/convolutional_recurrent.py | 10 +-- .../python/keras/_impl/keras/layers/core.py | 29 ++++---- .../keras/_impl/keras/layers/core_test.py | 6 ++ .../python/keras/_impl/keras/layers/merge.py | 33 ++++++++- .../keras/_impl/keras/layers/merge_test.py | 14 ++++ .../keras/_impl/keras/layers/pooling.py | 4 +- .../keras/_impl/keras/layers/wrappers.py | 4 -- .../keras/_impl/keras/utils/io_utils.py | 2 +- .../keras/_impl/keras/utils/layer_utils.py | 40 +++++++++-- tensorflow/python/keras/datasets/__init__.py | 1 + .../keras/datasets/fashion_mnist/__init__.py | 0 tensorflow/python/layers/core.py | 12 +++- tensorflow/python/layers/core_test.py | 10 +++ .../api/golden/tensorflow.keras.-model.pbtxt | 2 +- ...sorflow.keras.datasets.fashion_mnist.pbtxt | 3 + .../golden/tensorflow.keras.datasets.pbtxt | 4 ++ ...orflow.keras.layers.-conv-l-s-t-m2-d.pbtxt | 2 +- ...flow.keras.layers.-separable-conv2-d.pbtxt | 2 +- ...ras.layers.-separable-convolution2-d.pbtxt | 2 +- ...rflow.keras.layers.-time-distributed.pbtxt | 4 -- .../tensorflow.keras.layers.-wrapper.pbtxt | 4 -- .../tensorflow.keras.models.-model.pbtxt | 2 +- 38 files changed, 381 insertions(+), 123 deletions(-) create mode 100644 tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py create mode 100644 tensorflow/python/keras/datasets/fashion_mnist/__init__.py create mode 100644 tensorflow/tools/api/golden/tensorflow.keras.datasets.fashion_mnist.pbtxt diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake index 9517aa49637..9b863f7bc61 100755 --- a/tensorflow/contrib/cmake/tf_python.cmake +++ b/tensorflow/contrib/cmake/tf_python.cmake @@ -238,6 +238,7 @@ add_python_module("tensorflow/python/keras/datasets") add_python_module("tensorflow/python/keras/datasets/boston_housing") add_python_module("tensorflow/python/keras/datasets/cifar10") add_python_module("tensorflow/python/keras/datasets/cifar100") +add_python_module("tensorflow/python/keras/datasets/fashion_mnist") add_python_module("tensorflow/python/keras/datasets/imdb") add_python_module("tensorflow/python/keras/datasets/mnist") add_python_module("tensorflow/python/keras/datasets/reuters") diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD index 55b5d7ff613..a46a92cd0c7 100644 --- a/tensorflow/python/keras/BUILD +++ b/tensorflow/python/keras/BUILD @@ -30,6 +30,7 @@ py_library( "_impl/keras/datasets/cifar.py", "_impl/keras/datasets/cifar10.py", "_impl/keras/datasets/cifar100.py", + "_impl/keras/datasets/fashion_mnist.py", "_impl/keras/datasets/imdb.py", "_impl/keras/datasets/mnist.py", "_impl/keras/datasets/reuters.py", @@ -89,6 +90,7 @@ 
py_library( "datasets/boston_housing/__init__.py", "datasets/cifar10/__init__.py", "datasets/cifar100/__init__.py", + "datasets/fashion_mnist/__init__.py", "datasets/imdb/__init__.py", "datasets/mnist/__init__.py", "datasets/reuters/__init__.py", diff --git a/tensorflow/python/keras/_impl/keras/callbacks.py b/tensorflow/python/keras/_impl/keras/callbacks.py index eb678c4d1d9..40a996a03f7 100644 --- a/tensorflow/python/keras/_impl/keras/callbacks.py +++ b/tensorflow/python/keras/_impl/keras/callbacks.py @@ -265,7 +265,7 @@ class ProgbarLogger(Callback): Arguments: count_mode: One of "steps" or "samples". Whether the progress bar should - count samples seens or steps (batches) seen. + count samples seen or steps (batches) seen. Raises: ValueError: In case of invalid `count_mode`. @@ -417,7 +417,7 @@ class ModelCheckpoint(Callback): self.epochs_since_last_save += 1 if self.epochs_since_last_save >= self.period: self.epochs_since_last_save = 0 - filepath = self.filepath.format(epoch=epoch, **logs) + filepath = self.filepath.format(epoch=epoch + 1, **logs) if self.save_best_only: current = logs.get(self.monitor) if current is None: @@ -427,7 +427,7 @@ class ModelCheckpoint(Callback): if self.monitor_op(current, self.best): if self.verbose > 0: print('Epoch %05d: %s improved from %0.5f to %0.5f,' - ' saving model to %s' % (epoch, self.monitor, self.best, + ' saving model to %s' % (epoch + 1, self.monitor, self.best, current, filepath)) self.best = current if self.save_weights_only: @@ -436,10 +436,11 @@ class ModelCheckpoint(Callback): self.model.save(filepath, overwrite=True) else: if self.verbose > 0: - print('Epoch %05d: %s did not improve' % (epoch, self.monitor)) + print('Epoch %05d: %s did not improve' % (epoch + 1, + self.monitor)) else: if self.verbose > 0: - print('Epoch %05d: saving model to %s' % (epoch, filepath)) + print('Epoch %05d: saving model to %s' % (epoch + 1, filepath)) if self.save_weights_only: self.model.save_weights(filepath, overwrite=True) else: @@ -519,14 +520,14 @@ class EarlyStopping(Callback): self.best = current self.wait = 0 else: + self.wait += 1 if self.wait >= self.patience: self.stopped_epoch = epoch self.model.stop_training = True - self.wait += 1 def on_train_end(self, logs=None): if self.stopped_epoch > 0 and self.verbose > 0: - print('Epoch %05d: early stopping' % (self.stopped_epoch)) + print('Epoch %05d: early stopping' % (self.stopped_epoch + 1)) class RemoteMonitor(Callback): diff --git a/tensorflow/python/keras/_impl/keras/callbacks_test.py b/tensorflow/python/keras/_impl/keras/callbacks_test.py index d9d7fb5a9fb..9f578a0fab3 100644 --- a/tensorflow/python/keras/_impl/keras/callbacks_test.py +++ b/tensorflow/python/keras/_impl/keras/callbacks_test.py @@ -203,12 +203,12 @@ class KerasCallbacksTest(test.TestCase): callbacks=cbks, epochs=4, verbose=1) - assert os.path.exists(filepath.format(epoch=1)) - assert os.path.exists(filepath.format(epoch=3)) - os.remove(filepath.format(epoch=1)) - os.remove(filepath.format(epoch=3)) - assert not os.path.exists(filepath.format(epoch=0)) - assert not os.path.exists(filepath.format(epoch=2)) + assert os.path.exists(filepath.format(epoch=2)) + assert os.path.exists(filepath.format(epoch=4)) + os.remove(filepath.format(epoch=2)) + os.remove(filepath.format(epoch=4)) + assert not os.path.exists(filepath.format(epoch=1)) + assert not os.path.exists(filepath.format(epoch=3)) # Invalid use: this will raise a warning but not an Exception. 
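The callback changes above make epoch numbering 1-based in user-visible places: ModelCheckpoint formats its filepath with epoch + 1 and EarlyStopping reports stopped_epoch + 1 (its wait counter is also incremented before the patience check now). A small sketch of the checkpoint names this produces (hypothetical toy model and data; writing .h5 files assumes h5py is installed):

    import numpy as np
    import tensorflow as tf

    model = tf.keras.models.Sequential([tf.keras.layers.Dense(1, input_dim=3)])
    model.compile('rmsprop', 'mse')
    ckpt = tf.keras.callbacks.ModelCheckpoint('weights.{epoch:05d}.h5',
                                              save_weights_only=True)
    model.fit(np.random.rand(8, 3), np.random.rand(8, 1),
              epochs=2, callbacks=[ckpt], verbose=0)
    # Saved files: weights.00001.h5 and weights.00002.h5 (filename numbering
    # now starts at 1, matching the epoch count shown in the training logs).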
keras.callbacks.ModelCheckpoint( diff --git a/tensorflow/python/keras/_impl/keras/datasets/__init__.py b/tensorflow/python/keras/_impl/keras/datasets/__init__.py index 22afb6a5534..60db3766fbc 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/__init__.py +++ b/tensorflow/python/keras/_impl/keras/datasets/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +13,7 @@ # limitations under the License. # ============================================================================== """Keras datasets: utilities for downloading and pre-processing common datasets. + """ from __future__ import absolute_import from __future__ import division @@ -21,7 +22,7 @@ from __future__ import print_function from tensorflow.python.keras._impl.keras.datasets import boston_housing from tensorflow.python.keras._impl.keras.datasets import cifar10 from tensorflow.python.keras._impl.keras.datasets import cifar100 +from tensorflow.python.keras._impl.keras.datasets import fashion_mnist from tensorflow.python.keras._impl.keras.datasets import imdb from tensorflow.python.keras._impl.keras.datasets import mnist from tensorflow.python.keras._impl.keras.datasets import reuters - diff --git a/tensorflow/python/keras/_impl/keras/datasets/boston_housing.py b/tensorflow/python/keras/_impl/keras/datasets/boston_housing.py index e4f7fb9d212..4359be89280 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/boston_housing.py +++ b/tensorflow/python/keras/_impl/keras/datasets/boston_housing.py @@ -48,9 +48,10 @@ def load_data(path='boston_housing.npz', seed=113, test_split=0.2): f.close() np.random.seed(seed) - np.random.shuffle(x) - np.random.seed(seed) - np.random.shuffle(y) + indices = np.arrange(len(x)) + np.random.shuffle(indices) + x = x[indices] + y = y[indices] x_train = np.array(x[:int(len(x) * (1 - test_split))]) y_train = np.array(y[:int(len(x) * (1 - test_split))]) diff --git a/tensorflow/python/keras/_impl/keras/datasets/cifar10.py b/tensorflow/python/keras/_impl/keras/datasets/cifar10.py index 672249ff20f..4a687890158 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/cifar10.py +++ b/tensorflow/python/keras/_impl/keras/datasets/cifar10.py @@ -34,7 +34,7 @@ def load_data(): Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. 
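The dataset loaders touched by this patch (boston_housing above, imdb and reuters further down) stop seeding NumPy twice and shuffling features and labels separately; they now shuffle a single index permutation and apply it to both arrays. Written out on its own, the intended pattern looks like this (note that the NumPy function is numpy.arange; the seed matches the loaders' default):

    import numpy as np

    def shuffle_in_unison(x, y, seed=113):
        # One permutation applied to both arrays keeps (sample, label) pairs aligned.
        np.random.seed(seed)
        indices = np.arange(len(x))
        np.random.shuffle(indices)
        return x[indices], y[indices]

The new Fashion-MNIST loader introduced just below follows the MNIST layout: 60,000 training and 10,000 test grayscale 28x28 images with integer labels 0-9, downloaded on first use. A usage sketch against the module path added in this patch (the data is also reachable as tf.keras.datasets.fashion_mnist per the golden API file added later in the diff):

    from tensorflow.python.keras._impl.keras.datasets import fashion_mnist

    (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
    assert x_train.shape == (60000, 28, 28)
    assert y_train.shape == (60000,)
    assert x_test.shape == (10000, 28, 28)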
""" dirname = 'cifar-10-batches-py' - origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' + origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' path = get_file(dirname, origin=origin, untar=True) num_train_samples = 50000 diff --git a/tensorflow/python/keras/_impl/keras/datasets/cifar100.py b/tensorflow/python/keras/_impl/keras/datasets/cifar100.py index 1be7483d273..b69c0724c58 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/cifar100.py +++ b/tensorflow/python/keras/_impl/keras/datasets/cifar100.py @@ -43,7 +43,7 @@ def load_data(label_mode='fine'): raise ValueError('label_mode must be one of "fine" "coarse".') dirname = 'cifar-100-python' - origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' + origin = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' path = get_file(dirname, origin=origin, untar=True) fpath = os.path.join(path, 'train') diff --git a/tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py b/tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py new file mode 100644 index 00000000000..17be684e4f8 --- /dev/null +++ b/tensorflow/python/keras/_impl/keras/datasets/fashion_mnist.py @@ -0,0 +1,59 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Fashion-MNIST dataset. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gzip +import os +import numpy as np +from tensorflow.python.keras._impl.keras.utils.data_utils import get_file + + +def load_data(): + """Loads the Fashion-MNIST dataset. + + Returns: + Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. 
+ """ + dirname = os.path.join('datasets', 'fashion-mnist') + base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' + files = [ + 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', + 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz' + ] + + paths = [] + for given_file in files: + paths.append( + get_file(given_file, origin=base + given_file, cache_subdir=dirname)) + + with gzip.open(paths[0], 'rb') as lbpath: + y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8) + + with gzip.open(paths[1], 'rb') as imgpath: + x_train = np.frombuffer( + imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28) + + with gzip.open(paths[2], 'rb') as lbpath: + y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8) + + with gzip.open(paths[3], 'rb') as imgpath: + x_test = np.frombuffer( + imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28) + + return (x_train, y_train), (x_test, y_test) diff --git a/tensorflow/python/keras/_impl/keras/datasets/imdb.py b/tensorflow/python/keras/_impl/keras/datasets/imdb.py index 0db9d61f6d5..0e83473899c 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/imdb.py +++ b/tensorflow/python/keras/_impl/keras/datasets/imdb.py @@ -1,4 +1,4 @@ -# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,23 +65,24 @@ def load_data(path='imdb.npz', have simply been skipped. """ path = get_file( - path, origin='https://s3.amazonaws.com/text-datasets/imdb.npz') + path, + origin='https://s3.amazonaws.com/text-datasets/imdb.npz', + file_hash='599dadb1135973df5b59232a0e9a887c') f = np.load(path) - x_train = f['x_train'] - labels_train = f['y_train'] - x_test = f['x_test'] - labels_test = f['y_test'] + x_train, labels_train = f['x_train'], f['y_train'] + x_test, labels_test = f['x_test'], f['y_test'] f.close() np.random.seed(seed) - np.random.shuffle(x_train) - np.random.seed(seed) - np.random.shuffle(labels_train) + indices = np.arrange(len(x_train)) + np.random.shuffle(indices) + x_train = x_train[indices] + labels_train = labels_train[indices] - np.random.seed(seed * 2) - np.random.shuffle(x_test) - np.random.seed(seed * 2) - np.random.shuffle(labels_test) + indices = np.arrange(len(x_test)) + np.random.shuffle(indices) + x_test = x_test[indices] + labels_test = labels_test[indices] xs = np.concatenate([x_train, x_test]) labels = np.concatenate([labels_train, labels_test]) diff --git a/tensorflow/python/keras/_impl/keras/datasets/mnist.py b/tensorflow/python/keras/_impl/keras/datasets/mnist.py index 02be5e2a407..e98f29537f4 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/mnist.py +++ b/tensorflow/python/keras/_impl/keras/datasets/mnist.py @@ -34,7 +34,9 @@ def load_data(path='mnist.npz'): Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. 
""" path = get_file( - path, origin='https://s3.amazonaws.com/img-datasets/mnist.npz') + path, + origin='https://s3.amazonaws.com/img-datasets/mnist.npz', + file_hash='8a61469f7ea1b51cbae51d4f78837e45') f = np.load(path) x_train = f['x_train'] y_train = f['y_train'] diff --git a/tensorflow/python/keras/_impl/keras/datasets/reuters.py b/tensorflow/python/keras/_impl/keras/datasets/reuters.py index c36bac5cc7d..d05eb0ef8ca 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/reuters.py +++ b/tensorflow/python/keras/_impl/keras/datasets/reuters.py @@ -64,15 +64,20 @@ def load_data(path='reuters.npz', have simply been skipped. """ path = get_file( - path, origin='https://s3.amazonaws.com/text-datasets/reuters.npz') + path, + origin='https://s3.amazonaws.com/text-datasets/reuters.npz', + file_hash='87aedbeb0cb229e378797a632c1997b6') npzfile = np.load(path) xs = npzfile['x'] labels = npzfile['y'] npzfile.close() np.random.seed(seed) - np.random.shuffle(xs) - np.random.seed(seed) + indices = np.arrange(len(xs)) + np.random.shuffle(indices) + xs = xs[indices] + labels = labels[indices] + np.random.shuffle(labels) if start_char is not None: @@ -129,7 +134,8 @@ def get_word_index(path='reuters_word_index.json'): """ path = get_file( path, - origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json') + origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json', + file_hash='4d44cc38712099c9e383dc6e5f11a921') f = open(path) data = json.load(f) f.close() diff --git a/tensorflow/python/keras/_impl/keras/engine/topology.py b/tensorflow/python/keras/_impl/keras/engine/topology.py index 1b7ddef9c45..814961bd1d4 100644 --- a/tensorflow/python/keras/_impl/keras/engine/topology.py +++ b/tensorflow/python/keras/_impl/keras/engine/topology.py @@ -1422,6 +1422,31 @@ def preprocess_weights_for_loading(layer, weights[0] = np.transpose(weights[0], (3, 2, 0, 1)) if layer.__class__.__name__ == 'ConvLSTM2D': weights[1] = np.transpose(weights[1], (3, 2, 0, 1)) + + # convert the weights of CuDNNLSTM so that they could be loaded into LSTM + if layer.__class__.__name__ == 'LSTM': + # determine if we're loading a CuDNNLSTM layer from the number of bias + # weights: + # CuDNNLSTM has (units * 8) weights; while LSTM has (units * 4) + units = weights[1].shape[0] + bias = weights[2] + if len(bias) == units * 8: + # reshape the kernels + kernels = np.split(weights[0], 4, axis=1) + kernels = [ + kernel.reshape(-1).reshape(kernel.shape, order='F') + for kernel in kernels + ] + weights[0] = np.concatenate(kernels, axis=1) + + # transpose the recurrent kernels + recurrent_kernels = np.split(weights[1], 4, axis=1) + recurrent_kernels = [kernel.T for kernel in recurrent_kernels] + weights[1] = np.concatenate(recurrent_kernels, axis=1) + + # split the bias into half and merge + weights[2] = bias[:units * 4] + bias[units * 4:] + return weights diff --git a/tensorflow/python/keras/_impl/keras/engine/training.py b/tensorflow/python/keras/_impl/keras/engine/training.py index b1e48439ba0..e6d29c49684 100644 --- a/tensorflow/python/keras/_impl/keras/engine/training.py +++ b/tensorflow/python/keras/_impl/keras/engine/training.py @@ -71,6 +71,9 @@ def _standardize_input_data(data, if data is None: return [None for _ in range(len(names))] if isinstance(data, dict): + for key, value in data.items(): + if value.__class__.__name__ == 'DataFrame': + data[key] = value.values arrays = [] for name in names: if name not in data: @@ -78,6 +81,9 @@ def _standardize_input_data(data, '". 
Need data for each key in: ' + str(names)) arrays.append(data[name]) elif isinstance(data, list): + for key, value in enumerate(data): + if value.__class__.__name__ == 'DataFrame': + data[key] = value.values if len(data) != len(names): if data and hasattr(data[0], 'shape'): raise ValueError( @@ -100,6 +106,9 @@ def _standardize_input_data(data, ' Numpy arrays instead. ' 'The list you passed was: ' + str(data)[:200]) arrays = data + elif data.__class__.__name__ == 'DataFrame': + # test if data is a DataFrame, without pandas installed + data = data.values else: if not hasattr(data, 'shape'): raise TypeError('Error when checking model ' + exception_prefix + @@ -367,7 +376,7 @@ def _make_batches(size, batch_size): """ num_batches = int(np.ceil(size / float(batch_size))) return [(i * batch_size, min(size, (i + 1) * batch_size)) - for i in range(0, num_batches)] + for i in range(num_batches)] def _slice_arrays(arrays, start=None, stop=None): @@ -627,6 +636,7 @@ class Model(Network): self.sample_weight_mode = sample_weight_mode self.loss = loss self.loss_weights = loss_weights + self.sample_weight_mode = sample_weight_mode # Prepare loss functions. if isinstance(loss, dict): @@ -936,9 +946,28 @@ class Model(Network): trainable_weights = self.trainable_weights self._collected_trainable_weights = trainable_weights + def _check_trainable_weights_consistency(self): + """Check trainable weights count consistency. + + This will raise a warning if `trainable_weights` and + `_collected_trainable_weights` are consistent (i.e. have the same + number of parameters). + Inconsistency will typically arise when one modifies `model.trainable` + without calling `model.compile` again. + """ + if not hasattr(self, '_collected_trainable_weights'): + return + + if len(self.trainable_weights) != len(self._collected_trainable_weights): + logging.warning( + 'Discrepancy between trainable weights and collected trainable' + ' weights, did you set `model.trainable` without calling' + ' `model.compile` after ?') + def _make_train_function(self): if not hasattr(self, 'train_function'): raise RuntimeError('You must compile your model before using it.') + self._check_trainable_weights_consistency() if self.train_function is None: inputs = (self._feed_inputs + self._feed_targets + @@ -1258,7 +1287,7 @@ class Model(Network): for i, batch_out in enumerate(batch_outs): unconcatenated_outs[i].append(batch_out) if verbose == 1: - progbar.update(step) + progbar.update(step + 1) if len(unconcatenated_outs) == 1: return np.concatenate(unconcatenated_outs[0], axis=0) return [ @@ -1313,9 +1342,13 @@ class Model(Network): """ num_samples = self._check_num_samples(ins, batch_size, steps, 'steps') outs = [] - if steps is not None: - if verbose == 1: + + if verbose == 1: + if steps is not None: progbar = Progbar(target=steps) + else: + progbar = Progbar(target=num_samples) + if steps is not None: for step in range(steps): batch_outs = f(ins) if isinstance(batch_outs, list): @@ -1329,7 +1362,7 @@ class Model(Network): outs.append(0.) outs[0] += batch_outs if verbose == 1: - progbar.update(step) + progbar.update(step + 1) for i in range(len(outs)): outs[i] /= steps else: @@ -1456,11 +1489,13 @@ class Model(Network): If all inputs in the model are named, you can also pass a dictionary mapping input names to Numpy arrays. + Can be `None` (default) if feeding from framework-native tensors. y: Numpy array of target data, or list of Numpy arrays if the model has multiple outputs. 
If all outputs in the model are named, you can also pass a dictionary mapping output names to Numpy arrays. + Can be `None` (default) if feeding from framework-native tensors. batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, it will default to 32. @@ -1621,8 +1656,8 @@ class Model(Network): validation_steps=validation_steps) def evaluate(self, - x, - y, + x=None, + y=None, batch_size=None, verbose=1, sample_weight=None, @@ -1637,11 +1672,13 @@ class Model(Network): If all inputs in the model are named, you can also pass a dictionary mapping input names to Numpy arrays. + Can be `None` (default) if feeding from framework-native tensors. y: Numpy array of target data, or list of Numpy arrays if the model has multiple outputs. If all outputs in the model are named, you can also pass a dictionary mapping output names to Numpy arrays. + Can be `None` (default) if feeding from framework-native tensors. batch_size: Integer. If unspecified, it will default to 32. verbose: Verbosity mode, 0 or 1. sample_weight: Array of weights to weight the contribution @@ -1877,8 +1914,7 @@ class Model(Network): Arguments: generator: A generator or an instance of Sequence (keras.utils.Sequence) - object in order to avoid duplicate data - when using multiprocessing. + object in order to avoid duplicate data when using multiprocessing. The output of the generator must be either - a tuple (inputs, targets) - a tuple (inputs, targets, sample_weights). @@ -1889,7 +1925,7 @@ class Model(Network): steps_per_epoch: Total number of steps (batches of samples) to yield from `generator` before declaring one epoch finished and starting the next epoch. It should typically - be equal to the number of unique samples if your dataset + be equal to the number of unique samples of your dataset divided by the batch size. epochs: Integer, total number of iterations on the data. verbose: Verbosity mode, 0, 1, or 2. @@ -1913,9 +1949,9 @@ class Model(Network): non picklable arguments to the generator as they can't be passed easily to children processes. - shuffle: Whether to shuffle the data at the beginning of each - epoch. Only used with instances of `Sequence` ( - keras.utils.Sequence). + shuffle: Whether to shuffle the order of the batches at the + beginning of each epoch. Only used with instances + of `Sequence` (keras.utils.Sequence). initial_epoch: Epoch at which to start training (useful for resuming a previous training run) **kwargs: support for legacy arguments. @@ -1944,7 +1980,7 @@ class Model(Network): ValueError: In case the generator yields data in an invalid format. """ - # Legacy support + # Legacy support if 'max_q_size' in kwargs: max_queue_size = kwargs.pop('max_q_size') logging.warning('The argument `max_q_size` has been renamed ' @@ -2142,8 +2178,8 @@ class Model(Network): generator: Generator yielding tuples (inputs, targets) or (inputs, targets, sample_weights) or an instance of Sequence (keras.utils.Sequence) - object in order to avoid duplicate data - when using multiprocessing. + object in order to avoid duplicate data + when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. 
max_queue_size: maximum size for the generator queue diff --git a/tensorflow/python/keras/_impl/keras/engine/training_test.py b/tensorflow/python/keras/_impl/keras/engine/training_test.py index bc9ad6693e5..e2a06e8e778 100644 --- a/tensorflow/python/keras/_impl/keras/engine/training_test.py +++ b/tensorflow/python/keras/_impl/keras/engine/training_test.py @@ -640,6 +640,19 @@ class LossMaskingTest(test.TestCase): class TestDynamicTrainability(test.TestCase): + def test_trainable_warning(self): + with self.test_session(): + x = np.random.random((5, 3)) + y = np.random.random((5, 2)) + + model = keras.models.Sequential() + model.add(keras.layers.Dense(2, input_dim=3)) + model.trainable = False + model.compile('rmsprop', 'mse') + model.trainable = True + model.train_on_batch(x, y) + self.assertRaises(Warning) + def test_trainable_argument(self): with self.test_session(): x = np.random.random((5, 3)) diff --git a/tensorflow/python/keras/_impl/keras/layers/convolutional.py b/tensorflow/python/keras/_impl/keras/layers/convolutional.py index ce96bc66f7c..1cbae912631 100644 --- a/tensorflow/python/keras/_impl/keras/layers/convolutional.py +++ b/tensorflow/python/keras/_impl/keras/layers/convolutional.py @@ -793,6 +793,7 @@ class SeparableConv2D(tf_convolutional_layers.SeparableConv2D, Layer): strides=(1, 1), padding='valid', data_format=None, + dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, @@ -815,6 +816,7 @@ class SeparableConv2D(tf_convolutional_layers.SeparableConv2D, Layer): strides=strides, padding=padding, data_format=data_format, + dilation_rate=dilation_rate, activation=activations.get(activation), use_bias=use_bias, depthwise_initializer=initializers.get(depthwise_initializer), @@ -831,30 +833,42 @@ class SeparableConv2D(tf_convolutional_layers.SeparableConv2D, Layer): def get_config(self): config = { - 'filters': self.filters, - 'kernel_size': self.kernel_size, - 'strides': self.strides, - 'padding': self.padding, - 'data_format': self.data_format, - 'activation': activations.serialize(self.activation), - 'use_bias': self.use_bias, - 'depthwise_initializer': initializers.serialize( - self.depthwise_initializer), - 'pointwise_initializer': initializers.serialize( - self.pointwise_initializer), - 'bias_initializer': initializers.serialize(self.bias_initializer), - 'depthwise_regularizer': regularizers.serialize( - self.depthwise_regularizer), - 'pointwise_regularizer': regularizers.serialize( - self.pointwise_regularizer), - 'bias_regularizer': regularizers.serialize(self.bias_regularizer), + 'filters': + self.filters, + 'kernel_size': + self.kernel_size, + 'strides': + self.strides, + 'padding': + self.padding, + 'data_format': + self.data_format, + 'dilation_rate': + self.dilation_rate, + 'activation': + activations.serialize(self.activation), + 'use_bias': + self.use_bias, + 'depthwise_initializer': + initializers.serialize(self.depthwise_initializer), + 'pointwise_initializer': + initializers.serialize(self.pointwise_initializer), + 'bias_initializer': + initializers.serialize(self.bias_initializer), + 'depthwise_regularizer': + regularizers.serialize(self.depthwise_regularizer), + 'pointwise_regularizer': + regularizers.serialize(self.pointwise_regularizer), + 'bias_regularizer': + regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), - 'depthwise_constraint': constraints.serialize( - self.depthwise_constraint), - 'pointwise_constraint': constraints.serialize( - self.pointwise_constraint), - 
'bias_constraint': constraints.serialize(self.bias_constraint) + 'depthwise_constraint': + constraints.serialize(self.depthwise_constraint), + 'pointwise_constraint': + constraints.serialize(self.pointwise_constraint), + 'bias_constraint': + constraints.serialize(self.bias_constraint) } base_config = super(SeparableConv2D, self).get_config() return dict(list(base_config.items()) + list(config.items())) diff --git a/tensorflow/python/keras/_impl/keras/layers/convolutional_recurrent.py b/tensorflow/python/keras/_impl/keras/layers/convolutional_recurrent.py index 2335bd4df02..c88122ce188 100644 --- a/tensorflow/python/keras/_impl/keras/layers/convolutional_recurrent.py +++ b/tensorflow/python/keras/_impl/keras/layers/convolutional_recurrent.py @@ -536,7 +536,7 @@ class ConvLSTM2D(ConvRecurrent2D): conv_out = K.bias_add(conv_out, b, data_format=self.data_format) return conv_out - def reccurent_conv(self, x, w): + def recurrent_conv(self, x, w): conv_out = K.conv2d( x, w, strides=(1, 1), padding='same', data_format=self.data_format) return conv_out @@ -556,10 +556,10 @@ class ConvLSTM2D(ConvRecurrent2D): inputs * dp_mask[2], self.kernel_c, self.bias_c, padding=self.padding) x_o = self.input_conv( inputs * dp_mask[3], self.kernel_o, self.bias_o, padding=self.padding) - h_i = self.reccurent_conv(h_tm1 * rec_dp_mask[0], self.recurrent_kernel_i) - h_f = self.reccurent_conv(h_tm1 * rec_dp_mask[1], self.recurrent_kernel_f) - h_c = self.reccurent_conv(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c) - h_o = self.reccurent_conv(h_tm1 * rec_dp_mask[3], self.recurrent_kernel_o) + h_i = self.recurrent_conv(h_tm1 * rec_dp_mask[0], self.recurrent_kernel_i) + h_f = self.recurrent_conv(h_tm1 * rec_dp_mask[1], self.recurrent_kernel_f) + h_c = self.recurrent_conv(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c) + h_o = self.recurrent_conv(h_tm1 * rec_dp_mask[3], self.recurrent_kernel_o) i = self.recurrent_activation(x_i + h_i) f = self.recurrent_activation(x_f + h_f) diff --git a/tensorflow/python/keras/_impl/keras/layers/core.py b/tensorflow/python/keras/_impl/keras/layers/core.py index b2e0e7b8eeb..517129fab05 100644 --- a/tensorflow/python/keras/_impl/keras/layers/core.py +++ b/tensorflow/python/keras/_impl/keras/layers/core.py @@ -52,7 +52,7 @@ class Masking(Layer): Example: Consider a Numpy data array `x` of shape `(samples, timesteps, features)`, - to be fed to a LSTM layer. + to be fed to an LSTM layer. You want to mask timestep #3 and #5 because you lack data for these timesteps. You can: @@ -121,7 +121,11 @@ class Dropout(tf_core_layers.Dropout, Layer): return output def get_config(self): - config = {'rate': self.rate} + config = { + 'rate': self.rate, + 'noise_shape': self.noise_shape, + 'seed': self.seed + } base_config = super(Dropout, self).get_config() return dict(list(base_config.items()) + list(config.items())) @@ -383,20 +387,18 @@ class Reshape(Layer): def _compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() - output_shape = [input_shape[0]] - output_shape += self._fix_unknown_dimension(input_shape[1:], - self.target_shape) + if None in input_shape[1:]: + output_shape = [input_shape[0]] + # input shape (partially) unknown? 
replace -1's with None's + output_shape += tuple(s if s != -1 else None for s in self.target_shape) + else: + output_shape = [input_shape[0]] + output_shape += self._fix_unknown_dimension(input_shape[1:], + self.target_shape) return tensor_shape.TensorShape(output_shape) def call(self, inputs): - # In case the target shape is not fully defined, - # we need access to the shape of x. - target_shape = self.target_shape - if -1 in target_shape: - # target shape not fully defined - target_shape = self._compute_output_shape(inputs.get_shape()) - target_shape = target_shape.as_list()[1:] - return K.reshape(inputs, (-1,) + tuple(target_shape)) + return K.reshape(inputs, (K.shape(inputs)[0],) + self.target_shape) def get_config(self): config = {'target_shape': self.target_shape} @@ -595,6 +597,7 @@ class Lambda(Layer): @classmethod def from_config(cls, config, custom_objects=None): + config = config.copy() globs = globals() if custom_objects: globs = dict(list(globs.items()) + list(custom_objects.items())) diff --git a/tensorflow/python/keras/_impl/keras/layers/core_test.py b/tensorflow/python/keras/_impl/keras/layers/core_test.py index 9cdebd375c8..dd768dc268e 100644 --- a/tensorflow/python/keras/_impl/keras/layers/core_test.py +++ b/tensorflow/python/keras/_impl/keras/layers/core_test.py @@ -111,6 +111,12 @@ class CoreLayersTest(test.TestCase): kwargs={'target_shape': (1, -1)}, input_shape=(3, 2, 4)) + with self.test_session(): + testing_utils.layer_test( + keras.layers.Reshape, + kwargs={'target_shape': (-1, 1)}, + input_shape=(None, None, 2)) + def test_permute(self): with self.test_session(): testing_utils.layer_test( diff --git a/tensorflow/python/keras/_impl/keras/layers/merge.py b/tensorflow/python/keras/_impl/keras/layers/merge.py index 84b65d87c2f..5f26ce44e39 100644 --- a/tensorflow/python/keras/_impl/keras/layers/merge.py +++ b/tensorflow/python/keras/_impl/keras/layers/merge.py @@ -299,6 +299,21 @@ class Maximum(_Merge): return output +class Minimum(_Merge): + """Layer that computes the minimum (element-wise) a list of inputs. + + It takes as input a list of tensors, + all of the same shape, and returns + a single tensor (also of the same shape). + """ + + def _merge_function(self, inputs): + output = inputs[0] + for i in range(1, len(inputs)): + output = K.minimum(output, inputs[i]) + return output + + class Concatenate(_Merge): """Layer that concatenates a list of inputs. @@ -375,9 +390,8 @@ class Concatenate(_Merge): masks = [] for input_i, mask_i in zip(inputs, mask): if mask_i is None: - # Input is unmasked. Append all 1s to masks, - # but cast it to bool first - masks.append(K.cast(K.ones_like(input_i), 'bool')) + # Input is unmasked. Append all 1s to masks + masks.append(K.ones_like(input_i, dtype='bool')) elif K.ndim(mask_i) < K.ndim(input_i): # Mask is smaller than the input, expand it masks.append(K.expand_dims(mask_i)) @@ -584,6 +598,19 @@ def maximum(inputs, **kwargs): return Maximum(**kwargs)(inputs) +def minimum(inputs, **kwargs): + """Functional interface to the `Minimum` layer. + + Arguments: + inputs: A list of input tensors (at least 2). + **kwargs: Standard layer keyword arguments. + + Returns: + A tensor, the element-wise minimum of the inputs. + """ + return Minimum(**kwargs)(inputs) + + def concatenate(inputs, axis=-1, **kwargs): """Functional interface to the `Concatenate` layer. 
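The merge.py hunk above introduces an element-wise `Minimum` layer and a `minimum()` functional helper, mirroring the existing `Maximum`/`maximum` pair. As a rough usage sketch (the input shapes and variable names are illustrative only, and the import path follows the test module added below):

    import numpy as np
    from tensorflow.python.keras._impl import keras

    # Two inputs of the same shape; the output is their element-wise minimum.
    i1 = keras.layers.Input(shape=(4, 5))
    i2 = keras.layers.Input(shape=(4, 5))
    o = keras.layers.minimum([i1, i2])  # the functional helper wraps Minimum(**kwargs)(inputs)
    model = keras.models.Model([i1, i2], o)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])  # expected to match np.minimum(x1, x2)
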
diff --git a/tensorflow/python/keras/_impl/keras/layers/merge_test.py b/tensorflow/python/keras/_impl/keras/layers/merge_test.py index a5746582791..1f34c367e4b 100644 --- a/tensorflow/python/keras/_impl/keras/layers/merge_test.py +++ b/tensorflow/python/keras/_impl/keras/layers/merge_test.py @@ -116,6 +116,20 @@ class MergeLayersTest(test.TestCase): self.assertEqual(out.shape, (2, 4, 5)) self.assertAllClose(out, np.maximum(x1, x2), atol=1e-4) + def test_merge_minimum(self): + with self.test_session(): + i1 = keras.layers.Input(shape=(4, 5)) + i2 = keras.layers.Input(shape=(4, 5)) + o = keras.layers.minimum([i1, i2]) + self.assertListEqual(o.get_shape().as_list(), [None, 4, 5]) + model = keras.models.Model([i1, i2], o) + + x1 = np.random.random((2, 4, 5)) + x2 = np.random.random((2, 4, 5)) + out = model.predict([x1, x2]) + self.assertEqual(out.shape, (2, 4, 5)) + self.assertAllClose(out, np.minimum(x1, x2), atol=1e-4) + def test_merge_concatenate(self): with self.test_session(): i1 = keras.layers.Input(shape=(4, 5)) diff --git a/tensorflow/python/keras/_impl/keras/layers/pooling.py b/tensorflow/python/keras/_impl/keras/layers/pooling.py index e773e396796..afe4ebfdc53 100644 --- a/tensorflow/python/keras/_impl/keras/layers/pooling.py +++ b/tensorflow/python/keras/_impl/keras/layers/pooling.py @@ -367,7 +367,7 @@ class GlobalAveragePooling1D(_GlobalPooling1D): Output shape: 2D tensor with shape: - `(batch_size, channels)` + `(batch_size, features)` """ def call(self, inputs): @@ -382,7 +382,7 @@ class GlobalMaxPooling1D(_GlobalPooling1D): Output shape: 2D tensor with shape: - `(batch_size, channels)` + `(batch_size, features)` """ def call(self, inputs): diff --git a/tensorflow/python/keras/_impl/keras/layers/wrappers.py b/tensorflow/python/keras/_impl/keras/layers/wrappers.py index a0cca9dc2fc..6f786b78500 100644 --- a/tensorflow/python/keras/_impl/keras/layers/wrappers.py +++ b/tensorflow/python/keras/_impl/keras/layers/wrappers.py @@ -97,10 +97,6 @@ class Wrapper(Layer): return losses + super(Wrapper, self).get_losses_for(None) return super(Wrapper, self).get_losses_for(inputs) - @property - def constraints(self): - return self.layer.constraints - def get_weights(self): return self.layer.get_weights() diff --git a/tensorflow/python/keras/_impl/keras/utils/io_utils.py b/tensorflow/python/keras/_impl/keras/utils/io_utils.py index 5f2ba99be78..1c8299c27d2 100644 --- a/tensorflow/python/keras/_impl/keras/utils/io_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/io_utils.py @@ -84,7 +84,7 @@ class HDF5Matrix(object): if start is None: start = 0 if stop is None: - stop = self.data.shape[0] + stop = self.shape[0] if stop + self.start <= self.end: idx = slice(start + self.start, stop + self.start) else: diff --git a/tensorflow/python/keras/_impl/keras/utils/layer_utils.py b/tensorflow/python/keras/_impl/keras/utils/layer_utils.py index 86c02643556..053c0600a33 100644 --- a/tensorflow/python/keras/_impl/keras/utils/layer_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/layer_utils.py @@ -24,6 +24,18 @@ from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras.utils.conv_utils import convert_kernel +def count_params(weights): + """Count the total number of scalars composing the weights. 
+ + Arguments: + weights: An iterable containing the weights on which to compute params + + Returns: + The total number of scalars composing the weights + """ + return int(np.sum([K.count_params(p) for p in set(weights)])) + + def print_summary(model, line_length=None, positions=None, print_fn=None): """Prints a summary of a model. @@ -46,12 +58,28 @@ def print_summary(model, line_length=None, positions=None, print_fn=None): sequential_like = True else: sequential_like = True - for v in model._nodes_by_depth.values(): # pylint: disable=protected-access + nodes_by_depth = model._nodes_by_depth.values() # pylint: disable=protected-access + nodes = [] + for v in nodes_by_depth: if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1): # If the model has multiple nodes or if the nodes have # multiple inbound_layers, the model is no longer sequential. sequential_like = False break + nodes += v + if sequential_like: + # search for shared layers + for layer in model.layers: + flag = False + for node in layer.inbound_nodes: + if node in nodes: + if flag: + sequential_like = False + break + else: + flag = True + if not sequential_like: + break if sequential_like: line_length = line_length or 65 @@ -61,7 +89,7 @@ def print_summary(model, line_length=None, positions=None, print_fn=None): # header names for the different log elements to_display = ['Layer (type)', 'Output Shape', 'Param #'] else: - line_length = line_length or 100 + line_length = line_length or 98 positions = positions or [.33, .55, .67, 1.] if positions[-1] <= 1: positions = [int(line_length * p) for p in positions] @@ -144,8 +172,12 @@ def print_summary(model, line_length=None, positions=None, print_fn=None): else: print_fn('_' * line_length) - trainable_count = int( - np.sum([K.count_params(p) for p in set(model.trainable_weights)])) + model._check_trainable_weights_consistency() # pylint: disable=protected-access + if hasattr(model, '_collected_trainable_weights'): + trainable_count = count_params(model._collected_trainable_weights) # pylint: disable=protected-access + else: + trainable_count = count_params(model.trainable_weights) + non_trainable_count = int( np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])) diff --git a/tensorflow/python/keras/datasets/__init__.py b/tensorflow/python/keras/datasets/__init__.py index b76f278964b..69e10bd63c7 100644 --- a/tensorflow/python/keras/datasets/__init__.py +++ b/tensorflow/python/keras/datasets/__init__.py @@ -21,6 +21,7 @@ from __future__ import print_function from tensorflow.python.keras.datasets import boston_housing from tensorflow.python.keras.datasets import cifar10 from tensorflow.python.keras.datasets import cifar100 +from tensorflow.python.keras.datasets import fashion_mnist from tensorflow.python.keras.datasets import imdb from tensorflow.python.keras.datasets import mnist from tensorflow.python.keras.datasets import reuters diff --git a/tensorflow/python/keras/datasets/fashion_mnist/__init__.py b/tensorflow/python/keras/datasets/fashion_mnist/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tensorflow/python/layers/core.py b/tensorflow/python/layers/core.py index 76e8fbef2f4..7be1fa5cfe9 100644 --- a/tensorflow/python/layers/core.py +++ b/tensorflow/python/layers/core.py @@ -286,11 +286,19 @@ class Dropout(base.Layer): self.noise_shape = noise_shape self.seed = seed - def _get_noise_shape(self, _): + def _get_noise_shape(self, inputs): # Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`, # which will 
override `self.noise_shape`, and allows for custom noise # shapes with dynamically sized inputs. - return self.noise_shape + if self.noise_shape is None: + return self.noise_shape + + symbolic_shape = array_ops.shape(inputs) + noise_shape = [ + symbolic_shape[axis] if shape is None else shape + for axis, shape in enumerate(self.noise_shape) + ] + return noise_shape def call(self, inputs, training=False): diff --git a/tensorflow/python/layers/core_test.py b/tensorflow/python/layers/core_test.py index b67df89f81f..2d47cc69798 100644 --- a/tensorflow/python/layers/core_test.py +++ b/tensorflow/python/layers/core_test.py @@ -387,6 +387,16 @@ class DropoutTest(test.TestCase): self.assertAllClose(np.ones((5, 5)), np_output) @test_util.run_in_graph_and_eager_modes() + def testDynamicNoiseShape(self): + inputs = array_ops.ones((5, 3, 2)) + noise_shape = [None, 1, None] + dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1) + dropped = dp.apply(inputs, training=True) + self.evaluate(variables.global_variables_initializer()) + np_output = self.evaluate(dropped) + self.assertAlmostEqual(0., np_output.min()) + self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :]) + def testCustomNoiseShape(self): inputs = array_ops.ones((5, 3, 2)) noise_shape = [5, 1, 2] diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt index b6f9eea2dea..64352508b58 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt @@ -152,7 +152,7 @@ tf_class { } member_method { name: "evaluate" - argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\'], " } member_method { name: "evaluate_generator" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.fashion_mnist.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.datasets.fashion_mnist.pbtxt new file mode 100644 index 00000000000..791cfda2334 --- /dev/null +++ b/tensorflow/tools/api/golden/tensorflow.keras.datasets.fashion_mnist.pbtxt @@ -0,0 +1,3 @@ +path: "tensorflow.keras.datasets.fashion_mnist" +tf_module { +} diff --git a/tensorflow/tools/api/golden/tensorflow.keras.datasets.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.datasets.pbtxt index d4aa436f328..36e3aafbe4d 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.datasets.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.datasets.pbtxt @@ -12,6 +12,10 @@ tf_module { name: "cifar100" mtype: "" } + member { + name: "fashion_mnist" + mtype: "" + } member { name: "imdb" mtype: "" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt index a0906e62cf5..8c2b110c6d3 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-conv-l-s-t-m2-d.pbtxt @@ -191,7 +191,7 @@ tf_class { argspec: "args=[\'self\', \'inputs\', \'training\'], varargs=None, keywords=None, defaults=[\'None\'], " } member_method { - name: "reccurent_conv" + name: "recurrent_conv" argspec: "args=[\'self\', \'x\', \'w\'], varargs=None, keywords=None, 
defaults=None" } member_method { diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt index 7867e3c1fd3..f289664ba27 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-conv2-d.pbtxt @@ -93,7 +93,7 @@ tf_class { } member_method { name: "__init__" - argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'1\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], " } member_method { name: "add_loss" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt index 0fb6e84f8de..d7887286125 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-separable-convolution2-d.pbtxt @@ -93,7 +93,7 @@ tf_class { } member_method { name: "__init__" - argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'filters\', \'kernel_size\', \'strides\', \'padding\', \'data_format\', \'dilation_rate\', \'depth_multiplier\', \'activation\', \'use_bias\', \'depthwise_initializer\', \'pointwise_initializer\', \'bias_initializer\', \'depthwise_regularizer\', \'pointwise_regularizer\', \'bias_regularizer\', \'activity_regularizer\', \'depthwise_constraint\', \'pointwise_constraint\', \'bias_constraint\'], varargs=None, keywords=kwargs, defaults=[\'(1, 1)\', \'valid\', \'None\', \'1\', \'1\', \'None\', \'True\', \'glorot_uniform\', \'glorot_uniform\', \'zeros\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], " } 
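The updated argspecs above record the new `dilation_rate` argument threaded through the Keras `SeparableConv2D`/`SeparableConvolution2D` wrappers earlier in this patch. A minimal usage sketch, assuming the usual Keras layer-call convention (the layer sizes here are arbitrary):

    from tensorflow.python.keras._impl import keras

    # dilation_rate defaults to 1, is forwarded to the base layer, and is now
    # serialized by get_config().
    layer = keras.layers.SeparableConv2D(
        filters=32, kernel_size=(3, 3), dilation_rate=(2, 2), padding='same')
    config = layer.get_config()  # expected to include a 'dilation_rate' entry
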
member_method { name: "add_loss" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt index 34c9efb3ca0..dedef65ff93 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-time-distributed.pbtxt @@ -9,10 +9,6 @@ tf_class { name: "activity_regularizer" mtype: "" } - member { - name: "constraints" - mtype: "" - } member { name: "dtype" mtype: "" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt index 9cee68874a9..313b3a9e155 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-wrapper.pbtxt @@ -8,10 +8,6 @@ tf_class { name: "activity_regularizer" mtype: "" } - member { - name: "constraints" - mtype: "" - } member { name: "dtype" mtype: "" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt index af9a44086fd..8916925b3ba 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt @@ -152,7 +152,7 @@ tf_class { } member_method { name: "evaluate" - argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'verbose\', \'sample_weight\', \'steps\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'1\', \'None\', \'None\'], " } member_method { name: "evaluate_generator" From 29d84f18369cfe08beae97cff0aa8bde601b4cfc Mon Sep 17 00:00:00 2001 From: Neal Wu Date: Tue, 14 Nov 2017 13:43:50 -0800 Subject: [PATCH 020/104] Remove wide_n_deep_tutorial.py in tensorflow/examples/learn in favor of wide_deep.py in the TensorFlow official models PiperOrigin-RevId: 175728483 --- tensorflow/docs_src/tutorials/linear.md | 2 +- tensorflow/examples/learn/BUILD | 8 - tensorflow/examples/learn/README.md | 2 +- tensorflow/examples/learn/examples_test.sh | 1 - .../examples/learn/wide_n_deep_tutorial.py | 252 ------------------ 5 files changed, 2 insertions(+), 263 deletions(-) delete mode 100644 tensorflow/examples/learn/wide_n_deep_tutorial.py diff --git a/tensorflow/docs_src/tutorials/linear.md b/tensorflow/docs_src/tutorials/linear.md index a6517549c36..d333d012790 100644 --- a/tensorflow/docs_src/tutorials/linear.md +++ b/tensorflow/docs_src/tutorials/linear.md @@ -175,7 +175,7 @@ the name of a `FeatureColumn`. Each key's value is a tensor containing the values of that feature for all data instances. See @{$input_fn$Building Input Functions with tf.estimator} for a more comprehensive look at input functions, and `input_fn` in the -[linear models tutorial code](https://www.tensorflow.org/code/tensorflow/examples/learn/wide_n_deep_tutorial.py) +[linear models tutorial code](https://github.com/tensorflow/models/tree/master/official/wide_deep/wide_deep.py) for an example implementation of an input function. 
The input function is passed to the `train()` and `evaluate()` calls that diff --git a/tensorflow/examples/learn/BUILD b/tensorflow/examples/learn/BUILD index 23a42a60ba4..aba7f600b53 100644 --- a/tensorflow/examples/learn/BUILD +++ b/tensorflow/examples/learn/BUILD @@ -113,13 +113,6 @@ py_binary( ], ) -py_binary( - name = "wide_n_deep_tutorial", - srcs = ["wide_n_deep_tutorial.py"], - srcs_version = "PY2AND3", - deps = ["//tensorflow:tensorflow_py"], -) - py_binary( name = "mnist", srcs = ["mnist.py"], @@ -153,7 +146,6 @@ sh_test( ":text_classification_character_cnn", ":text_classification_character_rnn", ":text_classification_cnn", - ":wide_n_deep_tutorial", ], tags = [ "manual", diff --git a/tensorflow/examples/learn/README.md b/tensorflow/examples/learn/README.md index 70d9db85ee5..b74a8f39d98 100644 --- a/tensorflow/examples/learn/README.md +++ b/tensorflow/examples/learn/README.md @@ -23,7 +23,7 @@ processing (`pip install -U pandas`). ## Specialized Models * [Building a Random Forest Model](https://www.tensorflow.org/code/tensorflow/examples/learn/random_forest_mnist.py) -* [Building a Wide & Deep Model](https://www.tensorflow.org/code/tensorflow/examples/learn/wide_n_deep_tutorial.py) +* [Building a Wide & Deep Model](https://github.com/tensorflow/models/tree/master/official/wide_deep/wide_deep.py) * [Building a Residual Network Model](https://www.tensorflow.org/code/tensorflow/examples/learn/resnet.py) ## Text classification diff --git a/tensorflow/examples/learn/examples_test.sh b/tensorflow/examples/learn/examples_test.sh index b8763de471c..ef5e8a5de25 100755 --- a/tensorflow/examples/learn/examples_test.sh +++ b/tensorflow/examples/learn/examples_test.sh @@ -56,4 +56,3 @@ test text_classification_builtin_rnn_model --test_with_fake_data test text_classification_character_cnn --test_with_fake_data test text_classification_character_rnn --test_with_fake_data test text_classification_cnn --test_with_fake_data -test wide_n_deep_tutorial diff --git a/tensorflow/examples/learn/wide_n_deep_tutorial.py b/tensorflow/examples/learn/wide_n_deep_tutorial.py deleted file mode 100644 index 072353392a9..00000000000 --- a/tensorflow/examples/learn/wide_n_deep_tutorial.py +++ /dev/null @@ -1,252 +0,0 @@ -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Example code for TensorFlow Wide & Deep Tutorial using TF High Level API. - -This example uses APIs in Tensorflow 1.4 or above. 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import shutil -import sys -import tempfile - -import pandas as pd -from six.moves import urllib -import tensorflow as tf - - -CSV_COLUMNS = [ - "age", "workclass", "fnlwgt", "education", "education_num", - "marital_status", "occupation", "relationship", "race", "gender", - "capital_gain", "capital_loss", "hours_per_week", "native_country", - "income_bracket" -] - -gender = tf.feature_column.categorical_column_with_vocabulary_list( - "gender", ["Female", "Male"]) -education = tf.feature_column.categorical_column_with_vocabulary_list( - "education", [ - "Bachelors", "HS-grad", "11th", "Masters", "9th", - "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th", - "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th", - "Preschool", "12th" - ]) -marital_status = tf.feature_column.categorical_column_with_vocabulary_list( - "marital_status", [ - "Married-civ-spouse", "Divorced", "Married-spouse-absent", - "Never-married", "Separated", "Married-AF-spouse", "Widowed" - ]) -relationship = tf.feature_column.categorical_column_with_vocabulary_list( - "relationship", [ - "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried", - "Other-relative" - ]) -workclass = tf.feature_column.categorical_column_with_vocabulary_list( - "workclass", [ - "Self-emp-not-inc", "Private", "State-gov", "Federal-gov", - "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked" - ]) - -# To show an example of hashing: -occupation = tf.feature_column.categorical_column_with_hash_bucket( - "occupation", hash_bucket_size=1000) -native_country = tf.feature_column.categorical_column_with_hash_bucket( - "native_country", hash_bucket_size=1000) - -# Continuous base columns. -age = tf.feature_column.numeric_column("age") -education_num = tf.feature_column.numeric_column("education_num") -capital_gain = tf.feature_column.numeric_column("capital_gain") -capital_loss = tf.feature_column.numeric_column("capital_loss") -hours_per_week = tf.feature_column.numeric_column("hours_per_week") - -# Transformations. -age_buckets = tf.feature_column.bucketized_column( - age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) - -# Wide columns and deep columns. 
-base_columns = [ - gender, education, marital_status, relationship, workclass, occupation, - native_country, age_buckets, -] - -crossed_columns = [ - tf.feature_column.crossed_column( - ["education", "occupation"], hash_bucket_size=1000), - tf.feature_column.crossed_column( - [age_buckets, "education", "occupation"], hash_bucket_size=1000), - tf.feature_column.crossed_column( - ["native_country", "occupation"], hash_bucket_size=1000) -] - -deep_columns = [ - tf.feature_column.indicator_column(workclass), - tf.feature_column.indicator_column(education), - tf.feature_column.indicator_column(gender), - tf.feature_column.indicator_column(relationship), - # To show an example of embedding - tf.feature_column.embedding_column(native_country, dimension=8), - tf.feature_column.embedding_column(occupation, dimension=8), - age, - education_num, - capital_gain, - capital_loss, - hours_per_week, -] - - -FLAGS = None - - -def maybe_download(train_data, test_data): - """Maybe downloads training data and returns train and test file names.""" - if train_data: - train_file_name = train_data - else: - train_file = tempfile.NamedTemporaryFile(delete=False) - urllib.request.urlretrieve( - "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", - train_file.name) # pylint: disable=line-too-long - train_file_name = train_file.name - train_file.close() - print("Training data is downloaded to %s" % train_file_name) - - if test_data: - test_file_name = test_data - else: - test_file = tempfile.NamedTemporaryFile(delete=False) - urllib.request.urlretrieve( - "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", - test_file.name) # pylint: disable=line-too-long - test_file_name = test_file.name - test_file.close() - print("Test data is downloaded to %s"% test_file_name) - - return train_file_name, test_file_name - - -def build_estimator(model_dir, model_type): - """Build an estimator.""" - if model_type == "wide": - m = tf.estimator.LinearClassifier( - model_dir=model_dir, feature_columns=base_columns + crossed_columns) - elif model_type == "deep": - m = tf.estimator.DNNClassifier( - model_dir=model_dir, - feature_columns=deep_columns, - hidden_units=[100, 50]) - else: - m = tf.estimator.DNNLinearCombinedClassifier( - model_dir=model_dir, - linear_feature_columns=crossed_columns, - dnn_feature_columns=deep_columns, - dnn_hidden_units=[100, 50]) - return m - - -def input_fn(data_file, num_epochs, shuffle): - """Returns an `input_fn` required by Estimator train/evaluate. - - Args: - data_file: The file path to the dataset. - num_epochs: Number of epochs to iterate over data. If `None`, `input_fn` - will generate infinite stream of data. - shuffle: bool, whether to read the data in random order. 
- """ - df_data = pd.read_csv( - tf.gfile.Open(data_file), - names=CSV_COLUMNS, - skipinitialspace=True, - engine="python", - skiprows=1) - # remove NaN elements - df_data = df_data.dropna(how="any", axis=0) - labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int) - - return tf.estimator.inputs.pandas_input_fn( - x=df_data, - y=labels, - batch_size=100, - num_epochs=num_epochs, - shuffle=shuffle, - num_threads=1) - - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - - train_file_name, test_file_name = maybe_download(FLAGS.train_data, - FLAGS.test_data) - - # Specify file path below if want to find the output easily - model_dir = FLAGS.model_dir if FLAGS.model_dir else tempfile.mkdtemp() - - estimator = build_estimator(model_dir, FLAGS.model_type) - - # `tf.estimator.TrainSpec`, `tf.estimator.EvalSpec`, and - # `tf.estimator.train_and_evaluate` API are available in TF 1.4. - train_spec = tf.estimator.TrainSpec( - input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True), - max_steps=FLAGS.train_steps) - - eval_spec = tf.estimator.EvalSpec( - input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False), - # set steps to None to run evaluation until all data consumed. - steps=None) - - tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) - - # Manual cleanup - shutil.rmtree(model_dir) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.register("type", "bool", lambda v: v.lower() == "true") - parser.add_argument( - "--model_dir", - type=str, - default="", - help="Base directory for output models." - ) - parser.add_argument( - "--model_type", - type=str, - default="wide_n_deep", - help="Valid model types: {'wide', 'deep', 'wide_n_deep'}." - ) - parser.add_argument( - "--train_steps", - type=int, - default=2000, - help="Number of training steps." - ) - parser.add_argument( - "--train_data", - type=str, - default="", - help="Path to the training data." - ) - parser.add_argument( - "--test_data", - type=str, - default="", - help="Path to the test data." - ) - FLAGS, unparsed = parser.parse_known_args() - tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) From b77b20f8e8ef5a670201032a8fc8daf157524b74 Mon Sep 17 00:00:00 2001 From: "A. 
Unique TensorFlower" Date: Tue, 14 Nov 2017 13:48:43 -0800 Subject: [PATCH 021/104] Automated g4 rollback of changelist 175571632 PiperOrigin-RevId: 175729221 --- tensorflow/compiler/xla/service/shaped_buffer.cc | 8 ++++++++ tensorflow/compiler/xla/service/shaped_buffer.h | 4 ++++ 2 files changed, 12 insertions(+) diff --git a/tensorflow/compiler/xla/service/shaped_buffer.cc b/tensorflow/compiler/xla/service/shaped_buffer.cc index a2a442eb1a3..a57ebf59e76 100644 --- a/tensorflow/compiler/xla/service/shaped_buffer.cc +++ b/tensorflow/compiler/xla/service/shaped_buffer.cc @@ -63,6 +63,14 @@ void ShapedBuffer::clear() { } } +void ShapedBuffer::AddBufferAtIndex( + const perftools::gputools::DeviceMemoryBase& buffer, + const ShapeIndex& shape_index) { + *mutable_shape_index_to_buffer_entry()->mutable_element(shape_index) = + buffers().size(); + mutable_buffers()->push_back(buffer); +} + const se::DeviceMemoryBase& ShapedBuffer::buffer( const ShapeIndex& index) const { return buffers_[shape_index_to_buffer_entry_.element(index)]; diff --git a/tensorflow/compiler/xla/service/shaped_buffer.h b/tensorflow/compiler/xla/service/shaped_buffer.h index e5ea06fb136..b440948700f 100644 --- a/tensorflow/compiler/xla/service/shaped_buffer.h +++ b/tensorflow/compiler/xla/service/shaped_buffer.h @@ -75,6 +75,10 @@ class ShapedBuffer { // Set all device memory pointers in the object to null. void clear(); + // Adds a new buffer at the given shape index. + void AddBufferAtIndex(const perftools::gputools::DeviceMemoryBase& buffer, + const ShapeIndex& shape_index); + protected: // The shape of the device buffer with layout. const Shape shape_; From 5302a66f01deaaf0774e127f79b0373da194529c Mon Sep 17 00:00:00 2001 From: Russell Power Date: Tue, 14 Nov 2017 13:55:42 -0800 Subject: [PATCH 022/104] Check for placeholder ops (other than features and labels) during graph validation. We can provide a more helpful error message at this point than if we detect this at XLA compile time. PiperOrigin-RevId: 175730190 --- tensorflow/contrib/tpu/python/tpu/tpu.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tensorflow/contrib/tpu/python/tpu/tpu.py b/tensorflow/contrib/tpu/python/tpu/tpu.py index bc3c888b1fc..9aa5a9c78db 100644 --- a/tensorflow/contrib/tpu/python/tpu/tpu.py +++ b/tensorflow/contrib/tpu/python/tpu/tpu.py @@ -31,6 +31,10 @@ from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variable_scope +_SUMMARY_OPS = ("ScalarSummary",) +_PLACEHOLDER_OPS = ("Placeholder",) + + def initialize_system(embedding_config=None, job=None): """Initializes a distributed TPU system for use with TensorFlow. @@ -103,6 +107,12 @@ class TPUReplicateContext(control_flow_ops.ControlFlowContext): def _AddOpInternal(self, op): # pylint: disable=protected-access + if op.type in _PLACEHOLDER_OPS: + raise ValueError("Placeholder %s is not supported." % op.name) + + if op.type in _SUMMARY_OPS: + raise ValueError("Summary operations are not currently supported.") + if any(x.dtype._is_ref_dtype for x in op.inputs): raise NotImplementedError( "Non-resource Variables are not supported inside TPU computations " From 98b52cfd420fc054ad082bf1865d9eabee0b7a3e Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 14:14:07 -0800 Subject: [PATCH 023/104] boosted_trees: Finalize the centering bias tree only after centering is done (to make sense). 
PiperOrigin-RevId: 175733336 --- .../boosted_trees/kernels/prediction_ops.cc | 19 +++++++++++++------ .../boosted_trees/kernels/training_ops.cc | 2 +- .../python/kernel_tests/training_ops_test.py | 6 ++---- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/tensorflow/contrib/boosted_trees/kernels/prediction_ops.cc b/tensorflow/contrib/boosted_trees/kernels/prediction_ops.cc index 766982b4f20..f8086b0c2bb 100644 --- a/tensorflow/contrib/boosted_trees/kernels/prediction_ops.cc +++ b/tensorflow/contrib/boosted_trees/kernels/prediction_ops.cc @@ -63,19 +63,26 @@ const char* kPredictionsTensorName = "predictions"; void CalculateTreesToInclude( const boosted_trees::trees::DecisionTreeEnsembleConfig& config, const std::vector& trees_to_drop, const int32 num_trees, - const bool only_finalized, std::vector* trees_to_include) { + const bool only_finalized, const bool center_bias, + std::vector* trees_to_include) { trees_to_include->reserve(num_trees - trees_to_drop.size()); int32 index = 0; // This assumes that trees_to_drop is a sorted list of tree ids. for (int32 tree = 0; tree < num_trees; ++tree) { - if ((!trees_to_drop.empty() && index < trees_to_drop.size() && - trees_to_drop[index] == tree) || - (only_finalized && config.tree_metadata_size() > 0 && - !config.tree_metadata(tree).is_finalized())) { + // Skip the tree if tree is in the list of trees_to_drop. + if (!trees_to_drop.empty() && index < trees_to_drop.size() && + trees_to_drop[index] == tree) { ++index; continue; } + // Or skip if the tree is not finalized and only_finalized is set, + // with the exception of centering bias. + if (only_finalized && !(center_bias && tree == 0) && + config.tree_metadata_size() > 0 && + !config.tree_metadata(tree).is_finalized()) { + continue; + } trees_to_include->push_back(tree); } } @@ -250,7 +257,7 @@ class GradientTreesPredictionOp : public OpKernel { CalculateTreesToInclude( ensemble_resource->decision_tree_ensemble(), dropped_trees, ensemble_resource->decision_tree_ensemble().trees_size(), - only_finalized_trees_, &trees_to_include); + only_finalized_trees_, center_bias_, &trees_to_include); // Allocate output predictions matrix. Tensor* output_predictions_t = nullptr; diff --git a/tensorflow/contrib/boosted_trees/kernels/training_ops.cc b/tensorflow/contrib/boosted_trees/kernels/training_ops.cc index 2a5c7949f2d..c77d90e243c 100644 --- a/tensorflow/contrib/boosted_trees/kernels/training_ops.cc +++ b/tensorflow/contrib/boosted_trees/kernels/training_ops.cc @@ -237,6 +237,7 @@ class CenterTreeEnsembleBiasOp : public OpKernel { VLOG(1) << "Continuing to center bias, delta=" << total_delta; } else { VLOG(1) << "Done centering bias, delta=" << total_delta; + ensemble_resource->LastTreeMetadata()->set_is_finalized(true); } Tensor* continue_centering_t = nullptr; OP_REQUIRES_OK( @@ -260,7 +261,6 @@ class CenterTreeEnsembleBiasOp : public OpKernel { for (size_t idx = 0; idx < logits_dimension; ++idx) { leaf->mutable_vector()->add_value(0.0); } - ensemble_resource->LastTreeMetadata()->set_is_finalized(true); return leaf; } else if (num_trees == 1) { // Confirms that the only tree is a bias and returns its leaf. 
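Taken together, the two C++ hunks above change when the centering-bias tree counts as usable: the bias tree is marked finalized only once centering has actually converged, and prediction keeps including it even while it is still unfinalized. A rough Python re-statement of the new selection rule in `CalculateTreesToInclude` (descriptive names only; the `tree_metadata_size() == 0` corner case is omitted for brevity):

    def trees_to_include(tree_metadata, trees_to_drop, only_finalized, center_bias):
        # tree_metadata: per-tree objects carrying an `is_finalized` flag.
        # trees_to_drop: sorted ids of trees dropped for this prediction.
        dropped = set(trees_to_drop)
        include = []
        for tree in range(len(tree_metadata)):
            if tree in dropped:
                continue  # dropped trees are never evaluated
            # Skip unfinalized trees when only_finalized is requested, except
            # tree 0 when it is the (possibly still-centering) bias tree.
            if (only_finalized and not (center_bias and tree == 0) and
                    not tree_metadata[tree].is_finalized):
                continue
            include.append(tree)
        return include
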
diff --git a/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py b/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py index f0413fee5a8..c2e65b643df 100644 --- a/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py +++ b/tensorflow/contrib/boosted_trees/python/kernel_tests/training_ops_test.py @@ -181,7 +181,6 @@ class CenterTreeEnsembleBiasOpTest(test_util.TensorFlowTestCase): tree_weights: 1.0 tree_metadata { num_layers_grown: 1 - is_finalized: true } growing_metadata { num_trees_attempted: 1 @@ -189,7 +188,7 @@ class CenterTreeEnsembleBiasOpTest(test_util.TensorFlowTestCase): } """ self.assertEqual(new_stamp, 1) - self.assertEqual(stats.num_trees, 1) + self.assertEqual(stats.num_trees, 0) self.assertEqual(stats.num_layers, 1) self.assertEqual(stats.active_tree, 1) self.assertEqual(stats.active_layer, 1) @@ -231,7 +230,6 @@ class CenterTreeEnsembleBiasOpTest(test_util.TensorFlowTestCase): tree_weights: 1.0 tree_metadata { num_layers_grown: 1 - is_finalized: true } growing_metadata { num_trees_attempted: 1 @@ -239,7 +237,7 @@ class CenterTreeEnsembleBiasOpTest(test_util.TensorFlowTestCase): } """ self.assertEqual(new_stamp, 2) - self.assertEqual(stats.num_trees, 1) + self.assertEqual(stats.num_trees, 0) self.assertEqual(stats.num_layers, 1) self.assertEqual(stats.active_tree, 1) self.assertEqual(stats.active_layer, 1) From 3fab2f9bbdf5745643d2dd0a390e1dd762c85bc2 Mon Sep 17 00:00:00 2001 From: Allen Lavoie Date: Tue, 14 Nov 2017 14:28:04 -0800 Subject: [PATCH 024/104] Make save/restore non-members of tfe.Network. This should make it easier to move to core. tfe.Network.save -> tfe.save_network_checkpoint tfe.Network.restore -> tfe.restore_network_checkpoint Some minor changes in the restore-on-load logic to make it work as a non-member of Network (particularly in _add_deferred_restoration). The other code changes are trivial, just moving code around. PiperOrigin-RevId: 175735659 --- tensorflow/contrib/eager/python/network.py | 860 +++++++++--------- .../contrib/eager/python/network_test.py | 83 +- tensorflow/contrib/eager/python/tfe.py | 7 +- 3 files changed, 485 insertions(+), 465 deletions(-) diff --git a/tensorflow/contrib/eager/python/network.py b/tensorflow/contrib/eager/python/network.py index 1a5c6e8aec6..713ab1ee573 100644 --- a/tensorflow/contrib/eager/python/network.py +++ b/tensorflow/contrib/eager/python/network.py @@ -37,185 +37,6 @@ from tensorflow.python.training import training_util # functions in base.py which should be reused. -_DeferredRestoration = collections.namedtuple( - - "_DeferredRestoration", - [ - # The map_func to use (either user-specified or the default). - "map_func", - # Boolean, True if the user specified an explicit map_func, for error - # messages. - "map_func_is_user", - # A mapping from checkpoint names to initial values of not-yet-created - # variables which should be restored. These values come from parsing a - # checkpoint. - "checkpointed_variables_to_restore", - # A mapping from checkpoint name to variable objects of variables which - # have already been restored, for error checking. - "restored_variables", - # The session to restore with (if in graph mode). - "session", - # Names of the Network where the restore was requested, for error - # messages. 
- "network_name", - "network_scope_name" - ]) - - -def _default_naming_conflict_error_message( - mapped_name, first_variable, second_variable, - network_name, network_scope_name): - return ( - ("The default checkpoint variable name mapping strategy for Network " - "'%s' resulted in a naming conflict. We attempted to strip off the " - "variable prefix for the Network ('%s'), but this resulted in two " - "variables named '%s' (originally '%s' and '%s'). This should only " - "happen when using variable sharing (i.e. the Network contains Networks " - "or Layers which were first added to another Network, and therefore " - "have that Network's variable prefix). One solution is to pass " - "`map_func=lambda n: n` to Network.save and Network.restore to use " - "fully qualified variable names in the checkpoint, although this will " - "require that the variable prefix of the Network being restored into " - "is also '%s'. You may alternatively write an arbitrary mapping.") - % ( - network_name, network_scope_name, mapped_name, - first_variable._shared_name, - second_variable._shared_name, network_scope_name - )) - - -def _restore_custom_map_func_error_message( - mapped_name, first_variable, second_variable, - network_name, network_scope_name): - return ( - ("The map_func passed to Network.restore for the Network '%s' " - "resulted in two variables named '%s' (originally '%s' and '%s'). Since " - "this is also an error on Network.save, this Network was " - "probably not saved with this map_func. Note that map_func " - "always maps from full variable names to checkpoint names; " - "there is no need to specify an inverse mapping.\n\n" - "Try stripping less from the variable names, or renaming parts " - "of the Network. For reference, variables created by sub-Layers " - "of this Network are prefixed with '%s', but if they are " - "re-used after being added to another Network they will have " - "that Network's full variable prefix instead.") % ( - network_name, mapped_name, - first_variable._shared_name, - second_variable._shared_name, - network_scope_name)) - - -def _make_custom_getter_for_deferred_restorations(): - """Returns a custom getter which searches `deferred_restorations`. - - Returns: A tuple of (_custom_getter, deferred_restorations) - _custom_getter: The getter which should be added to variable_scopes where - variables will be created. - deferred_restorations: A list for _DeferredRestoration objects. Typically - empty when the getter is set, and expanded as deferred restorations are - requested. All new deferred restorations should be appended to the end of - the list, where they will have priority over older deferred restorations. - """ - deferred_restorations = [] - - def _custom_getter(getter, name, shape=None, dtype=None, - initializer=None, - *args, **kwargs): - """A custom getter which processes deferred restorations.""" - # Iterate over restorations, newest first (newer restorations will take - # precedence over older restorations, just like with immediate restorations - # into existing variables). 
- delayed_restoration = None - found_value = False - value_to_restore = None - for delayed_restoration in reversed( - deferred_restorations): - checkpoint_name = delayed_restoration.map_func(name) - if (checkpoint_name - in delayed_restoration.checkpointed_variables_to_restore): - found_value = True - value_to_restore = ( - delayed_restoration.checkpointed_variables_to_restore[ - checkpoint_name]) - if found_value: - break - # value_to_restore may be False because this variable is not in any - # checkpoint we are restoring, or None because we have explicitly set it to - # None when it was previously fetched. In either case, we don't need to - # set an initializer. - if found_value and value_to_restore is not None: - initializer = value_to_restore - shape = None - variable = getter(name, shape=shape, dtype=dtype, initializer=initializer, - *args, **kwargs) - if found_value and value_to_restore is not None: - # Mark as already restored from this checkpoint. - delayed_restoration.checkpointed_variables_to_restore[ - checkpoint_name] = None - if context.in_graph_mode(): - delayed_restoration.session.run(variable.initializer) - if found_value: - # Error checking should run even if we've already restored a value. - if delayed_restoration.restored_variables.setdefault( - checkpoint_name, variable) is not variable: - # Naming conflict. We've tried to initialize two variables with the - # same value from the checkpoint. - if delayed_restoration.map_func_is_user: - raise ValueError( - _restore_custom_map_func_error_message( - mapped_name=checkpoint_name, - first_variable=delayed_restoration.restored_variables[ - checkpoint_name], - second_variable=variable, - network_name=delayed_restoration.network_name, - network_scope_name=delayed_restoration.network_scope_name)) - else: - raise ValueError( - _default_naming_conflict_error_message( - mapped_name=checkpoint_name, - first_variable=delayed_restoration.restored_variables[ - checkpoint_name], - second_variable=variable, - network_name=delayed_restoration.network_name, - network_scope_name=delayed_restoration.network_scope_name)) - return variable - return _custom_getter, deferred_restorations - - -def _make_prefix_stripping_map_fn(scope_name): - """Closure for stripping the scope name of a Network. - - Implemented as a closure rather than a member function to avoid reference - cycles in deferred restorations (this function should not have a reference to - the Network which created it). - - Args: - scope_name: The Network.scope_name to strip from variables. - Returns: - A scope_name-stripping default `map_fn` for the Network. - """ - - def _strip_variable_prefix(original_variable_name): - """The default map_func for saving or restoring variables. - - Strips the variable prefix for the Network on which save/restore was called, - and leaves other variable names fully qualified in the checkpoint. - - Args: - original_variable_name: The _shared_name of the variable (no :0 - suffix) to map. - Returns: - The checkpoint name of the variable. - """ - scope_name_with_slash = scope_name + "/" - if original_variable_name.startswith(scope_name_with_slash): - return original_variable_name[len(scope_name_with_slash):] - else: - return original_variable_name - - return _strip_variable_prefix - - class Network(base.Layer): """Represents the composition of a set of Layers. @@ -250,8 +71,6 @@ class Network(base.Layer): # closed before build is called. 
self._variable_scope_counts_on_init = ( variable_scope._get_default_variable_store().variable_scopes_count) - self._custom_getter, self._deferred_restorations = ( - _make_custom_getter_for_deferred_restorations()) def _init_set_name(self, name): # Anonymous Networks (name=None) defer setting a final name until they are @@ -543,252 +362,6 @@ class Network(base.Layer): "at https://github.com/tensorflow/tensorflow/issues/new if this is " "important to you") - def save(self, save_path, global_step=None, map_func=None): - """Save variables from the Network to a checkpoint. - - Args: - save_path: Either a checkpoint prefix or the name of a directory to save - the checkpoint in (in which case the checkpoint will be named based on - the Network name). - global_step: The global step to use when naming the checkpoint. If None - (default), we will first try to get the default global step. If that - fails because no default global step exists, then the checkpoint is - created without a global step suffix. - map_func: A function mapping fully qualified variable names - (e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By - default (if `map_func=None`), the variable prefix for the network being - restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped - and all other variable names (shared with other Networks) are left - unchanged. - Returns: - The checkpoint prefix for the saved checkpoint, which may be passed to - `Network.restore`. - Raises: - ValueError: If the Network has not yet been called, or if map_func results - in a name collision. - """ - if not self.built: - raise ValueError( - "Attempt to save the Network before it was first called. This means " - "variables have not yet been created, so there is nothing to save.") - self._set_scope() # scope_name should be available to map_funcs - if global_step is None: - global_step = training_util.get_global_step() - if os.path.isdir(save_path): - # If we were passed a directory, default to naming based on the Network - # name. - save_path = os.path.join(save_path, self.name.replace("/", "_")) - user_map_func = map_func - if map_func is None: - map_func = _make_prefix_stripping_map_fn(self.scope_name) - variable_map = {} - for variable in self.variables: - mapped_name = map_func(variable._shared_name) - if variable_map.setdefault(mapped_name, variable) is not variable: - if user_map_func is None: - # Instead of erroring out, we could just re-try and silently use the - # full variable names in the checkpoint. This could be odd for deeply - # nested sub-Networks (since the full prefix from the nesting would - # get added), so for now we'll let the user deal with this case. - raise ValueError(_default_naming_conflict_error_message( - mapped_name=mapped_name, - first_variable=variable_map[mapped_name], - second_variable=variable, - network_name=self.name, - network_scope_name=self.scope_name)) - else: - # The user passed their own problematic map_func. - raise ValueError( - ("The map_func passed to Network.save for the Network '%s' " - "resulted in two variables named '%s' ('%s' and '%s'). Try " - "stripping less from the variable names, or renaming parts of " - "the Network. 
For reference, variables created by sub-Layers of " - "this Network are prefixed with '%s', but if they are re-used " - "after being added to another Network, they will have that " - "Network's full variable prefix instead.") % ( - self.name, mapped_name, - variable_map[mapped_name]._shared_name, - variable._shared_name, - self.scope_name)) - if context.in_eager_mode(): - sess = None - else: - sess = ops.get_default_session() - return saver_lib.Saver(variable_map).save( - sess=sess, save_path=save_path, write_meta_graph=False, - global_step=global_step) - - def _restore_existing_variables(self, save_path, map_func, user_map_func): - """Use a standard Saver to restore existing variables from a checkpoint. - - Args: - save_path: The checkpoint prefix or directory to read from. - map_func: The function to use when mapping from variable names to - checkpoint names. - user_map_func: The original map_func passed by the user, for error - checking. - Returns: - A dictionary mapping from checkpoint names to variable objects which have - been restored (for bookkeeping to avoid deferred restorations on these - variables). - Raises: - ValueError: If there is a name collision. - """ - existing_variables_by_checkpoint_name = {} - for variable in self.variables: - checkpoint_name = map_func(variable._shared_name) - if existing_variables_by_checkpoint_name.setdefault( - checkpoint_name, variable) is not variable: - if user_map_func is None: - raise ValueError(_default_naming_conflict_error_message( - mapped_name=checkpoint_name, - first_variable=existing_variables_by_checkpoint_name[ - checkpoint_name], - second_variable=variable, - network_name=self.name, - network_scope_name=self.scope_name)) - else: - raise ValueError(_restore_custom_map_func_error_message( - mapped_name=checkpoint_name, - first_variable=existing_variables_by_checkpoint_name[ - checkpoint_name], - second_variable=variable, - network_name=self.name, - network_scope_name=self.scope_name)) - if existing_variables_by_checkpoint_name: - if context.in_eager_mode(): - sess = None - else: - sess = ops.get_default_session() - saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore( - sess=sess, save_path=save_path) - return existing_variables_by_checkpoint_name - - def _set_restore_on_create(self, save_path, map_func, user_map_func, - existing_variables_by_checkpoint_name): - """If necessary, request deferred restorations of variables.""" - checkpoint_reader = checkpoint_utils.load_checkpoint(save_path) - checkpointed_variables_to_restore = {} - for checkpoint_name, _ in checkpoint_utils.list_variables(save_path): - if checkpoint_name in existing_variables_by_checkpoint_name: - # This variable was already created and restored. - continue - # Save the variable for later restoration in a custom getter. - checkpointed_variables_to_restore[checkpoint_name] = ( - checkpoint_reader.get_tensor(checkpoint_name)) - # Only set a deferred restoration if there are checkpoint variables which - # have not been assigned to existing variables. Note that this loses out on - # some opportunity for error checking, but avoids creating - # _DeferredRestoration objects once a Network has been built (so that - # restoring in a loop does not take increasing amounts of memory). - if checkpointed_variables_to_restore: - if context.in_eager_mode(): - sess = None - else: - sess = ops.get_default_session() - # We need a name for error messages. If we haven't been added to another - # Network yet, we're top-level. 
- self._finalize_name(False) - self._set_scope() - # Save a record of this restoration for use in the custom getter. - deferred_restoration = _DeferredRestoration( - map_func=map_func, - map_func_is_user=(user_map_func is not None), - checkpointed_variables_to_restore=checkpointed_variables_to_restore, - restored_variables={}, - session=sess, - network_name=self.name, - network_scope_name=self.scope_name) - self._deferred_restorations.append(deferred_restoration) - # Add the deferred registration to non-Network children, and request that - # Networks propagate the request to their children. - self._add_deferred_restoration(deferred_restoration) - - def _add_deferred_restoration(self, deferred_restoration): - """Add a deferred restoration to this Network and all children. - - Restorations which are requested later have higher priority, and the highest - priority matching restoration is applied to a variable when it is created. - - Args: - deferred_restoration: A _DeferredRestoration object. - """ - # Networks don't create variables at the moment, so this append isn't - # strictly necessary. We could get by with only adding deferred restorations - # to non-Network Layers. - self._set_scope() - # We use set_custom_getter because it avoids recursively calling up the - # variable_scope tree. We've done the tree traversal ourselves and have - # added the request to each Layer which needs it. - self._scope.set_custom_getter(self._custom_getter) - self._deferred_restorations.append(deferred_restoration) - for layer in self.layers: - if isinstance(layer, Network): - # For Networks, request that they propagate this deferred restoration - # to all of their children recursively. - layer._add_deferred_restoration(deferred_restoration) - else: - # For non-Network Layers, make sure they have a deferred restoration - # queue and a custom getter, then add our request to it. - if not hasattr(layer, "_custom_getter"): - assert not hasattr(layer, "_deferred_restorations") - layer._custom_getter, layer._deferred_restorations = ( - _make_custom_getter_for_deferred_restorations()) - self._set_scope_for_nonnetwork_sublayer(layer) - layer._scope.set_custom_getter(layer._custom_getter) - layer._deferred_restorations.append(deferred_restoration) - - def restore(self, save_path, map_func=None): - """Restore the Network from a checkpoint. - - If variables have already been created (typically when some or all of the - `Network` is built), they are assigned values from the checkpoint - immediately, overwriting any existing values (in graph mode the default - session is used for the assignments). - - If there are checkpoint entries which do not correspond to any existing - variables in the `Network`, these values are saved for deferred restoration; - their initial values will be the checkpointed values once they are - created. Requests for multiple deferred restorations behave the same way as - immediate restorations, in that later requests will take priority over - earlier requests relevant to the same variable. - - If this `Network` shares `Layer`s with another network, those `Layer`s will - also have their variables restored from the checkpoint. - - Args: - save_path: The return value of `Network.save`, or a directory to search - for a checkpoint. - map_func: A function mapping fully qualified variable names - (e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By - default (if `map_func=None`), the variable prefix for the network being - restored (`Network.scope_name + '/'`, e.g. 
'my_network_1/') is stripped - and all other variable names (shared with other Networks) are left - unchanged. Note that this is the _same_ map_func as `Network.save`, not - an inverse mapping. - """ - self._finalize_name(parent_network=False) - self._set_scope() # scope_name should be available to map_funcs - if os.path.isdir(save_path): - # If we don't have a name yet, set no parent. - save_path = os.path.join(save_path, self.name.replace("/", "_")) - user_map_func = map_func - if map_func is None: - map_func = _make_prefix_stripping_map_fn(self.scope_name) - # Step one is to restore any existing variables from the checkpoint. - existing_variables_by_checkpoint_name = self._restore_existing_variables( - save_path=save_path, - map_func=map_func, - user_map_func=user_map_func) - # Step two is to set a custom getter which restores variables on creation, - # for those variables which have not been added to sub-Layers yet. - self._set_restore_on_create( - save_path=save_path, - map_func=map_func, - user_map_func=user_map_func, - existing_variables_by_checkpoint_name=( - existing_variables_by_checkpoint_name)) - # TODO(josh11b): Support other Layer methods needed for graph mode, such as for # losses and updates @@ -838,3 +411,436 @@ class Sequential(Network): else: inputs = l(inputs) return inputs + + +_DeferredRestoration = collections.namedtuple( + + "_DeferredRestoration", + [ + # The map_func to use (either user-specified or the default). + "map_func", + # Boolean, True if the user specified an explicit map_func, for error + # messages. + "map_func_is_user", + # A mapping from checkpoint names to initial values of not-yet-created + # variables which should be restored. These values come from parsing a + # checkpoint. + "checkpointed_variables_to_restore", + # A mapping from checkpoint name to variable objects of variables which + # have already been restored, for error checking. + "restored_variables", + # The session to restore with (if in graph mode). + "session", + # Names of the Network where the restore was requested, for error + # messages. + "network_name", + "network_scope_name" + ]) + + +def _default_naming_conflict_error_message( + mapped_name, first_variable, second_variable, + network_name, network_scope_name): + return ( + ("The default checkpoint variable name mapping strategy for Network " + "'%s' resulted in a naming conflict. We attempted to strip off the " + "variable prefix for the Network ('%s'), but this resulted in two " + "variables named '%s' (originally '%s' and '%s'). This should only " + "happen when using variable sharing (i.e. the Network contains Networks " + "or Layers which were first added to another Network, and therefore " + "have that Network's variable prefix). One solution is to pass " + "`map_func=lambda n: n` to save and restore to use fully qualified " + "variable names in the checkpoint, although this will require that the " + "variable prefix of the Network being restored into is also '%s'. You " + "may alternatively write an arbitrary mapping.") + % ( + network_name, network_scope_name, mapped_name, + first_variable._shared_name, + second_variable._shared_name, network_scope_name + )) + + +def _restore_custom_map_func_error_message( + mapped_name, first_variable, second_variable, + network_name, network_scope_name): + return ( + ("The map_func passed to restore_network_checkpoint for the Network '%s' " + "resulted in two variables named '%s' (originally '%s' and '%s'). 
Since " + "this is also an error when saving, this Network was " + "probably not saved with this map_func. Note that map_func " + "always maps from full variable names to checkpoint names; " + "there is no need to specify an inverse mapping.\n\n" + "Try stripping less from the variable names, or renaming parts " + "of the Network. For reference, variables created by sub-Layers " + "of this Network are prefixed with '%s', but if they are " + "re-used after being added to another Network they will have " + "that Network's full variable prefix instead.") % ( + network_name, mapped_name, + first_variable._shared_name, + second_variable._shared_name, + network_scope_name)) + + +def _make_custom_getter_for_deferred_restorations(): + """Returns a custom getter which searches `deferred_restorations`. + + Returns: A tuple of (_custom_getter, deferred_restorations) + _custom_getter: The getter which should be added to variable_scopes where + variables will be created. + deferred_restorations: A list for _DeferredRestoration objects. Typically + empty when the getter is set, and expanded as deferred restorations are + requested. All new deferred restorations should be appended to the end of + the list, where they will have priority over older deferred restorations. + """ + deferred_restorations = [] + + def _custom_getter(getter, name, shape=None, dtype=None, + initializer=None, + *args, **kwargs): + """A custom getter which processes deferred restorations.""" + # Iterate over restorations, newest first (newer restorations will take + # precedence over older restorations, just like with immediate restorations + # into existing variables). + delayed_restoration = None + found_value = False + value_to_restore = None + for delayed_restoration in reversed( + deferred_restorations): + checkpoint_name = delayed_restoration.map_func(name) + if (checkpoint_name + in delayed_restoration.checkpointed_variables_to_restore): + found_value = True + value_to_restore = ( + delayed_restoration.checkpointed_variables_to_restore[ + checkpoint_name]) + if found_value: + break + # value_to_restore may be False because this variable is not in any + # checkpoint we are restoring, or None because we have explicitly set it to + # None when it was previously fetched. In either case, we don't need to + # set an initializer. + if found_value and value_to_restore is not None: + initializer = value_to_restore + shape = None + variable = getter(name, shape=shape, dtype=dtype, initializer=initializer, + *args, **kwargs) + if found_value and value_to_restore is not None: + # Mark as already restored from this checkpoint. + delayed_restoration.checkpointed_variables_to_restore[ + checkpoint_name] = None + if context.in_graph_mode(): + delayed_restoration.session.run(variable.initializer) + if found_value: + # Error checking should run even if we've already restored a value. + if delayed_restoration.restored_variables.setdefault( + checkpoint_name, variable) is not variable: + # Naming conflict. We've tried to initialize two variables with the + # same value from the checkpoint. 
+ if delayed_restoration.map_func_is_user: + raise ValueError( + _restore_custom_map_func_error_message( + mapped_name=checkpoint_name, + first_variable=delayed_restoration.restored_variables[ + checkpoint_name], + second_variable=variable, + network_name=delayed_restoration.network_name, + network_scope_name=delayed_restoration.network_scope_name)) + else: + raise ValueError( + _default_naming_conflict_error_message( + mapped_name=checkpoint_name, + first_variable=delayed_restoration.restored_variables[ + checkpoint_name], + second_variable=variable, + network_name=delayed_restoration.network_name, + network_scope_name=delayed_restoration.network_scope_name)) + return variable + return _custom_getter, deferred_restorations + + +def _make_prefix_stripping_map_fn(scope_name): + """Closure for stripping the scope name of a Network. + + Implemented as a closure rather than a member function to avoid reference + cycles in deferred restorations (this function should not have a reference to + the Network which created it). + + Args: + scope_name: The Network.scope_name to strip from variables. + Returns: + A scope_name-stripping default `map_fn` for the Network. + """ + + def _strip_variable_prefix(original_variable_name): + """The default map_func for saving or restoring variables. + + Strips the variable prefix for the Network on which save/restore was called, + and leaves other variable names fully qualified in the checkpoint. + + Args: + original_variable_name: The _shared_name of the variable (no :0 + suffix) to map. + Returns: + The checkpoint name of the variable. + """ + scope_name_with_slash = scope_name + "/" + if original_variable_name.startswith(scope_name_with_slash): + return original_variable_name[len(scope_name_with_slash):] + else: + return original_variable_name + + return _strip_variable_prefix + + +def save_network_checkpoint( + network, save_path, global_step=None, map_func=None): + """Save variables from the Network to a checkpoint. + + Args: + network: A Network object to save. + save_path: Either a checkpoint prefix or the name of a directory to save + the checkpoint in (in which case the checkpoint will be named based on + the Network name). + global_step: The global step to use when naming the checkpoint. If None + (default), we will first try to get the default global step. If that + fails because no default global step exists, then the checkpoint is + created without a global step suffix. + map_func: A function mapping fully qualified variable names + (e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By + default (if `map_func=None`), the variable prefix for the network being + restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped + and all other variable names (shared with other Networks) are left + unchanged. + Returns: + The checkpoint prefix for the saved checkpoint, which may be passed to + `Network.restore`. + Raises: + ValueError: If the Network has not yet been called, or if map_func results + in a name collision. + """ + if not network.built: + raise ValueError( + "Attempt to save the Network before it was first called. This means " + "variables have not yet been created, so there is nothing to save.") + network._set_scope() # scope_name should be available to map_funcs + if global_step is None: + global_step = training_util.get_global_step() + if os.path.isdir(save_path): + # If we were passed a directory, default to naming based on the Network + # name. 
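As a concrete illustration of the default name mapping implemented by _make_prefix_stripping_map_fn above (the variable names here are made up):

strip = _make_prefix_stripping_map_fn("my_network_1")
strip("my_network_1/dense_1/kernel")   # -> "dense_1/kernel" (own prefix stripped)
strip("other_network/dense_1/kernel")  # -> "other_network/dense_1/kernel" (shared variable, left unchanged)

The same, non-inverted mapping is applied on both save and restore, which is why a checkpoint written with one map_func must be read back with the same map_func.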
+ save_path = os.path.join(save_path, network.name.replace("/", "_")) + user_map_func = map_func + if map_func is None: + map_func = _make_prefix_stripping_map_fn(network.scope_name) + variable_map = {} + for variable in network.variables: + mapped_name = map_func(variable._shared_name) + if variable_map.setdefault(mapped_name, variable) is not variable: + if user_map_func is None: + # Instead of erroring out, we could just re-try and silently use the + # full variable names in the checkpoint. This could be odd for deeply + # nested sub-Networks (since the full prefix from the nesting would + # get added), so for now we'll let the user deal with this case. + raise ValueError(_default_naming_conflict_error_message( + mapped_name=mapped_name, + first_variable=variable_map[mapped_name], + second_variable=variable, + network_name=network.name, + network_scope_name=network.scope_name)) + else: + # The user passed their own problematic map_func. + raise ValueError( + ("The map_func passed to save_network_checkpoint for the Network " + "'%s' resulted in two variables named '%s' ('%s' and '%s'). Try " + "stripping less from the variable names, or renaming parts of " + "the Network. For reference, variables created by sub-Layers of " + "this Network are prefixed with '%s', but if they are re-used " + "after being added to another Network, they will have that " + "Network's full variable prefix instead.") % ( + network.name, mapped_name, + variable_map[mapped_name]._shared_name, + variable._shared_name, + network.scope_name)) + if context.in_eager_mode(): + sess = None + else: + sess = ops.get_default_session() + return saver_lib.Saver(variable_map).save( + sess=sess, save_path=save_path, write_meta_graph=False, + global_step=global_step) + + +def _add_deferred_restoration(layer, deferred_restoration): + """Add a deferred restoration to this Layer and all children. + + Restorations which are requested later have higher priority, and the highest + priority matching restoration is applied to a variable when it is created. + + Args: + layer: The Layer (may not be a Network) to operate on. + deferred_restoration: A _DeferredRestoration object. + """ + # Networks don't create variables at the moment, so this append isn't strictly + # necessary. We could get by with only adding deferred restorations to + # non-Network Layers. + if isinstance(layer, Network): + layer._set_scope() + # Make sure this Layer has a deferred restoration queue and a custom getter, + # then add our request to it. + if not hasattr(layer, "_custom_getter"): + assert not hasattr(layer, "_deferred_restorations") + layer._custom_getter, layer._deferred_restorations = ( + _make_custom_getter_for_deferred_restorations()) + # We use set_custom_getter because it avoids recursively calling up the + # variable_scope tree. We've done the tree traversal ourselves and have added + # the request to each Layer which needs it. + layer._scope.set_custom_getter(layer._custom_getter) + layer._deferred_restorations.append(deferred_restoration) + if isinstance(layer, Network): + for sublayer in layer.layers: + if not isinstance(sublayer, Network): + layer._set_scope_for_nonnetwork_sublayer(sublayer) + _add_deferred_restoration(sublayer, deferred_restoration) + + +def _restore_existing_variables(network, save_path, map_func, user_map_func): + """Use a standard Saver to restore existing variables from a checkpoint. + + Args: + network: A Network object to restore. + save_path: The checkpoint prefix or directory to read from. 
+ map_func: The function to use when mapping from variable names to + checkpoint names. + user_map_func: The original map_func passed by the user, for error + checking. + Returns: + A dictionary mapping from checkpoint names to variable objects which have + been restored (for bookkeeping to avoid deferred restorations on these + variables). + Raises: + ValueError: If there is a name collision. + """ + existing_variables_by_checkpoint_name = {} + for variable in network.variables: + checkpoint_name = map_func(variable._shared_name) + if existing_variables_by_checkpoint_name.setdefault( + checkpoint_name, variable) is not variable: + if user_map_func is None: + raise ValueError(_default_naming_conflict_error_message( + mapped_name=checkpoint_name, + first_variable=existing_variables_by_checkpoint_name[ + checkpoint_name], + second_variable=variable, + network_name=network.name, + network_scope_name=network.scope_name)) + else: + raise ValueError(_restore_custom_map_func_error_message( + mapped_name=checkpoint_name, + first_variable=existing_variables_by_checkpoint_name[ + checkpoint_name], + second_variable=variable, + network_name=network.name, + network_scope_name=network.scope_name)) + if existing_variables_by_checkpoint_name: + if context.in_eager_mode(): + sess = None + else: + sess = ops.get_default_session() + saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore( + sess=sess, save_path=save_path) + return existing_variables_by_checkpoint_name + + +def _set_restore_on_create(network, save_path, map_func, user_map_func, + existing_variables_by_checkpoint_name): + """If necessary, request deferred restorations of variables.""" + checkpoint_reader = checkpoint_utils.load_checkpoint(save_path) + checkpointed_variables_to_restore = {} + for checkpoint_name, _ in checkpoint_utils.list_variables(save_path): + if checkpoint_name in existing_variables_by_checkpoint_name: + # This variable was already created and restored. + continue + # Save the variable for later restoration in a custom getter. + checkpointed_variables_to_restore[checkpoint_name] = ( + checkpoint_reader.get_tensor(checkpoint_name)) + # Only set a deferred restoration if there are checkpoint variables which + # have not been assigned to existing variables. Note that this loses out on + # some opportunity for error checking, but avoids creating + # _DeferredRestoration objects once a Network has been built (so that + # restoring in a loop does not take increasing amounts of memory). + if checkpointed_variables_to_restore: + if context.in_eager_mode(): + sess = None + else: + sess = ops.get_default_session() + # We need a name for error messages. If we haven't been added to another + # Network yet, we're top-level. + network._finalize_name(False) + network._set_scope() + # Save a record of this restoration for use in the custom getter. + deferred_restoration = _DeferredRestoration( + map_func=map_func, + map_func_is_user=(user_map_func is not None), + checkpointed_variables_to_restore=checkpointed_variables_to_restore, + restored_variables={}, + session=sess, + network_name=network.name, + network_scope_name=network.scope_name) + # Add the deferred registration to non-Network children, and request that + # Networks propagate the request to their children. + _add_deferred_restoration(network, deferred_restoration) + + +def restore_network_checkpoint(network, save_path, map_func=None): + """Restore the Network from a checkpoint. 
+ + If variables have already been created (typically when some or all of the + `Network` is built), they are assigned values from the checkpoint immediately, + overwriting any existing values (in graph mode the default session is used for + the assignments). + + If there are checkpoint entries which do not correspond to any existing + variables in the `Network`, these values are saved for deferred restoration; + their initial values will be the checkpointed values once they are + created. Requests for multiple deferred restorations behave the same way as + immediate restorations, in that later requests will take priority over earlier + requests relevant to the same variable. + + If this `Network` shares `Layer`s with another network, those `Layer`s will + also have their variables restored from the checkpoint. + + Args: + network: A Network object to restore. + save_path: The return value of `tfe.save_network_checkpoint`, or a directory + to search for a checkpoint. + map_func: A function mapping fully qualified variable names + (e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By + default (if `map_func=None`), the variable prefix for the network being + restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped + and all other variable names (shared with other Networks) are left + unchanged. Note that this is the _same_ map_func as + `tfe.save_network_checkpoint`, not an inverse mapping. + """ + network._finalize_name(parent_network=False) + network._set_scope() # scope_name should be available to map_funcs + if os.path.isdir(save_path): + # If we don't have a name yet, set no parent. + save_path = os.path.join(save_path, network.name.replace("/", "_")) + user_map_func = map_func + if map_func is None: + map_func = _make_prefix_stripping_map_fn(network.scope_name) + # Step one is to restore any existing variables from the checkpoint. + existing_variables_by_checkpoint_name = _restore_existing_variables( + network=network, + save_path=save_path, + map_func=map_func, + user_map_func=user_map_func) + # Step two is to set a custom getter which restores variables on creation, + # for those variables which have not been added to sub-Layers yet. + _set_restore_on_create( + network=network, + save_path=save_path, + map_func=map_func, + user_map_func=user_map_func, + existing_variables_by_checkpoint_name=( + existing_variables_by_checkpoint_name)) diff --git a/tensorflow/contrib/eager/python/network_test.py b/tensorflow/contrib/eager/python/network_test.py index 1127055c050..e66486d1655 100644 --- a/tensorflow/contrib/eager/python/network_test.py +++ b/tensorflow/contrib/eager/python/network_test.py @@ -46,8 +46,8 @@ class NetworkTest(test.TestCase): def _save_modify_load_network_built(self, net, global_step=None): checkpoint_directory = self.get_temp_dir() - checkpoint_path = net.save( - save_path=checkpoint_directory, global_step=global_step) + checkpoint_path = network.save_network_checkpoint( + network=net, save_path=checkpoint_directory, global_step=global_step) input_value = constant_op.constant([[42.0]]) original_output = self.evaluate(net(input_value)) for var in net.variables: @@ -56,13 +56,13 @@ class NetworkTest(test.TestCase): self.evaluate(net(input_value)), original_output) # Either the returned explicit checkpoint path or the directory should work. 
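Pulling the pieces together, a minimal end-to-end sketch of the new free functions. This is illustrative only: it assumes eager execution is enabled, a made-up MyNetwork class similar to the one used in these tests (a single Dense layer wired via track_layer), and a writable temporary directory.

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tfe.enable_eager_execution()

class MyNetwork(tfe.Network):
  def __init__(self):
    super(MyNetwork, self).__init__()
    self.l1 = self.track_layer(tf.layers.Dense(1))

  def call(self, x):
    return self.l1(x)

net = MyNetwork()
net(tf.constant([[2.0]]))  # Build the variables before saving.
ckpt = tfe.save_network_checkpoint(net, "/tmp/my_network_checkpoints")

fresh = MyNetwork()
tfe.restore_network_checkpoint(fresh, ckpt)  # Deferred: no variables exist yet.
fresh(tf.constant([[2.0]]))  # Variables are created and restored on this call.

Restoring before the first call exercises the deferred path; restoring after the network is built assigns the checkpointed values immediately, as the tests below verify.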
- net.restore(save_path=checkpoint_directory) + network.restore_network_checkpoint(net, save_path=checkpoint_directory) self.assertAllEqual( original_output, self.evaluate(net(input_value))) for var in net.variables: self.evaluate(var.assign(var + 2.)) - net.restore(save_path=checkpoint_path) + network.restore_network_checkpoint(net, save_path=checkpoint_path) self.assertAllEqual( original_output, self.evaluate(net(input_value))) @@ -91,7 +91,7 @@ class NetworkTest(test.TestCase): net = MyNetwork(name="abcd") with self.assertRaisesRegexp( ValueError, "Attempt to save the Network before it was first called"): - net.save(self.get_temp_dir()) + network.save_network_checkpoint(net, self.get_temp_dir()) net(constant_op.constant([[2.0]])) self.evaluate(net.trainable_variables[0].assign([[17.0]])) self._save_modify_load_network_built(net, global_step=None) @@ -105,7 +105,7 @@ class NetworkTest(test.TestCase): self.evaluate(net.variables[0].assign([[3.]])) default_global_step = training_util.get_or_create_global_step() self.evaluate(default_global_step.assign(4242)) - save_path = net.save(self.get_temp_dir()) + save_path = network.save_network_checkpoint(net, self.get_temp_dir()) self.assertIn("abcd-4242", save_path) # TODO(allenl): This test creates garbage in some Python versions @@ -116,10 +116,10 @@ class NetworkTest(test.TestCase): test_input = constant_op.constant([[2.0]]) net1(test_input) self.evaluate(net1.trainable_variables[0].assign([[17.0]])) - save_path = net1.save(save_dir) + save_path = network.save_network_checkpoint(net1, save_dir) # With a pre-build restore we should have the same value. net2 = MyNetwork() - net2.restore(save_path) + network.restore_network_checkpoint(net2, save_path) self.assertAllEqual(self.evaluate(net1(test_input)), self.evaluate(net2(test_input))) self.assertIsNot(net1.variables[0], net2.variables[0]) @@ -176,11 +176,12 @@ class NetworkTest(test.TestCase): "checkpoint_creator/first_layer/kernel": "owner_1/first_layer/kernel", "checkpoint_creator/second_layer/kernel": "second_layer/kernel", } - save_path = checkpoint_creator.save( + save_path = network.save_network_checkpoint( + checkpoint_creator, self.get_temp_dir(), map_func=lambda full_name: name_mapping[full_name]) load_into = User(use_layer=first_owner.first) - load_into.restore(save_path) + network.restore_network_checkpoint(load_into, save_path) self.assertEqual(0, len(first_owner.variables)) self.assertAllEqual(self.evaluate(checkpoint_creator(one)), self.evaluate(load_into(one))) @@ -201,7 +202,8 @@ class NetworkTest(test.TestCase): else: return "user_2/" + original_name with self.assertRaisesRegexp(ValueError, "garbage collected"): - load_into.restore(save_path, map_func=_restore_map_func) + network.restore_network_checkpoint( + load_into, save_path, map_func=_restore_map_func) @test_util.run_in_graph_and_eager_modes() def testRestoreIntoSubNetwork(self): @@ -221,17 +223,18 @@ class NetworkTest(test.TestCase): whole_model_saver(one) self.evaluate(whole_model_saver.variables[0].assign([[15.]])) self.evaluate(whole_model_saver.variables[1].assign([[16.]])) - whole_model_checkpoint = whole_model_saver.save(self.get_temp_dir()) + whole_model_checkpoint = network.save_network_checkpoint( + whole_model_saver, self.get_temp_dir()) save_from = MyNetwork() save_from(one) self.evaluate(save_from.variables[0].assign([[5.]])) - checkpoint = save_from.save(self.get_temp_dir()) + checkpoint = network.save_network_checkpoint(save_from, self.get_temp_dir()) save_into_parent = Parent() - 
save_into_parent.restore(whole_model_checkpoint) - save_into_parent.first.restore(checkpoint) - save_into_parent.first.restore(checkpoint) # deferred loading multiple - # times is fine + network.restore_network_checkpoint(save_into_parent, whole_model_checkpoint) + network.restore_network_checkpoint(save_into_parent.first, checkpoint) + # deferred loading multiple times is fine + network.restore_network_checkpoint(save_into_parent.first, checkpoint) save_into_parent(one) # deferred loading self.assertAllEqual([[5.]], self.evaluate(save_into_parent.variables[0])) self.assertAllEqual([[16.]], self.evaluate(save_into_parent.variables[1])) @@ -240,9 +243,9 @@ class NetworkTest(test.TestCase): # (deferred restoration should happen the same way non-deferred happens, # with later restorations overwriting older ones). save_into_parent = Parent() - save_into_parent.first.restore(checkpoint) # deferred loading multiple - # times is fine - save_into_parent.restore(whole_model_checkpoint) + # deferred loading multiple times is fine + network.restore_network_checkpoint(save_into_parent.first, checkpoint) + network.restore_network_checkpoint(save_into_parent, whole_model_checkpoint) save_into_parent(one) # deferred loading # We've overwritten the sub-Network restore. self.assertAllEqual([[15.]], self.evaluate(save_into_parent.variables[0])) @@ -250,12 +253,12 @@ class NetworkTest(test.TestCase): self.evaluate(save_into_parent.variables[0].assign([[3.]])) self.evaluate(save_into_parent.variables[1].assign([[4.]])) - save_into_parent.second.restore(checkpoint) + network.restore_network_checkpoint(save_into_parent.second, checkpoint) self.assertAllEqual([[5.]], self.evaluate(save_into_parent.variables[1])) with self.assertRaisesRegexp(errors_impl.NotFoundError, "not found in checkpoint"): # The checkpoint is incompatible. 
- save_into_parent.restore(checkpoint) + network.restore_network_checkpoint(save_into_parent, checkpoint) @test_util.run_in_graph_and_eager_modes() def testCustomMapCollisionErrors(self): @@ -277,25 +280,30 @@ class NetworkTest(test.TestCase): self.evaluate(make_checkpoint.variables[1].assign([[3.]])) with self.assertRaisesRegexp( ValueError, - "The map_func passed to Network.save for the Network 'parent_1' " - "resulted in two variables named 'foo'"): - make_checkpoint.save(self.get_temp_dir(), map_func=lambda n: "foo") - checkpoint = make_checkpoint.first.save( - self.get_temp_dir(), map_func=lambda n: "foo") + "The map_func passed to save_network_checkpoint for the Network " + "'parent_1' resulted in two variables named 'foo'"): + network.save_network_checkpoint( + make_checkpoint, self.get_temp_dir(), map_func=lambda n: "foo") + checkpoint = network.save_network_checkpoint( + network=make_checkpoint.first, + save_path=self.get_temp_dir(), + map_func=lambda n: "foo") loader = Parent() - loader.restore(checkpoint, map_func=lambda n: "foo") + network.restore_network_checkpoint( + loader, checkpoint, map_func=lambda n: "foo") with self.assertRaisesRegexp( ValueError, - ("The map_func passed to Network.restore for the Network" + ("The map_func passed to restore_network_checkpoint for the Network" " 'parent_2' resulted in two variables named 'foo'")): loader(one) loader = Parent() loader(one) with self.assertRaisesRegexp( ValueError, - ("The map_func passed to Network.restore for the Network" + ("The map_func passed to restore_network_checkpoint for the Network" " 'parent_3' resulted in two variables named 'foo'")): - loader.restore(checkpoint, map_func=lambda n: "foo") + network.restore_network_checkpoint( + loader, checkpoint, map_func=lambda n: "foo") @test_util.run_in_graph_and_eager_modes() def testDefaultMapCollisionErrors(self): @@ -323,7 +331,7 @@ class NetworkTest(test.TestCase): ValueError, ("The default checkpoint variable name mapping strategy for Network " "'parent_1' resulted in a naming conflict.")): - make_checkpoint.save(self.get_temp_dir()) + network.save_network_checkpoint(make_checkpoint, self.get_temp_dir()) class Compatible(network.Network): @@ -337,14 +345,15 @@ class NetworkTest(test.TestCase): successful_checkpoint = Compatible() successful_checkpoint(one) self.evaluate(successful_checkpoint.variables[0].assign([[-1.]])) - checkpoint_path = successful_checkpoint.save(self.get_temp_dir()) + checkpoint_path = network.save_network_checkpoint( + successful_checkpoint, self.get_temp_dir()) load_checkpoint = Parent() load_checkpoint(one) with self.assertRaisesRegexp( ValueError, ("The default checkpoint variable name mapping strategy for Network " "'parent_2' resulted in a naming conflict.")): - load_checkpoint.restore(checkpoint_path) + network.restore_network_checkpoint(load_checkpoint, checkpoint_path) def testNoReferenceCyclesAfterCall(self): @@ -494,17 +503,17 @@ class NetworkTest(test.TestCase): self.assertStartsWith( expected_start="scope1/scope2/my_network_1/dense_1/", actual=net.trainable_weights[0].name) - save_path = net.save(self.get_temp_dir()) + save_path = network.save_network_checkpoint(net, self.get_temp_dir()) self.assertIn("scope1_scope2_my_network_1", save_path) restore_net = MyNetwork() # Delayed restoration - restore_net.restore(save_path) + network.restore_network_checkpoint(restore_net, save_path) restore_net(constant_op.constant([[1.0]])) self.assertAllEqual([[42.]], self.evaluate(restore_net.variables[0])) 
self.evaluate(restore_net.variables[0].assign([[-1.]])) # Immediate restoration - restore_net.restore(save_path) + network.restore_network_checkpoint(restore_net, save_path) self.assertAllEqual([[42.]], self.evaluate(restore_net.variables[0])) diff --git a/tensorflow/contrib/eager/python/tfe.py b/tensorflow/contrib/eager/python/tfe.py index b6c687c8294..577d3efef63 100644 --- a/tensorflow/contrib/eager/python/tfe.py +++ b/tensorflow/contrib/eager/python/tfe.py @@ -46,13 +46,16 @@ To use, at program startup, call `tfe.enable_eager_execution()`. @@seterr @@Iterator -@@Network @@Saver @@restore_variables_on_create @@Variable @@get_optimizer_variables @@EagerVariableStore +@@Network +@@save_network_checkpoint +@@restore_network_checkpoint + @@in_eager_mode @@in_graph_mode @@ -74,6 +77,8 @@ from __future__ import print_function from tensorflow.contrib.eager.python import metrics from tensorflow.contrib.eager.python.datasets import Iterator from tensorflow.contrib.eager.python.network import Network +from tensorflow.contrib.eager.python.network import save_network_checkpoint +from tensorflow.contrib.eager.python.network import restore_network_checkpoint from tensorflow.contrib.eager.python.saver import get_optimizer_variables from tensorflow.contrib.eager.python.saver import restore_variables_on_create from tensorflow.contrib.eager.python.saver import Saver From 21bb1160d37e8cd6e4ea6141497b91a6e9a0a529 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 14:33:31 -0800 Subject: [PATCH 025/104] Switch use of gRPC Generic API to Generic Unary, reducing CQ trips from 4->1 PiperOrigin-RevId: 175736600 --- .../rpc/grpc_remote_worker.cc | 22 ++-- .../rpc/grpc_remote_worker.h | 4 +- .../core/distributed_runtime/rpc/grpc_state.h | 107 +++++------------- .../core/distributed_runtime/rpc/grpc_util.cc | 21 ---- .../core/distributed_runtime/rpc/grpc_util.h | 23 ---- .../rpc/grpc_worker_cache.cc | 7 +- 6 files changed, 40 insertions(+), 144 deletions(-) diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc index 2b9798d413c..170c72deca7 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc @@ -39,12 +39,10 @@ namespace tensorflow { class GrpcRemoteWorker : public WorkerInterface { public: - explicit GrpcRemoteWorker(GrpcCounter* live_rpc_counter, - SharedGrpcChannelPtr channel, + explicit GrpcRemoteWorker(SharedGrpcChannelPtr channel, ::grpc::CompletionQueue* completion_queue, WorkerCacheLogger* logger) - : counter_(live_rpc_counter), - channel_(std::move(channel)), + : channel_(std::move(channel)), stub_(channel_), cq_(completion_queue), getstatus_(Method(GrpcWorkerMethod::kGetStatus)), @@ -182,23 +180,21 @@ class GrpcRemoteWorker : public WorkerInterface { void IssueRequest(const protobuf::Message* request, protobuf::Message* response, const ::grpc::string& method, StatusCallback done, CallOptions* call_opts = nullptr) { - new RPCState(counter_, &stub_, cq_, method, *request, - response, std::move(done), call_opts); + new RPCState(&stub_, cq_, method, *request, response, + std::move(done), call_opts); } void IssueRequest(const protobuf::Message* request, TensorResponse* response, const ::grpc::string& method, StatusCallback done, CallOptions* call_opts = nullptr) { - new RPCState(counter_, &stub_, cq_, method, *request, - response, std::move(done), call_opts); + new RPCState(&stub_, cq_, method, *request, 
response, + std::move(done), call_opts); } // Helper function for initializing the RpcMethod objects below. const char* Method(GrpcWorkerMethod id) { return GrpcWorkerMethodName(id); } - GrpcCounter* const counter_; SharedGrpcChannelPtr channel_; ::grpc::GenericStub stub_; - ::grpc::CompletionQueue* cq_; const ::grpc::string getstatus_; @@ -218,12 +214,10 @@ class GrpcRemoteWorker : public WorkerInterface { TF_DISALLOW_COPY_AND_ASSIGN(GrpcRemoteWorker); }; -WorkerInterface* NewGrpcRemoteWorker(GrpcCounter* live_rpc_counter, - SharedGrpcChannelPtr channel, +WorkerInterface* NewGrpcRemoteWorker(SharedGrpcChannelPtr channel, ::grpc::CompletionQueue* completion_queue, WorkerCacheLogger* logger) { - return new GrpcRemoteWorker(live_rpc_counter, std::move(channel), - completion_queue, logger); + return new GrpcRemoteWorker(std::move(channel), completion_queue, logger); } } // namespace tensorflow diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h index 174dfcc7072..8ad41335409 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h +++ b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.h @@ -26,12 +26,10 @@ class CompletionQueue; namespace tensorflow { -class GrpcCounter; class WorkerCacheLogger; class WorkerInterface; -WorkerInterface* NewGrpcRemoteWorker(GrpcCounter* live_rpc_counter, - SharedGrpcChannelPtr channel, +WorkerInterface* NewGrpcRemoteWorker(SharedGrpcChannelPtr channel, ::grpc::CompletionQueue* completion_queue, WorkerCacheLogger* logger); diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_state.h b/tensorflow/core/distributed_runtime/rpc/grpc_state.h index 087b49ba765..3f80bdfb70d 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_state.h +++ b/tensorflow/core/distributed_runtime/rpc/grpc_state.h @@ -34,24 +34,18 @@ template class RPCState : public GrpcClientCQTag { public: // Default behavior is to set fail_fast = False and handle timeouts manually. - RPCState(GrpcCounter* counter, ::grpc::GenericStub* stub, - ::grpc::CompletionQueue* cq, const ::grpc::string& method, - const protobuf::Message& request, Response* response, - StatusCallback done, CallOptions* call_opts) - : RPCState(counter, stub, cq, method, request, response, std::move(done), + RPCState(::grpc::GenericStub* stub, ::grpc::CompletionQueue* cq, + const ::grpc::string& method, const protobuf::Message& request, + Response* response, StatusCallback done, CallOptions* call_opts) + : RPCState(stub, cq, method, request, response, std::move(done), call_opts, /*fail_fast=*/false, /*timeout_in_ms=*/0) {} template - RPCState(GrpcCounter* counter, ::grpc::GenericStub* stub, - ::grpc::CompletionQueue* cq, const ::grpc::string& method, - const Request& request, Response* response, StatusCallback done, - CallOptions* call_opts, bool fail_fast, int64 timeout_in_ms) - : counter_(counter), call_opts_(call_opts), done_(std::move(done)) { - // TODO(sanjay): The counter will no longer be needed once we - // get a GenericStub API which allows us to manage an entire - // RPC with a single completion event instead of four events. 
- counter_->Increment(); - + RPCState(::grpc::GenericStub* stub, ::grpc::CompletionQueue* cq, + const ::grpc::string& method, const Request& request, + Response* response, StatusCallback done, CallOptions* call_opts, + bool fail_fast, int64 timeout_in_ms) + : call_opts_(call_opts), done_(std::move(done)) { context_.set_fail_fast(fail_fast); if (timeout_in_ms > 0) { context_.set_deadline(gpr_time_from_millis(timeout_in_ms, GPR_TIMESPAN)); @@ -61,84 +55,43 @@ class RPCState : public GrpcClientCQTag { call_opts->SetCancelCallback([this]() { context_.TryCancel(); }); } - failure_.store(false); - remaining_callbacks_.store(4); // Init/Read/Write/Finish callbacks response_ = response; GrpcMaybeUnparseProto(request, &request_buf_); - // TODO(sanjay): When new enough grpc is available, enable the following: - // context_.set_initial_metadata_corked(true); - // We can then skip the extra state transition for init callback. - call_ = std::move(stub->Call(&context_, method, cq, this)); - call_initialized_.Notify(); + call_ = + std::move(stub->PrepareUnaryCall(&context_, method, request_buf_, cq)); + call_->StartCall(); + call_->Finish(&response_buf_, &status_, this); } - // Called multiple times: when init done, read done, write done, call done. void OnCompleted(bool ok) override { - if (!ok) failure_.store(true); - const int old_count = remaining_callbacks_.fetch_sub(1); - if (old_count > 1) { - if (old_count == 4) { - // Init callback finished. Issue remaining ops. - - // Annoyingly enough, the way the generic call API works is - // inherently racy. We can get the following sequence of events: - // 1. stub->Call() starts. - // 2. some stuff happens inside grpc - // 3. grpc delivers the completion event - // 4. tensorflow event handling thread calls init metadata callback - // 5. stub->Call() finishes - // 6. the result of stub->Call() is stored in call_ - // We are currently inside the callback and therefore need to - // wait for step 6 to finish before attempting to touch call_. - call_initialized_.WaitForNotification(); - - if (ok) { - // TODO(sanjay): Use WriteLast() when grpc version we are using - // is new enough. - call_->Write(request_buf_, this); - call_->Read(&response_buf_, this); - } else { - // Skip Write and Read. - remaining_callbacks_.fetch_sub(2); - } - call_->Finish(&status_, this); - } - // Still waiting for some more callbacks to finish. - return; - } else { // old_count == 1, i.e., all callbacks have finished - // Last callback finished; clean up. 
- if (call_opts_) { - call_opts_->ClearCancelCallback(); - } - Status s = FromGrpcStatus(status_); - if (s.ok() && failure_.load()) { - s.Update(errors::Internal("callback error")); - } - if (s.ok() && !GrpcMaybeParseProto(response_buf_, response_)) { - s.Update(errors::Internal("could not parse rpc response")); - } - if (!s.ok()) { - VLOG(2) << "Call returned with non-ok status: " << s; - } - done_(s); - counter_->Decrement(); - delete this; + if (call_opts_) { + call_opts_->ClearCancelCallback(); } + Status s = FromGrpcStatus(status_); + if (s.ok() && !ok) { + // Since this function is only being used for processing the response + // to Finish for client-side unary calls, ok should never be false + s.Update(errors::Internal("unexpected ok value at rpc completion")); + } + if (s.ok() && !GrpcMaybeParseProto(response_buf_, response_)) { + s.Update(errors::Internal("could not parse rpc response")); + } + if (!s.ok()) { + VLOG(2) << "Call returned with non-ok status: " << s; + } + done_(s); + delete this; } private: - GrpcCounter* const counter_; CallOptions* call_opts_; ::grpc::ClientContext context_; - std::unique_ptr<::grpc::GenericClientAsyncReaderWriter> call_; + std::unique_ptr<::grpc::GenericClientAsyncResponseReader> call_; Response* response_; ::grpc::ByteBuffer request_buf_; ::grpc::ByteBuffer response_buf_; ::grpc::Status status_; StatusCallback done_; - std::atomic failure_; - std::atomic remaining_callbacks_; - Notification call_initialized_; }; } // namespace tensorflow diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_util.cc b/tensorflow/core/distributed_runtime/rpc/grpc_util.cc index 9a97978c503..c80728544b0 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_util.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_util.cc @@ -135,25 +135,4 @@ bool GrpcMaybeParseProto(const grpc::ByteBuffer& src, string* dst) { return true; } -void GrpcCounter::Increment() { - mutex_lock l(mu_); - counter_++; -} - -void GrpcCounter::Decrement() { - mutex_lock l(mu_); - DCHECK_GT(counter_, 0); - counter_--; - if (counter_ == 0) { - empty_.notify_all(); - } -} - -void GrpcCounter::WaitUntilUnused() { - mutex_lock l(mu_); - while (counter_ != 0) { - empty_.wait(l); - } -} - } // namespace tensorflow diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_util.h b/tensorflow/core/distributed_runtime/rpc/grpc_util.h index 04a54e672cb..0ddcd89130b 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_util.h +++ b/tensorflow/core/distributed_runtime/rpc/grpc_util.h @@ -84,29 +84,6 @@ class GrpcByteBufferSource : public ::grpc::protobuf::io::ZeroCopyInputStream { ::grpc::protobuf::int64 byte_count_; }; -// GrpcCounter is used to delay shutdown until all active RPCs are done. -class GrpcCounter { - public: - GrpcCounter() {} - - GrpcCounter(const GrpcCounter&) = delete; - GrpcCounter& operator=(const GrpcCounter&) = delete; - - // Increment the count of live RPCs. - void Increment(); - - // Decrement the count of live RPCs. - void Decrement(); - - // Wait until count of live RPCs is zero. 
- void WaitUntilUnused(); - - private: - mutex mu_; - condition_variable empty_; - int counter_ = 0; -}; - } // namespace tensorflow #endif // THIRD_PARTY_TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_ diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc index 06695db7790..a7b93e04607 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc @@ -51,9 +51,6 @@ class GrpcWorkerCache : public WorkerCachePartial { // Explicit destructor to control destruction order. ~GrpcWorkerCache() override { - // Wait until all live rpcs are done since otherwise the completion - // queue shutdown will interfere with rpc operation. - live_rpc_counter_.WaitUntilUnused(); completion_queue_.Shutdown(); delete polling_thread_; // Blocks until thread exits. delete channel_cache_; @@ -69,8 +66,7 @@ class GrpcWorkerCache : public WorkerCachePartial { } else { SharedGrpcChannelPtr channel = channel_cache_->FindWorkerChannel(target); if (!channel) return nullptr; - return NewGrpcRemoteWorker(&live_rpc_counter_, channel, - &completion_queue_, &logger_); + return NewGrpcRemoteWorker(channel, &completion_queue_, &logger_); } } @@ -94,7 +90,6 @@ class GrpcWorkerCache : public WorkerCachePartial { private: const string local_target_; WorkerInterface* const local_worker_; // Not owned. - GrpcCounter live_rpc_counter_; GrpcChannelCache* channel_cache_; // Owned. ::grpc::CompletionQueue completion_queue_; Thread* polling_thread_; // Owned. From e8b2049f5be7accae9f272972acfc5afb36c5ef2 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 14:34:32 -0800 Subject: [PATCH 026/104] Fix docstring typo: "GraphKey" -> "GraphKeys". PiperOrigin-RevId: 175736748 --- tensorflow/python/training/optimizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/python/training/optimizer.py b/tensorflow/python/training/optimizer.py index 9f5e8ec9389..b31d02eb8d7 100644 --- a/tensorflow/python/training/optimizer.py +++ b/tensorflow/python/training/optimizer.py @@ -381,7 +381,7 @@ class Optimizer(object): loss: A Tensor containing the value to minimize. var_list: Optional list or tuple of `tf.Variable` to update to minimize `loss`. Defaults to the list of variables collected in the graph - under the key `GraphKey.TRAINABLE_VARIABLES`. + under the key `GraphKeys.TRAINABLE_VARIABLES`. gate_gradients: How to gate the computation of gradients. Can be `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`. aggregation_method: Specifies the method used to combine gradient terms. From b8054c19b7d72cfb7eb07552a8d0385ffd8810d7 Mon Sep 17 00:00:00 2001 From: "Joshua V. Dillon" Date: Tue, 14 Nov 2017 14:41:12 -0800 Subject: [PATCH 027/104] Add `tf.contrib.bayesflow.layers`, a collection of probabilistic (neural) layers. 
PiperOrigin-RevId: 175737756 --- tensorflow/contrib/bayesflow/BUILD | 21 +- tensorflow/contrib/bayesflow/__init__.py | 19 +- .../layers_dense_variational_test.py | 304 +++++++ .../contrib/bayesflow/python/ops/layers.py | 37 + .../ops/layers_dense_variational_impl.py | 797 ++++++++++++++++++ 5 files changed, 1173 insertions(+), 5 deletions(-) create mode 100644 tensorflow/contrib/bayesflow/python/kernel_tests/layers_dense_variational_test.py create mode 100644 tensorflow/contrib/bayesflow/python/ops/layers.py create mode 100644 tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py diff --git a/tensorflow/contrib/bayesflow/BUILD b/tensorflow/contrib/bayesflow/BUILD index 9f3650e8f9c..a262d4aecdb 100644 --- a/tensorflow/contrib/bayesflow/BUILD +++ b/tensorflow/contrib/bayesflow/BUILD @@ -19,6 +19,7 @@ py_library( srcs = ["__init__.py"] + glob(["python/ops/*.py"]), srcs_version = "PY2AND3", deps = [ + "//tensorflow/contrib/distributions:distributions_py", "//tensorflow/contrib/framework:framework_py", "//tensorflow/python:array_ops", "//tensorflow/python:control_flow_ops", @@ -32,7 +33,6 @@ py_library( "//tensorflow/python:random_ops", "//tensorflow/python:state_ops", "//tensorflow/python:util", - "//tensorflow/python/ops/distributions", "//third_party/py/numpy", ], ) @@ -99,6 +99,25 @@ cuda_py_test( ], ) +cuda_py_test( + name = "layers_dense_variational_test", + size = "small", + srcs = ["python/kernel_tests/layers_dense_variational_test.py"], + additional_deps = [ + ":bayesflow_py", + "//third_party/py/numpy", + "//tensorflow/contrib/distributions:distributions_py", + "//tensorflow/python/ops/distributions", + "//tensorflow/python:array_ops", + "//tensorflow/python:client_testlib", + "//tensorflow/python:framework_for_generated_wrappers", + "//tensorflow/python:gradients", + "//tensorflow/python:linalg_ops", + "//tensorflow/python:math_ops", + "//tensorflow/python:nn_ops", + ], +) + cuda_py_test( name = "monte_carlo_test", size = "small", diff --git a/tensorflow/contrib/bayesflow/__init__.py b/tensorflow/contrib/bayesflow/__init__.py index a638753f2f0..95b9452b1ad 100644 --- a/tensorflow/contrib/bayesflow/__init__.py +++ b/tensorflow/contrib/bayesflow/__init__.py @@ -25,6 +25,7 @@ from tensorflow.contrib.bayesflow.python.ops import csiszar_divergence from tensorflow.contrib.bayesflow.python.ops import custom_grad from tensorflow.contrib.bayesflow.python.ops import halton_sequence from tensorflow.contrib.bayesflow.python.ops import hmc +from tensorflow.contrib.bayesflow.python.ops import layers from tensorflow.contrib.bayesflow.python.ops import metropolis_hastings from tensorflow.contrib.bayesflow.python.ops import monte_carlo from tensorflow.contrib.bayesflow.python.ops import optimizers @@ -33,9 +34,19 @@ from tensorflow.contrib.bayesflow.python.ops import optimizers from tensorflow.python.util.all_util import remove_undocumented -_allowed_symbols = ['csiszar_divergence', 'custom_grad', 'entropy', - 'metropolis_hastings', 'monte_carlo', 'halton_sequence', - 'hmc', 'optimizers', 'special_math', 'stochastic_variables', - 'variational_inference'] +_allowed_symbols = [ + 'csiszar_divergence', + 'custom_grad', + 'entropy', + 'halton_sequence', + 'hmc', + 'layers', + 'metropolis_hastings', + 'monte_carlo', + 'optimizers', + 'special_math', + 'stochastic_variables', + 'variational_inference', +] remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/bayesflow/python/kernel_tests/layers_dense_variational_test.py 
b/tensorflow/contrib/bayesflow/python/kernel_tests/layers_dense_variational_test.py new file mode 100644 index 00000000000..50358fd1c2b --- /dev/null +++ b/tensorflow/contrib/bayesflow/python/kernel_tests/layers_dense_variational_test.py @@ -0,0 +1,304 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Tests for dense Bayesian layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorflow.contrib.bayesflow.python.ops import layers_dense_variational_impl as prob_layers_lib +from tensorflow.python.framework import ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops.distributions import normal as normal_lib +from tensorflow.python.platform import test + + +class Counter(object): + """Helper class to manage incrementing a counting `int`.""" + + def __init__(self): + self._value = -1 + + @property + def value(self): + return self._value + + def __call__(self): + self._value += 1 + return self._value + + +class MockDistribution(normal_lib.Normal): + """Monitors DenseVariational calls to the underlying distribution.""" + + def __init__(self, result_sample, result_log_prob, loc=None, scale=None): + self.result_sample = result_sample + self.result_log_prob = result_log_prob + self.result_loc = loc + self.result_scale = scale + self.called_log_prob = Counter() + self.called_sample = Counter() + self.called_loc = Counter() + self.called_scale = Counter() + + def log_prob(self, *args, **kwargs): + self.called_log_prob() + return self.result_log_prob + + def sample(self, *args, **kwargs): + self.called_sample() + return self.result_sample + + @property + def loc(self): + self.called_loc() + return self.result_loc + + @property + def scale(self): + self.called_scale() + return self.result_scale + + +class MockKLDivergence(object): + """Monitors DenseVariational calls to the divergence implementation.""" + + def __init__(self, result): + self.result = result + self.args = [] + self.called = Counter() + + def __call__(self, *args, **kwargs): + self.called() + self.args.append(args) + return self.result + + +class DenseVariationalLocalReparametrization(test.TestCase): + + def testKLPenaltyKernel(self): + with self.test_session(): + dense_vi = prob_layers_lib.DenseVariational(units=2) + inputs = random_ops.random_uniform([2, 3], seed=1) + + # No keys. + loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES) + self.assertEqual(len(loss_keys), 0) + self.assertListEqual(dense_vi.losses, loss_keys) + + _ = dense_vi(inputs) + + # Yes keys. 
+ loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES) + self.assertEqual(len(loss_keys), 1) + self.assertListEqual(dense_vi.losses, loss_keys) + + def testKLPenaltyBoth(self): + def _make_normal(dtype, *args): # pylint: disable=unused-argument + return normal_lib.Normal( + loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)) + with self.test_session(): + dense_vi = prob_layers_lib.DenseVariational( + units=2, + bias_posterior_fn=prob_layers_lib.default_mean_field_normal_fn(), + bias_prior_fn=_make_normal) + inputs = random_ops.random_uniform([2, 3], seed=1) + + # No keys. + loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES) + self.assertEqual(len(loss_keys), 0) + self.assertListEqual(dense_vi.losses, loss_keys) + + _ = dense_vi(inputs) + + # Yes keys. + loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES) + self.assertEqual(len(loss_keys), 2) + self.assertListEqual(dense_vi.losses, loss_keys) + + def testVariationalNonLocal(self): + batch_size, in_size, out_size = 2, 3, 4 + with self.test_session() as sess: + seed = Counter() + inputs = random_ops.random_uniform([batch_size, in_size], seed=seed()) + + kernel_size = [in_size, out_size] + kernel_posterior = MockDistribution( + result_log_prob=random_ops.random_uniform(kernel_size, seed=seed()), + result_sample=random_ops.random_uniform(kernel_size, seed=seed())) + kernel_prior = MockDistribution( + result_log_prob=random_ops.random_uniform(kernel_size, seed=seed()), + result_sample=random_ops.random_uniform(kernel_size, seed=seed())) + kernel_divergence = MockKLDivergence( + result=random_ops.random_uniform(kernel_size, seed=seed())) + + bias_size = [out_size] + bias_posterior = MockDistribution( + result_log_prob=random_ops.random_uniform(bias_size, seed=seed()), + result_sample=random_ops.random_uniform(bias_size, seed=seed())) + bias_prior = MockDistribution( + result_log_prob=random_ops.random_uniform(bias_size, seed=seed()), + result_sample=random_ops.random_uniform(bias_size, seed=seed())) + bias_divergence = MockKLDivergence( + result=random_ops.random_uniform(bias_size, seed=seed())) + + expected_outputs = ( + math_ops.matmul(inputs, kernel_posterior.result_sample) + + bias_posterior.result_sample) + + dense_vi = prob_layers_lib.DenseVariational( + units=2, + kernel_use_local_reparameterization=False, + kernel_posterior_fn=lambda *args: kernel_posterior, + kernel_posterior_tensor_fn=lambda d: d.sample(seed=42), + kernel_prior_fn=lambda *args: kernel_prior, + kernel_divergence_fn=kernel_divergence, + bias_posterior_fn=lambda *args: bias_posterior, + bias_posterior_tensor_fn=lambda d: d.sample(seed=43), + bias_prior_fn=lambda *args: bias_prior, + bias_divergence_fn=bias_divergence) + + outputs = dense_vi(inputs) + + kl_penalty = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES) + + [ + expected_outputs_, actual_outputs_, + expected_kernel_, actual_kernel_, + expected_kernel_divergence_, actual_kernel_divergence_, + expected_bias_, actual_bias_, + expected_bias_divergence_, actual_bias_divergence_, + ] = sess.run([ + expected_outputs, outputs, + kernel_posterior.result_sample, dense_vi.kernel.posterior_tensor, + kernel_divergence.result, kl_penalty[0], + bias_posterior.result_sample, dense_vi.bias.posterior_tensor, + bias_divergence.result, kl_penalty[1], + ]) + + self.assertAllClose( + expected_kernel_, actual_kernel_, + rtol=1e-6, atol=0.) + self.assertAllClose( + expected_bias_, actual_bias_, + rtol=1e-6, atol=0.) 
+ self.assertAllClose( + expected_outputs_, actual_outputs_, + rtol=1e-6, atol=0.) + self.assertAllClose( + expected_kernel_divergence_, actual_kernel_divergence_, + rtol=1e-6, atol=0.) + self.assertAllClose( + expected_bias_divergence_, actual_bias_divergence_, + rtol=1e-6, atol=0.) + + self.assertAllEqual( + [[kernel_posterior, kernel_prior, kernel_posterior.result_sample]], + kernel_divergence.args) + + self.assertAllEqual( + [[bias_posterior, bias_prior, bias_posterior.result_sample]], + bias_divergence.args) + + def testVariationalLocal(self): + batch_size, in_size, out_size = 2, 3, 4 + with self.test_session() as sess: + seed = Counter() + inputs = random_ops.random_uniform([batch_size, in_size], seed=seed()) + + kernel_size = [in_size, out_size] + kernel_posterior = MockDistribution( + loc=random_ops.random_uniform(kernel_size, seed=seed()), + scale=random_ops.random_uniform(kernel_size, seed=seed()), + result_log_prob=random_ops.random_uniform(kernel_size, seed=seed()), + result_sample=random_ops.random_uniform(kernel_size, seed=seed())) + kernel_prior = MockDistribution( + result_log_prob=random_ops.random_uniform(kernel_size, seed=seed()), + result_sample=random_ops.random_uniform(kernel_size, seed=seed())) + kernel_divergence = MockKLDivergence( + result=random_ops.random_uniform(kernel_size, seed=seed())) + + bias_size = [out_size] + bias_posterior = MockDistribution( + result_log_prob=random_ops.random_uniform(bias_size, seed=seed()), + result_sample=random_ops.random_uniform(bias_size, seed=seed())) + bias_prior = MockDistribution( + result_log_prob=random_ops.random_uniform(bias_size, seed=seed()), + result_sample=random_ops.random_uniform(bias_size, seed=seed())) + bias_divergence = MockKLDivergence( + result=random_ops.random_uniform(bias_size, seed=seed())) + + expected_kernel_posterior_affine = normal_lib.Normal( + loc=math_ops.matmul(inputs, kernel_posterior.result_loc), + scale=math_ops.matmul( + inputs**2., kernel_posterior.result_scale**2)**0.5) + expected_kernel_posterior_affine_tensor = ( + expected_kernel_posterior_affine.sample(seed=42)) + expected_outputs = (expected_kernel_posterior_affine_tensor + + bias_posterior.result_sample) + + dense_vi = prob_layers_lib.DenseVariational( + units=2, + kernel_use_local_reparameterization=True, + kernel_posterior_fn=lambda *args: kernel_posterior, + kernel_posterior_tensor_fn=lambda d: d.sample(seed=42), + kernel_prior_fn=lambda *args: kernel_prior, + kernel_divergence_fn=kernel_divergence, + bias_posterior_fn=lambda *args: bias_posterior, + bias_posterior_tensor_fn=lambda d: d.sample(seed=43), + bias_prior_fn=lambda *args: bias_prior, + bias_divergence_fn=bias_divergence) + + outputs = dense_vi(inputs) + + kl_penalty = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES) + + [ + expected_outputs_, actual_outputs_, + expected_kernel_divergence_, actual_kernel_divergence_, + expected_bias_, actual_bias_, + expected_bias_divergence_, actual_bias_divergence_, + ] = sess.run([ + expected_outputs, outputs, + kernel_divergence.result, kl_penalty[0], + bias_posterior.result_sample, dense_vi.bias.posterior_tensor, + bias_divergence.result, kl_penalty[1], + ]) + + self.assertAllClose( + expected_bias_, actual_bias_, + rtol=1e-6, atol=0.) + self.assertAllClose( + expected_outputs_, actual_outputs_, + rtol=1e-6, atol=0.) + self.assertAllClose( + expected_kernel_divergence_, actual_kernel_divergence_, + rtol=1e-6, atol=0.) + self.assertAllClose( + expected_bias_divergence_, actual_bias_divergence_, + rtol=1e-6, atol=0.) 
+ + self.assertAllEqual( + [[kernel_posterior, kernel_prior, None]], + kernel_divergence.args) + + self.assertAllEqual( + [[bias_posterior, bias_prior, bias_posterior.result_sample]], + bias_divergence.args) + + +if __name__ == "__main__": + test.main() diff --git a/tensorflow/contrib/bayesflow/python/ops/layers.py b/tensorflow/contrib/bayesflow/python/ops/layers.py new file mode 100644 index 00000000000..dcead38af82 --- /dev/null +++ b/tensorflow/contrib/bayesflow/python/ops/layers.py @@ -0,0 +1,37 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Probabilistic neural layers. + +See ${python/contrib.bayesflow.layers}. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# go/tf-wildcard-import +# pylint: disable=wildcard-import +from tensorflow.contrib.bayesflow.python.ops.layers_dense_variational_impl import * +# pylint: enable=wildcard-import +from tensorflow.python.util.all_util import remove_undocumented + +_allowed_symbols = [ + 'DenseVariational', + 'dense_variational', + 'default_loc_scale_fn', + 'default_mean_field_normal_fn', +] + +remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py b/tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py new file mode 100644 index 00000000000..b05ce0ffc1d --- /dev/null +++ b/tensorflow/contrib/bayesflow/python/ops/layers_dense_variational_impl.py @@ -0,0 +1,797 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Dense Bayesian layer using KL-divergence based variational inference. 
+ +@@DenseVariational +@@dense_variational + +@@default_loc_scale_fn +@@default_mean_field_normal_fn +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensorflow.contrib.distributions.python.ops import deterministic as deterministic_lib +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.layers import base as layers_lib +from tensorflow.python.ops import init_ops +from tensorflow.python.ops import nn +from tensorflow.python.ops import nn_ops +from tensorflow.python.ops import standard_ops +from tensorflow.python.ops.distributions import kullback_leibler as kl_lib +from tensorflow.python.ops.distributions import normal as normal_lib + + +__all__ = [ + "DenseVariational", + "dense_variational", + "default_loc_scale_fn", + "default_mean_field_normal_fn", +] + + +def default_loc_scale_fn( + is_singular=False, + loc_initializer=init_ops.random_normal_initializer(stddev=0.1), + untransformed_scale_initializer=init_ops.random_normal_initializer( + mean=-3., stddev=0.1), + loc_regularizer=None, + untransformed_scale_regularizer=None, + loc_constraint=None, + untransformed_scale_constraint=None): + """Makes closure which creates `loc`, `scale` params from `tf.get_variable`. + + This function produces a closure which produces `loc`, `scale` using + `tf.get_variable`. The closure accepts the following arguments: + + dtype: Type of parameter's event. + shape: Python `list`-like representing the parameter's event shape. + name: Python `str` name prepended to any created (or existing) + `tf.Variable`s. + trainable: Python `bool` indicating all created `tf.Variable`s should be + added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. + add_variable_fn: `tf.get_variable`-like `callable` used to create (or + access existing) `tf.Variable`s. + + Args: + is_singular: Python `bool` indicating if `scale is None`. Default: `False`. + loc_initializer: Initializer function for the `loc` parameters. + The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`. + untransformed_scale_initializer: Initializer function for the `scale` + parameters. Default value: `tf.random_normal_initializer(mean=-3., + stddev=0.1)`. This implies the softplus transformed result has mean + approximately `0.05` and std. deviation approximately `0.005`. + loc_regularizer: Regularizer function for the `loc` parameters. + The default (`None`) is to use the `tf.get_variable` default. + untransformed_scale_regularizer: Regularizer function for the `scale` + parameters. The default (`None`) is to use the `tf.get_variable` default. + loc_constraint: An optional projection function to be applied to the + loc after being updated by an `Optimizer`. The function must take as input + the unprojected variable and must return the projected variable (which + must have the same shape). Constraints are not safe to use when doing + asynchronous distributed training. + The default (`None`) is to use the `tf.get_variable` default. + untransformed_scale_constraint: An optional projection function to be + applied to the `scale` parameters after being updated by an `Optimizer` + (e.g. used to implement norm constraints or value constraints). The + function must take as input the unprojected variable and must return the + projected variable (which must have the same shape). 
Constraints are not + safe to use when doing asynchronous distributed training. The default + (`None`) is to use the `tf.get_variable` default. + + Returns: + default_loc_scale_fn: Python `callable` which instantiates `loc`, `scale` + parameters from args: `dtype, shape, name, trainable, add_variable_fn`. + """ + def _fn(dtype, shape, name, trainable, add_variable_fn): + """Creates `loc`, `scale` parameters.""" + loc = add_variable_fn( + name=name + "_loc", + shape=shape, + initializer=loc_initializer, + regularizer=loc_regularizer, + constraint=loc_constraint, + dtype=dtype, + trainable=trainable) + if is_singular: + return loc, None + untransformed_scale = add_variable_fn( + name=name + "_untransformed_scale", + shape=shape, + initializer=untransformed_scale_initializer, + regularizer=untransformed_scale_regularizer, + constraint=untransformed_scale_constraint, + dtype=dtype, + trainable=trainable) + scale = (np.finfo(dtype.as_numpy_dtype).eps + + nn_ops.softplus(untransformed_scale)) + return loc, scale + return _fn + + +def default_mean_field_normal_fn( + is_singular=False, + loc_initializer=None, + untransformed_scale_initializer=None, + loc_regularizer=None, + untransformed_scale_regularizer=None, + loc_constraint=None, + untransformed_scale_constraint=None): + """Creates a function to build Normal distributions with trainable params. + + This function produces a closure which produces `tf.distributions.Normal` + parameterized by a loc` and `scale` each created using `tf.get_variable`. The + produced closure accepts the following arguments: + + name: Python `str` name prepended to any created (or existing) + `tf.Variable`s. + shape: Python `list`-like representing the parameter's event shape. + dtype: Type of parameter's event. + trainable: Python `bool` indicating all created `tf.Variable`s should be + added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. + add_variable_fn: `tf.get_variable`-like `callable` used to create (or + access existing) `tf.Variable`s. + + Args: + is_singular: Python `bool` if `True`, forces the special case limit of + `scale->0`, i.e., a `Deterministic` distribution. + loc_initializer: Initializer function for the `loc` parameters. + If `None` (default), values are initialized using the default + initializer used by `tf.get_variable`. + untransformed_scale_initializer: Initializer function for the `scale` + parameters. If `None` (default), values are initialized using the default + initializer used by `tf.get_variable`. + loc_regularizer: Regularizer function for the `loc` parameters. + untransformed_scale_regularizer: Regularizer function for the `scale` + parameters. + loc_constraint: An optional projection function to be applied to the + loc after being updated by an `Optimizer`. The function must take as input + the unprojected variable and must return the projected variable (which + must have the same shape). Constraints are not safe to use when doing + asynchronous distributed training. + untransformed_scale_constraint: An optional projection function to be + applied to the `scale` parameters after being updated by an `Optimizer` + (e.g. used to implement norm constraints or value constraints). The + function must take as input the unprojected variable and must return the + projected variable (which must have the same shape). Constraints are not + safe to use when doing asynchronous distributed training. 
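The default `untransformed_scale_initializer` is documented above as yielding a softplus-transformed scale with mean near 0.05 and standard deviation near 0.005. A quick NumPy sanity check of that claim, ignoring the tiny `eps` offset added in `_fn` (illustrative sketch, not part of the patch):

```python
# Illustrative check: with untransformed_scale ~ Normal(-3, 0.1), the softplus
# used in _fn above gives a scale with mean ~0.05 and std. deviation ~0.005.
import numpy as np

rng = np.random.RandomState(0)
untransformed = rng.normal(loc=-3., scale=0.1, size=100000)
scale = np.log1p(np.exp(untransformed))  # softplus, as in _fn above

print(scale.mean())  # ~0.049, close to the documented 0.05
print(scale.std())   # ~0.005
```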
+ + Returns: + make_normal_fn: Python `callable` which creates a `tf.distributions.Normal` + using from args: `dtype, shape, name, trainable, add_variable_fn`. + """ + loc_scale_fn_ = default_loc_scale_fn( + is_singular, + loc_initializer, + untransformed_scale_initializer, + loc_regularizer, + untransformed_scale_regularizer, + loc_constraint, + untransformed_scale_constraint) + def _fn(dtype, shape, name, trainable, add_variable_fn): + """Creates a batch of `Deterministic` or `Normal` distributions.""" + loc, scale = loc_scale_fn_(dtype, shape, name, trainable, add_variable_fn) + if scale is None: + return deterministic_lib.Deterministic(loc=loc) + return normal_lib.Normal(loc=loc, scale=scale) + return _fn + + +class DenseVariational(layers_lib.Layer): + """Densely-connected variational class. + + This layer implements the Bayesian variational inference analogue to: + `outputs = activation(matmul(inputs, kernel) + bias)` + by assuming the `kernel` and/or the `bias` are random variables. + + The layer implements a stochastic dense calculation by making a Monte Carlo + approximation of a [variational Bayesian method based on KL divergence]( + https://en.wikipedia.org/wiki/Variational_Bayesian_methods), i.e., + + ```none + -log p(y|x) = -log int_{R**d} p(y|x,w) p(w) dw + = -log int_{R**d} p(y,w|x) q(w|x) / q(w|x) dw + <= E_q(W|x)[-log p(y,W|x) + log q(W|x)] # Jensen's + = E_q(W|x)[-log p(y|x,W)] + KL[q(W|x), p(W)] + ~= m**-1 sum{ -log(y|x,w[j]) : w[j] ~ q(W|x), j=1..m } + + KL[q(W|x), p(W)] + ``` + + where `W` denotes the (independent) `kernel` and `bias` random variables, `w` + is a random variate or outcome of `W`, `y` is the label, `x` is the evidence`, + and `~=` denotes an approximation which becomes exact as `m->inf`. The above + bound is sometimes referred to as the negative Evidence Lower BOund or + negative [ELBO](https://arxiv.org/abs/1601.00670). In context of a DNN, this + layer is appropriate to use when the final loss is a negative log-likelihood. + + The Monte-Carlo sum portion is used for the feed-forward calculation of the + DNN. The KL divergence portion can be added to the final loss via: + `loss += sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))`. + + The arguments permit separate specification of the surrogate posterior + (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias` + random variables (which together comprise `W`). + + Args: + units: Integer or Long, dimensionality of the output space. + activation: Activation function (`callable`). Set it to None to maintain a + linear activation. + activity_regularizer: Regularizer function for the output. + trainable: Boolean, if `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + kernel_use_local_reparameterization: Python `bool` indicating whether + `kernel` calculation should employ the Local Reparameterization Trick. + When `True`, `kernel_posterior_fn` must create an instance of + `tf.distributions.Normal`. + kernel_posterior_fn: Python `callable` which creates + `tf.distributions.Distribution` instance representing the surrogate + posterior of the `kernel` parameter. Default value: + `default_mean_field_normal_fn()`. + kernel_posterior_tensor_fn: Python `callable` which takes a + `tf.distributions.Distribution` instance and returns a representative + value. Default value: `lambda d: d.sample()`. + kernel_prior_fn: Python `callable` which creates `tf.distributions` + instance. 
See `default_mean_field_normal_fn` docstring for required
+      parameter signature.
+      Default value: `tf.distributions.Normal(loc=0., scale=1.)`.
+    kernel_divergence_fn: Python `callable` which takes the surrogate posterior
+      distribution, prior distribution and random variate sample(s) from the
+      surrogate posterior and computes or approximates the KL divergence. The
+      distributions are `tf.distributions.Distribution`-like instances and the
+      sample is a `Tensor`.
+    bias_posterior_fn: Python `callable` which creates
+      `tf.distributions.Distribution` instance representing the surrogate
+      posterior of the `bias` parameter. Default value:
+      `default_mean_field_normal_fn(is_singular=True)` (which creates an
+      instance of `tf.distributions.Deterministic`).
+    bias_posterior_tensor_fn: Python `callable` which takes a
+      `tf.distributions.Distribution` instance and returns a representative
+      value. Default value: `lambda d: d.sample()`.
+    bias_prior_fn: Python `callable` which creates `tf.distributions` instance.
+      See `default_mean_field_normal_fn` docstring for required parameter
+      signature. Default value: `None` (no prior, no variational inference).
+    bias_divergence_fn: Python `callable` which takes the surrogate posterior
+      distribution, prior distribution and random variate sample(s) from the
+      surrogate posterior and computes or approximates the KL divergence. The
+      distributions are `tf.distributions.Distribution`-like instances and the
+      sample is a `Tensor`.
+    name: Python `str`, the name of the layer. Layers with the same name will
+      share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in
+      such cases.
+    reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous
+      layer by the same name.
+
+  Properties:
+    units: Python integer, dimensionality of the output space.
+    activation: Activation function (`callable`).
+    activity_regularizer: Regularizer function for the output.
+    kernel_use_local_reparameterization: Python `bool` indicating whether
+      `kernel` calculation should employ the Local Reparameterization Trick.
+    kernel: `VariationalKernelParameter` instance containing all `kernel`
+      related properties and `callable`s.
+    bias: `VariationalParameter` instance containing all `bias`
+      related properties and `callable`s.
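A hedged usage sketch tying the pieces above together. It assumes graph-mode TensorFlow 1.x, the `tf.contrib.bayesflow` module path introduced by this patch, and an arbitrary squared-error likelihood as a stand-in; it shows how the per-layer KL penalties collected under `tf.GraphKeys.REGULARIZATION_LOSSES` are folded into a training loss:

```python
# Illustrative sketch only; names follow the module added in this patch.
import tensorflow as tf
from tensorflow.contrib.bayesflow.python.ops import layers_dense_variational_impl as prob_layers_lib

x = tf.random_uniform([32, 10])  # a batch of 32 feature vectors
y = tf.random_uniform([32, 1])   # matching regression targets

hidden = prob_layers_lib.DenseVariational(units=8, activation=tf.nn.relu)(x)
outputs = prob_layers_lib.DenseVariational(units=1)(hidden)

# Negative log-likelihood part of the ELBO (squared error as a stand-in).
nll = tf.reduce_mean(tf.square(outputs - y))

# KL[q(W|x), p(W)] terms contributed by each DenseVariational layer.
kl = tf.add_n([
    tf.reduce_sum(d)
    for d in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)])

loss = nll + kl
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
```

Each collected divergence keeps the shape of the corresponding parameter (the tests above check exactly this), so it is reduced to a scalar before being added to the likelihood term.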
+ """ + + def __init__( + self, + units, + activation=None, + activity_regularizer=None, + trainable=True, + kernel_use_local_reparameterization=True, + kernel_posterior_fn=default_mean_field_normal_fn(), + kernel_posterior_tensor_fn=lambda d: d.sample(), + kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda + loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)), + kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p), + bias_posterior_fn=default_mean_field_normal_fn(is_singular=True), + bias_posterior_tensor_fn=lambda d: d.sample(), + bias_prior_fn=None, + bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p), + name=None, + **kwargs): + super(DenseVariational, self).__init__( + trainable=trainable, + name=name, + activity_regularizer=activity_regularizer, + **kwargs) + self._units = units + self._activation = activation + self._input_spec = layers_lib.InputSpec(min_ndim=2) + self._kernel_use_local_reparameterization = ( + kernel_use_local_reparameterization) + self._kernel = VariationalKernelParameter( + kernel_posterior_fn, + kernel_posterior_tensor_fn, + kernel_prior_fn, + kernel_divergence_fn) + self._bias = VariationalParameter( + bias_posterior_fn, + bias_posterior_tensor_fn, + bias_prior_fn, + bias_divergence_fn) + + @property + def units(self): + return self._units + + @property + def activation(self): + return self._activation + + @property + def input_spec(self): + return self._input_spec + + @input_spec.setter + def input_spec(self, value): + self._input_spec = value + + @property + def kernel_use_local_reparameterization(self): + return self._kernel_use_local_reparameterization + + @property + def kernel(self): + return self._kernel + + @property + def bias(self): + return self._bias + + def build(self, input_shape): + input_shape = tensor_shape.TensorShape(input_shape) + in_size = input_shape.with_rank_at_least(2)[-1].value + if in_size is None: + raise ValueError("The last dimension of the inputs to `Dense` " + "should be defined. Found `None`.") + self._input_spec = layers_lib.InputSpec(min_ndim=2, axes={-1: in_size}) + dtype = dtypes.as_dtype(self.dtype) + + # Must have a posterior kernel. 
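For reference, the default `kernel_divergence_fn` and `bias_divergence_fn` above call `kl_divergence(q, p)`, which for two Normal distributions has a closed form. A small NumPy check of that formula against a Monte Carlo estimate (illustrative only, not part of the patch):

```python
# Illustrative check: KL(N(m1, s1) || N(m2, s2))
#   = log(s2/s1) + (s1**2 + (m1 - m2)**2) / (2 * s2**2) - 0.5,
# which a Monte Carlo estimate of E_q[log q(w) - log p(w)] should match.
import numpy as np

m1, s1 = 0.1, 0.05   # surrogate posterior q
m2, s2 = 0.0, 1.0    # prior p

closed_form = np.log(s2 / s1) + (s1**2 + (m1 - m2)**2) / (2 * s2**2) - 0.5

rng = np.random.RandomState(0)
w = rng.normal(m1, s1, size=200000)
log_q = -0.5 * ((w - m1) / s1)**2 - np.log(s1) - 0.5 * np.log(2 * np.pi)
log_p = -0.5 * ((w - m2) / s2)**2 - np.log(s2) - 0.5 * np.log(2 * np.pi)
monte_carlo = np.mean(log_q - log_p)

print(closed_form, monte_carlo)  # both ~2.50
```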
+ self.kernel.posterior = self.kernel.posterior_fn( + dtype, [in_size, self.units], "kernel_posterior", + self.trainable, self.add_variable) + + if self.kernel.prior_fn is None: + self.kernel_prior = None + else: + self.kernel.prior = self.kernel.prior_fn( + dtype, [in_size, self.units], "kernel_prior", + self.trainable, self.add_variable) + self._built_kernel_divergence = False + + if self.bias.posterior_fn is None: + self.bias.posterior = None + else: + self.bias.posterior = self.bias.posterior_fn( + dtype, [self.units], "bias_posterior", + self.trainable, self.add_variable) + + if self.bias.prior_fn is None: + self.bias.prior = None + else: + self.bias.prior = self.bias.prior_fn( + dtype, [self.units], "bias_prior", + self.trainable, self.add_variable) + self._built_bias_divergence = False + + self.built = True + + def call(self, inputs): + inputs = ops.convert_to_tensor(inputs, dtype=self.dtype) + + outputs = self._apply_variational_kernel(inputs) + outputs = self._apply_variational_bias(outputs) + if self.activation is not None: + outputs = self.activation(outputs) # pylint: disable=not-callable + if not self._built_kernel_divergence: + self._apply_divergence(self.kernel, name="divergence_kernel") + self._built_kernel_divergence = True + if not self._built_bias_divergence: + self._apply_divergence(self.bias, name="divergence_bias") + self._built_bias_divergence = True + return outputs + + def _apply_variational_kernel(self, inputs): + if not self.kernel_use_local_reparameterization: + self.kernel.posterior_tensor = self.kernel.posterior_tensor_fn( + self.kernel.posterior) + self.kernel.posterior_affine = None + self.kernel.posterior_affine_tensor = None + return self._matmul(inputs, self.kernel.posterior_tensor) + if not isinstance(self.kernel.posterior, normal_lib.Normal): + raise TypeError("`kernel_use_local_reparameterization=True` requires " + "`kernel_posterior_fn` produce an instance of " + "`tf.distributions.Normal` (saw: \"{}\").".format( + type(self.kernel.posterior).__name__)) + self.kernel.posterior_affine = normal_lib.Normal( + loc=self._matmul(inputs, self.kernel.posterior.loc), + scale=standard_ops.sqrt(self._matmul( + standard_ops.square(inputs), + standard_ops.square(self.kernel.posterior.scale)))) + self.kernel.posterior_affine_tensor = ( + self.kernel.posterior_tensor_fn(self.kernel.posterior_affine)) + self.kernel.posterior_tensor = None + return self.kernel.posterior_affine_tensor + + def _apply_variational_bias(self, inputs): + if self.bias.posterior is None: + self.bias.posterior_tensor = None + return inputs + self.bias.posterior_tensor = self.bias.posterior_tensor_fn( + self.bias.posterior) + return nn.bias_add(inputs, self.bias.posterior_tensor) + + def _apply_divergence(self, param, name): + if (param.divergence_fn is None or + param.posterior is None or + param.prior is None): + param.divergence = None + return + param.divergence = standard_ops.identity( + param.divergence_fn( + param.posterior, param.prior, param.posterior_tensor), + name=name) + self.add_loss(param.divergence) + + def _matmul(self, inputs, kernel): + if inputs.shape.ndims <= 2: + return standard_ops.matmul(inputs, kernel) + # To handle broadcasting, we must use `tensordot`. 
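The local-reparameterization branch in `_apply_variational_kernel` above samples the pre-activations directly from `Normal(matmul(x, loc), sqrt(matmul(x**2, scale**2)))` instead of sampling the kernel and multiplying; for a kernel with independent Normal entries the two are equal in distribution. A small NumPy check of that identity (illustrative sketch, not part of the patch):

```python
# Illustrative check: for W with independent entries
# W[i, j] ~ Normal(loc[i, j], scale[i, j]), the pre-activation x @ W is
# distributed as Normal(x @ loc, sqrt(x**2 @ scale**2)) elementwise, which is
# the identity behind the local reparameterization branch above.
import numpy as np

rng = np.random.RandomState(0)
in_size, out_size, n = 3, 4, 200000
x = rng.uniform(size=[1, in_size])
loc = rng.normal(size=[in_size, out_size])
scale = rng.uniform(0.1, 0.5, size=[in_size, out_size])

# Sample weights, then multiply (the non-local branch).
w = rng.normal(loc, scale, size=[n, in_size, out_size])
y_weight_space = np.einsum("bi,nio->no", x, w)

# Sample the matmul output directly (the local branch).
y_local = rng.normal(x @ loc, np.sqrt(x**2 @ scale**2), size=[n, out_size])

print(y_weight_space.mean(0), y_local.mean(0))  # agree to ~1e-2
print(y_weight_space.std(0), y_local.std(0))    # agree to ~1e-2
```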
+ return standard_ops.tensordot(inputs, kernel, axes=[[-1], [0]]) + + def _compute_output_shape(self, input_shape): + input_shape = tensor_shape.TensorShape(input_shape).with_rank_at_least(2) + if input_shape[-1].value is None: + raise ValueError( + "The innermost dimension of input_shape must be defined, " + "but saw: {}".format(input_shape)) + return input_shape[:-1].concatenate(self.units) + + +def dense_variational( + inputs, + units, + activation=None, + activity_regularizer=None, + trainable=True, + kernel_use_local_reparameterization=True, + kernel_posterior_fn=default_mean_field_normal_fn(), + kernel_posterior_tensor_fn=lambda d: d.sample(), + kernel_prior_fn=lambda dtype, *args: normal_lib.Normal( # pylint: disable=g-long-lambda + loc=dtype.as_numpy_dtype(0.), scale=dtype.as_numpy_dtype(1.)), + kernel_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p), + bias_posterior_fn=default_mean_field_normal_fn(is_singular=True), + bias_posterior_tensor_fn=lambda d: d.sample(), + bias_prior_fn=None, + bias_divergence_fn=lambda q, p, ignore: kl_lib.kl_divergence(q, p), + name=None, + reuse=None): + """Densely-connected variational layer. + + This layer implements the Bayesian variational inference analogue to: + `outputs = activation(matmul(inputs, kernel) + bias)` + by assuming the `kernel` and/or the `bias` are random variables. + + The layer implements a stochastic dense calculation by making a Monte Carlo + approximation of a [variational Bayesian method based on KL divergence]( + https://en.wikipedia.org/wiki/Variational_Bayesian_methods), i.e., + + ```none + -log p(y|x) = -log int_{R**d} p(y|x,w) p(w) dw + = -log int_{R**d} p(y,w|x) q(w|x) / q(w|x) dw + <= E_q(W|x)[-log p(y,W|x) + log q(W|x)] # Jensen's + = E_q(W|x)[-log p(y|x,W)] + KL[q(W|x), p(W)] + ~= m**-1 sum{ -log(y|x,w[j]) : w[j] ~ q(W|x), j=1..m } + + KL[q(W|x), p(W)] + ``` + + where `W` denotes the (independent) `kernel` and `bias` random variables, `w` + is a random variate or outcome of `W`, `y` is the label, `x` is the evidence`, + and `~=` denotes an approximation which becomes exact as `m->inf`. The above + bound is sometimes referred to as the negative Evidence Lower BOund or + negative [ELBO](https://arxiv.org/abs/1601.00670). In context of a DNN, this + layer is appropriate to use when the final loss is a negative log-likelihood. + + The Monte-Carlo sum portion is used for the feed-forward calculation of the + DNN. The KL divergence portion can be added to the final loss via: + `loss += sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))`. + + The arguments permit separate specification of the surrogate posterior + (`q(W|x)`), prior (`p(W)`), and divergence for both the `kernel` and `bias` + random variables (which together comprise `W`). + + Args: + inputs: Tensor input. + units: Integer or Long, dimensionality of the output space. + activation: Activation function (`callable`). Set it to None to maintain a + linear activation. + activity_regularizer: Regularizer function for the output. + trainable: Boolean, if `True` also add variables to the graph collection + `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). + kernel_use_local_reparameterization: Python `bool` indicating whether + `kernel` calculation should employ the Local Reparameterization Trick. + When `True`, `kernel_posterior_fn` must create an instance of + `tf.distributions.Normal`. 
+ kernel_posterior_fn: Python `callable` which creates + `tf.distributions.Distribution` instance representing the surrogate + posterior of the `kernel` parameter. Default value: + `default_mean_field_normal_fn()`. + kernel_posterior_tensor_fn: Python `callable` which takes a + `tf.distributions.Distribution` instance and returns a representative + value. Default value: `lambda d: d.sample()`. + kernel_prior_fn: Python `callable` which creates `tf.distributions` + instance. See `default_mean_field_normal_fn` docstring for required + parameter signature. + Default value: `tf.distributions.Normal(loc=0., scale=1.)`. + kernel_divergence_fn: Python `callable` which takes the surrogate posterior + distribution, prior distribution and random variate sample(s) from the + surrogate posterior and computes or approximates the KL divergence. The + distributions are `tf.distributions.Distribution`-like instances and the + sample is a `Tensor`. + bias_posterior_fn: Python `callable` which creates + `tf.distributions.Distribution` instance representing the surrogate + posterior of the `bias` parameter. Default value: + `default_mean_field_normal_fn(is_singular=True)` (which creates an + instance of `tf.distributions.Deterministic`). + bias_posterior_tensor_fn: Python `callable` which takes a + `tf.distributions.Distribution` instance and returns a representative + value. Default value: `lambda d: d.sample()`. + bias_prior_fn: Python `callable` which creates `tf.distributions` instance. + See `default_mean_field_normal_fn` docstring for required parameter + signature. Default value: `None` (no prior, no variational inference) + bias_divergence_fn: Python `callable` which takes the surrogate posterior + distribution, prior distribution and random variate sample(s) from the + surrogate posterior and computes or approximates the KL divergence. The + distributions are `tf.distributions.Distribution`-like instances and the + sample is a `Tensor`. + name: Python `str`, the name of the layer. Layers with the same name will + share `tf.Variable`s, but to avoid mistakes we require `reuse=True` in + such cases. + reuse: Python `bool`, whether to reuse the `tf.Variable`s of a previous + layer by the same name. + + Returns: + output: `Tensor` representing a the affine transformed input under a random + draw from the surrogate posterior distribution. + """ + layer = DenseVariational( + units, + activation=activation, + activity_regularizer=activity_regularizer, + trainable=trainable, + kernel_use_local_reparameterization=( + kernel_use_local_reparameterization), + kernel_posterior_fn=kernel_posterior_fn, + kernel_posterior_tensor_fn=kernel_posterior_tensor_fn, + kernel_prior_fn=kernel_prior_fn, + kernel_divergence_fn=kernel_divergence_fn, + bias_posterior_fn=bias_posterior_fn, + bias_posterior_tensor_fn=bias_posterior_tensor_fn, + bias_prior_fn=bias_prior_fn, + bias_divergence_fn=bias_divergence_fn, + name=name, + dtype=inputs.dtype.base_dtype, + _scope=name, + _reuse=reuse) + return layer.apply(inputs) + + +class NotSet(object): + """Helper to track whether a `VariationalParameter` value has been set.""" + pass + + +class VariationalParameter(object): + """Struct-like container of variational parameter properties. + + A `VariationalParameter` is intitialized with Python `callable`s which set the + value of correspondingly named members. Corresponding values have "set once" + semantics, i.e., once set to any value they are immutable. 
+ """ + + def __init__( + self, + posterior_fn, + posterior_tensor_fn, + prior_fn, + divergence_fn): + """Creates the `VariationalParameter` struct-like object. + + Args: + posterior_fn: Python `callable` which creates a + `tf.distribution.Distribution` like object representing the posterior + distribution. See `VariationalParameter.posterior_fn` for `callable`'s + required parameters. + posterior_tensor_fn: Python `callable` which computes a `Tensor` + which represents the `posterior`. + prior_fn: Python `callable` which creates a + `tf.distribution.Distribution` like object representing the prior + distribution. See `VariationalParameter.prior_fn` for `callable`'s + required parameters. + divergence_fn: Python `callable` which computes the KL divergence from + `posterior` to `prior`. See `VariationalParameter.divergence_fn` for + required `callable`'s parameters. + """ + self._posterior_fn = posterior_fn + self._posterior = NotSet() + self._posterior_tensor_fn = posterior_tensor_fn + self._posterior_tensor = NotSet() + self._prior_fn = prior_fn + self._prior = NotSet() + self._divergence_fn = divergence_fn + self._divergence = NotSet() + self._init_helper() + + @property + def posterior_fn(self): + """`callable` which creates `tf.distributions.Distribution`-like posterior. + + The `callable` must accept the following parameters: + name: Python `str` name prepended to any created (or existing) + `tf.Variable`s. + shape: Python `list`-like representing the parameter's event shape. + dtype: Type of parameter's event. + trainable: Python `bool` indicating all created `tf.Variable`s should be + added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. + add_variable_fn: `tf.get_variable`-like `callable` used to create (or + access existing) `tf.Variable`s. + + Returns: + posterior_fn: The Python `callable` specified in `__init__`. + """ + return self._posterior_fn + + @property + def posterior(self): + """`tf.distributions.Distribution`-like instance representing posterior.""" + return self._posterior + + @posterior.setter + def posterior(self, value): + """One-time setter of the `posterior` distribution.""" + if not isinstance(self._posterior, NotSet): + raise ValueError("Cannot override already set attribute.") + self._posterior = value + + @property + def posterior_tensor_fn(self): + """Creates `Tensor` representing the `posterior` distribution. + + The `callable` must accept the following parameters: + posterior: `tf.distributions.Distribution`-like instance. + + Returns: + posterior_tensor_fn: The Python `callable` specified in + `__init__`. + """ + return self._posterior_tensor_fn + + @property + def posterior_tensor(self): + """`Tensor` representing the `posterior` distribution.""" + return self._posterior_tensor + + @posterior_tensor.setter + def posterior_tensor(self, value): + """One-time setter of the `posterior_tensor`.""" + if not isinstance(self._posterior_tensor, NotSet): + raise ValueError("Cannot override already set attribute.") + self._posterior_tensor = value + + @property + def prior_fn(self): + """`callable` which creates `tf.distributions.Distribution`-like prior. + + The `callable` must accept the following parameters: + name: Python `str` name prepended to any created (or existing) + `tf.Variable`s. + shape: Python `list`-like representing the parameter's event shape. + dtype: Type of parameter's event. + trainable: Python `bool` indicating all created `tf.Variable`s should be + added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`. 
+ add_variable_fn: `tf.get_variable`-like `callable` used to create (or + access existing) `tf.Variable`s. + + Returns: + prior_fn: The Python `callable` specified in `__init__`. + """ + return self._prior_fn + + @property + def prior(self): + """`tf.distributions.Distribution`-like instance representing posterior.""" + return self._prior + + @prior.setter + def prior(self, value): + """One-time setter of the `prior` distribution.""" + if not isinstance(self._prior, NotSet): + raise ValueError("Cannot override already set attribute.") + self._prior = value + + @property + def divergence_fn(self): + """`callable` which computes KL-divergence `Tensor` from posterior to prior. + + The `callable` must accept the following parameters: + posterior: `tf.distributions.Distribution`-like instance. + prior: `tf.distributions.Distribution`-like instance. + posterior_tensor: `Tensor` representing value of posterior. + + Returns: + divergence_fn: The Python `callable` specified in `__init__`. + """ + return self._divergence_fn + + @property + def divergence(self): + """`Tensor` representing KL-divergence from posterior to prior.""" + return self._divergence + + @divergence.setter + def divergence(self, value): + """One-time setter of the `divergence`.""" + if not isinstance(self._divergence, NotSet): + raise ValueError("Cannot override already set attribute.") + self._divergence = value + + def _init_helper(self): + pass + + +class VariationalKernelParameter(VariationalParameter): + """Struct-like container of variational kernel properties. + + A `VariationalKernelParameter` is intitialized with Python `callable`s which + set the value of correspondingly named members. Corresponding values have "set + once" semantics, i.e., once set to any value they are immutable. + """ + + @property + def posterior_affine(self): + """`tf.distributions.Distribution` affine transformed posterior.""" + return self._posterior_affine + + @posterior_affine.setter + def posterior_affine(self, value): + """One-time setter of `posterior_affine`.""" + if not isinstance(self._posterior_affine, NotSet): + raise ValueError("Cannot override already set attribute.") + self._posterior_affine = value + + @property + def posterior_affine_tensor(self): + """`Tensor` representing the `posterior_affine` distribution.""" + return self._posterior_affine_tensor + + @posterior_affine_tensor.setter + def posterior_affine_tensor(self, value): + """One-time setter of the `posterior_affine_tensor`.""" + if not isinstance(self._posterior_affine_tensor, NotSet): + raise ValueError("Cannot override already set attribute.") + self._posterior_affine_tensor = value + + def _init_helper(self): + self._posterior_affine = NotSet() + self._posterior_affine_tensor = NotSet() From a5a192865e4c1732b414d6a503d07775f7163a5c Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 14:44:28 -0800 Subject: [PATCH 028/104] Refactors the WALS estimator so that part of the control flow logic happens in the SweepHook. This fixes a bug that causes both input batches (rows and columns) to be fetched during any given sweep. 
PiperOrigin-RevId: 175738242 --- .../contrib/factorization/python/ops/wals.py | 429 +++++++++--------- .../factorization/python/ops/wals_test.py | 112 +++-- 2 files changed, 264 insertions(+), 277 deletions(-) diff --git a/tensorflow/contrib/factorization/python/ops/wals.py b/tensorflow/contrib/factorization/python/ops/wals.py index 3976395d78e..b2f22eb2fce 100644 --- a/tensorflow/contrib/factorization/python/ops/wals.py +++ b/tensorflow/contrib/factorization/python/ops/wals.py @@ -19,7 +19,6 @@ from __future__ import division from __future__ import print_function from tensorflow.contrib.factorization.python.ops import factorization_ops -from tensorflow.contrib.framework.python.ops import variables as framework_variables from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.python.framework import dtypes @@ -32,175 +31,64 @@ from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.summary import summary from tensorflow.python.training import session_run_hook +from tensorflow.python.training import training_util class _SweepHook(session_run_hook.SessionRunHook): """Keeps track of row/col sweeps, and runs prep ops before each sweep.""" - def __init__(self, is_row_sweep_var, train_ops, num_rows, num_cols, - input_row_indices, input_col_indices, row_prep_ops, - col_prep_ops, init_op, completed_sweeps_var): + def __init__(self, is_row_sweep_var, is_sweep_done_var, init_op, + row_prep_ops, col_prep_ops, row_train_op, col_train_op, + switch_op): """Initializes SweepHook. Args: is_row_sweep_var: A Boolean tf.Variable, determines whether we are currently doing a row or column sweep. It is updated by the hook. - train_ops: A list of ops. The ops created by this hook will have - control dependencies on `train_ops`. - num_rows: int, the total number of rows to be processed. - num_cols: int, the total number of columns to be processed. - input_row_indices: A Tensor of type int64. The indices of the input rows - that are processed during the current sweep. All elements of - `input_row_indices` must be in [0, num_rows). - input_col_indices: A Tensor of type int64. The indices of the input - columns that are processed during the current sweep. All elements of - `input_col_indices` must be in [0, num_cols). - row_prep_ops: list of ops, to be run before the beginning of each row - sweep, in the given order. - col_prep_ops: list of ops, to be run before the beginning of each column - sweep, in the given order. + is_sweep_done_var: A Boolean tf.Variable, determines whether we are + starting a new sweep (this is used to determine when to run the prep ops + below). init_op: op to be run once before training. This is typically a local initialization op (such as cache initialization). - completed_sweeps_var: An integer tf.Variable, indicates the number of - completed sweeps. It is updated by the hook. + row_prep_ops: A list of TensorFlow ops, to be run before the beginning of + each row sweep (and during initialization), in the given order. + col_prep_ops: A list of TensorFlow ops, to be run before the beginning of + each column sweep (and during initialization), in the given order. + row_train_op: A TensorFlow op to be run during row sweeps. + col_train_op: A TensorFlow op to be run during column sweeps. + switch_op: A TensorFlow op to be run before each sweep. 
""" - self._num_rows = num_rows - self._num_cols = num_cols + self._is_row_sweep_var = is_row_sweep_var + self._is_sweep_done_var = is_sweep_done_var + self._init_op = init_op self._row_prep_ops = row_prep_ops self._col_prep_ops = col_prep_ops - self._init_op = init_op - self._is_row_sweep_var = is_row_sweep_var - self._completed_sweeps_var = completed_sweeps_var - # Boolean variable that determines whether the init_ops have been run. + self._row_train_op = row_train_op + self._col_train_op = col_train_op + self._switch_op = switch_op + # Boolean variable that determines whether the init_op has been run. self._is_initialized = False - # Ops to run jointly with train_ops, responsible for updating - # `is_row_sweep_var` and incrementing the `global_step` and - # `completed_sweeps` counters. - self._update_op, self._is_sweep_done_var, self._switch_op = ( - self._create_hook_ops(input_row_indices, input_col_indices, train_ops)) - - def _create_hook_ops(self, input_row_indices, input_col_indices, train_ops): - """Creates ops to update is_row_sweep_var, global_step and completed_sweeps. - - Creates two boolean tensors `processed_rows` and `processed_cols`, which - keep track of which rows/cols have been processed during the current sweep. - Returns ops that should be run after each row / col update. - - When `self._is_row_sweep_var` is True, it sets - processed_rows[input_row_indices] to True. - - When `self._is_row_sweep_var` is False, it sets - processed_cols[input_col_indices] to True. - - Args: - input_row_indices: A Tensor. The indices of the input rows that are - processed during the current sweep. - input_col_indices: A Tensor. The indices of the input columns that - are processed during the current sweep. - train_ops: A list of ops. The ops created by this function have control - dependencies on `train_ops`. - - Returns: - A tuple consisting of: - update_op: An op to be run jointly with training. It updates the state - and increments counters (global step and completed sweeps). - is_sweep_done_var: A Boolean tf.Variable, specifies whether the sweep is - done, i.e. all rows (during a row sweep) or all columns (during a - column sweep) have been processed. - switch_op: An op to be run in `self.before_run` when the sweep is done. - """ - processed_rows_init = array_ops.fill(dims=[self._num_rows], value=False) - with ops.colocate_with(processed_rows_init): - processed_rows = variable_scope.variable( - processed_rows_init, - collections=[ops.GraphKeys.GLOBAL_VARIABLES], - trainable=False, - name="sweep_hook_processed_rows") - processed_cols_init = array_ops.fill(dims=[self._num_cols], value=False) - with ops.colocate_with(processed_cols_init): - processed_cols = variable_scope.variable( - processed_cols_init, - collections=[ops.GraphKeys.GLOBAL_VARIABLES], - trainable=False, - name="sweep_hook_processed_cols") - switch_ops = control_flow_ops.group( - state_ops.assign( - self._is_row_sweep_var, - math_ops.logical_not(self._is_row_sweep_var)), - state_ops.assign(processed_rows, processed_rows_init), - state_ops.assign(processed_cols, processed_cols_init)) - is_sweep_done_var = variable_scope.variable( - False, - collections=[ops.GraphKeys.GLOBAL_VARIABLES], - trainable=False, - name="is_sweep_done") - - # After running the `train_ops`, updates `processed_rows` or - # `processed_cols` tensors, depending on whether this is a row or col sweep. 
- with ops.control_dependencies(train_ops): - with ops.colocate_with(processed_rows): - update_processed_rows = state_ops.scatter_update( - processed_rows, - input_row_indices, - math_ops.logical_and( - self._is_row_sweep_var, - array_ops.ones_like(input_row_indices, dtype=dtypes.bool))) - with ops.colocate_with(processed_cols): - update_processed_cols = state_ops.scatter_update( - processed_cols, - input_col_indices, - math_ops.logical_and( - math_ops.logical_not(self._is_row_sweep_var), - array_ops.ones_like(input_col_indices, dtype=dtypes.bool))) - update_processed_op = control_flow_ops.group( - update_processed_rows, update_processed_cols) - - with ops.control_dependencies([update_processed_op]): - is_sweep_done = math_ops.logical_or( - math_ops.reduce_all(processed_rows), - math_ops.reduce_all(processed_cols)) - # Increments global step. - global_step = framework_variables.get_global_step() - if global_step is not None: - global_step_incr_op = state_ops.assign_add( - global_step, 1, name="global_step_incr").op - else: - global_step_incr_op = control_flow_ops.no_op() - # Increments completed sweeps. - completed_sweeps_incr_op = state_ops.assign_add( - self._completed_sweeps_var, - math_ops.cast(is_sweep_done, dtypes.int32), - use_locking=True).op - update_ops = control_flow_ops.group( - global_step_incr_op, - completed_sweeps_incr_op, - state_ops.assign(is_sweep_done_var, is_sweep_done)) - - return update_ops, is_sweep_done_var, switch_ops def before_run(self, run_context): """Runs the appropriate prep ops, and requests running update ops.""" - # Runs the appropriate init ops and prep ops. sess = run_context.session is_sweep_done = sess.run(self._is_sweep_done_var) if not self._is_initialized: - logging.info("SweepHook running cache init op.") + logging.info("SweepHook running init op.") sess.run(self._init_op) if is_sweep_done: sess.run(self._switch_op) + is_row_sweep = sess.run(self._is_row_sweep_var) if is_sweep_done or not self._is_initialized: - logging.info("SweepHook running sweep prep ops.") - row_sweep = sess.run(self._is_row_sweep_var) - prep_ops = self._row_prep_ops if row_sweep else self._col_prep_ops + logging.info("SweepHook running prep ops for the {} sweep.".format( + "row" if is_row_sweep else "col")) + prep_ops = self._row_prep_ops if is_row_sweep else self._col_prep_ops for prep_op in prep_ops: sess.run(prep_op) - self._is_initialized = True - - # Requests running `self._update_op` jointly with the training op. logging.info("Next fit step starting.") - return session_run_hook.SessionRunArgs(fetches=[self._update_op]) - - def after_run(self, run_context, run_values): - logging.info("Fit step done.") + return session_run_hook.SessionRunArgs( + fetches=[self._row_train_op if is_row_sweep else self._col_train_op]) class _StopAtSweepHook(session_run_hook.SessionRunHook): @@ -246,6 +134,9 @@ def _wals_factorization_model_function(features, labels, mode, params): Returns: A ModelFnOps object. + + Raises: + ValueError: If `mode` is not recognized. """ assert labels is None use_factors_weights_cache = (params["use_factors_weights_cache_for_training"] @@ -269,86 +160,156 @@ def _wals_factorization_model_function(features, labels, mode, params): use_gramian_cache=use_gramian_cache) # Get input rows and cols. We either update rows or columns depending on - # the value of row_sweep, which is maintained using a session hook + # the value of row_sweep, which is maintained using a session hook. 
input_rows = features[WALSMatrixFactorization.INPUT_ROWS] input_cols = features[WALSMatrixFactorization.INPUT_COLS] - input_row_indices, _ = array_ops.unique(input_rows.indices[:, 0]) - input_col_indices, _ = array_ops.unique(input_cols.indices[:, 0]) - # Train ops, controlled using the SweepHook - # We need to run the following ops: - # Before a row sweep: - # row_update_prep_gramian_op - # initialize_row_update_op - # During a row sweep: - # update_row_factors_op - # Before a col sweep: - # col_update_prep_gramian_op - # initialize_col_update_op - # During a col sweep: - # update_col_factors_op + # TRAIN mode: + if mode == model_fn.ModeKeys.TRAIN: + # Training consists of the folowing ops (controlled using a SweepHook). + # Before a row sweep: + # row_update_prep_gramian_op + # initialize_row_update_op + # During a row sweep: + # update_row_factors_op + # Before a col sweep: + # col_update_prep_gramian_op + # initialize_col_update_op + # During a col sweep: + # update_col_factors_op - is_row_sweep_var = variable_scope.variable( - True, - trainable=False, - name="is_row_sweep", - collections=[ops.GraphKeys.GLOBAL_VARIABLES]) - completed_sweeps_var = variable_scope.variable( - 0, - trainable=False, - name=WALSMatrixFactorization.COMPLETED_SWEEPS, - collections=[ops.GraphKeys.GLOBAL_VARIABLES]) + is_row_sweep_var = variable_scope.variable( + True, + trainable=False, + name="is_row_sweep", + collections=[ops.GraphKeys.GLOBAL_VARIABLES]) + is_sweep_done_var = variable_scope.variable( + False, + trainable=False, + name="is_sweep_done", + collections=[ops.GraphKeys.GLOBAL_VARIABLES]) + completed_sweeps_var = variable_scope.variable( + 0, + trainable=False, + name=WALSMatrixFactorization.COMPLETED_SWEEPS, + collections=[ops.GraphKeys.GLOBAL_VARIABLES]) + loss_var = variable_scope.variable( + 0., + trainable=False, + name=WALSMatrixFactorization.LOSS, + collections=[ops.GraphKeys.GLOBAL_VARIABLES]) + # The root weighted squared error = + # \sqrt( \sum_{i,j} w_ij * (a_ij - r_ij)^2 / \sum_{i,j} w_ij ) + rwse_var = variable_scope.variable( + 0., + trainable=False, + name=WALSMatrixFactorization.RWSE, + collections=[ops.GraphKeys.GLOBAL_VARIABLES]) - # The row sweep is determined by is_row_sweep_var (controlled by the - # sweep_hook) in TRAIN mode, and manually in EVAL mode. - is_row_sweep = (features[WALSMatrixFactorization.PROJECT_ROW] - if mode == model_fn.ModeKeys.EVAL else is_row_sweep_var) + summary.scalar("loss", loss_var) + summary.scalar("root_weighted_squared_error", rwse_var) + summary.scalar("completed_sweeps", completed_sweeps_var) - def update_row_factors(): - return model.update_row_factors(sp_input=input_rows, transpose_input=False) + # Increments global step. + global_step = training_util.get_global_step() + if global_step: + global_step_incr_op = state_ops.assign_add( + global_step, 1, name="global_step_incr").op + else: + global_step_incr_op = control_flow_ops.no_op() - def update_col_factors(): - return model.update_col_factors(sp_input=input_cols, transpose_input=True) + def create_axis_ops(sp_input, num_items, update_fn, axis_name): + """Creates book-keeping and training ops for a given axis. - (_, train_op, - unregularized_loss, regularization, sum_weights) = control_flow_ops.cond( - is_row_sweep, update_row_factors, update_col_factors) - loss = unregularized_loss + regularization - root_weighted_squared_error = math_ops.sqrt(unregularized_loss / sum_weights) + Args: + sp_input: A SparseTensor corresponding to the row or column batch. 
+ num_items: An integer, the total number of items of this axis. + update_fn: A function that takes one argument (`sp_input`), and that + returns a tuple of + * new_factors: A flot Tensor of the factor values after update. + * update_op: a TensorFlow op which updates the factors. + * loss: A float Tensor, the unregularized loss. + * reg_loss: A float Tensor, the regularization loss. + * sum_weights: A float Tensor, the sum of factor weights. + axis_name: A string that specifies the name of the axis. - row_prep_ops = [ - model.row_update_prep_gramian_op, model.initialize_row_update_op - ] - col_prep_ops = [ - model.col_update_prep_gramian_op, model.initialize_col_update_op - ] - init_ops = [model.worker_init] + Returns: + A tuple consisting of: + * reset_processed_items_op: A TensorFlow op, to be run before the + beginning of any sweep. It marks all items as not-processed. + * axis_train_op: A Tensorflow op, to be run during this axis' sweeps. + """ + processed_items_init = array_ops.fill(dims=[num_items], value=False) + with ops.colocate_with(processed_items_init): + processed_items = variable_scope.variable( + processed_items_init, + collections=[ops.GraphKeys.GLOBAL_VARIABLES], + trainable=False, + name="processed_" + axis_name) + reset_processed_items_op = state_ops.assign( + processed_items, processed_items_init, + name="reset_processed_" + axis_name) + _, update_op, loss, reg, sum_weights = update_fn(sp_input) + input_indices = sp_input.indices[:, 0] + with ops.control_dependencies([ + update_op, + state_ops.assign(loss_var, loss + reg), + state_ops.assign(rwse_var, math_ops.sqrt(loss / sum_weights))]): + with ops.colocate_with(processed_items): + update_processed_items = state_ops.scatter_update( + processed_items, + input_indices, + array_ops.ones_like(input_indices, dtype=dtypes.bool), + name="update_processed_{}_indices".format(axis_name)) + with ops.control_dependencies([update_processed_items]): + is_sweep_done = math_ops.reduce_all(processed_items) + axis_train_op = control_flow_ops.group( + global_step_incr_op, + state_ops.assign(is_sweep_done_var, is_sweep_done), + state_ops.assign_add( + completed_sweeps_var, + math_ops.cast(is_sweep_done, dtypes.int32)), + name="{}_sweep_train_op".format(axis_name)) + return reset_processed_items_op, axis_train_op - sweep_hook = _SweepHook( - is_row_sweep_var, - [train_op, loss], - params["num_rows"], - params["num_cols"], - input_row_indices, - input_col_indices, - row_prep_ops, - col_prep_ops, - init_ops, - completed_sweeps_var) - training_hooks = [sweep_hook] - if max_sweeps is not None: - training_hooks.append(_StopAtSweepHook(max_sweeps)) + reset_processed_rows_op, row_train_op = create_axis_ops( + input_rows, + params["num_rows"], + lambda x: model.update_row_factors(sp_input=x, transpose_input=False), + "rows") + reset_processed_cols_op, col_train_op = create_axis_ops( + input_cols, + params["num_cols"], + lambda x: model.update_col_factors(sp_input=x, transpose_input=True), + "cols") + switch_op = control_flow_ops.group( + state_ops.assign( + is_row_sweep_var, math_ops.logical_not(is_row_sweep_var)), + reset_processed_rows_op, + reset_processed_cols_op, + name="sweep_switch_op") + row_prep_ops = [ + model.row_update_prep_gramian_op, model.initialize_row_update_op] + col_prep_ops = [ + model.col_update_prep_gramian_op, model.initialize_col_update_op] + init_op = model.worker_init + sweep_hook = _SweepHook( + is_row_sweep_var, is_sweep_done_var, init_op, + row_prep_ops, col_prep_ops, row_train_op, col_train_op, switch_op) + 
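Taken together, `create_axis_ops`, `switch_op`, and the ops handed to `_SweepHook` implement an alternating-sweep loop: run the init op once, run the prep ops at the start of each sweep, run only the active axis' train op on each step, and flip the axis once every row (or column) has been processed. A minimal, TensorFlow-free sketch of that control flow, using hypothetical names (illustrative only, not part of the patch):

```python
# Illustrative, TF-free sketch of the sweep control flow driven by _SweepHook:
# alternate row/col sweeps, run prep work once per sweep, and switch axes only
# when every item of the active axis has been processed.
class SweepState(object):

  def __init__(self, num_rows, num_cols):
    self.is_row_sweep = True
    self.initialized = False
    self.sizes = {"rows": num_rows, "cols": num_cols}
    self.processed = {"rows": set(), "cols": set()}

  def axis(self):
    return "rows" if self.is_row_sweep else "cols"

  def sweep_done(self):
    return len(self.processed[self.axis()]) == self.sizes[self.axis()]

  def before_step(self):
    """Mirrors _SweepHook.before_run: init once, switch and prep when needed."""
    if not self.initialized:
      print("running init op")
    if self.sweep_done():
      # switch_op: flip the axis and reset the processed-items bookkeeping.
      self.is_row_sweep = not self.is_row_sweep
      self.processed = {"rows": set(), "cols": set()}
      print("running %s prep ops" % self.axis())
    elif not self.initialized:
      print("running %s prep ops" % self.axis())
    self.initialized = True

  def train_step(self, batch_indices):
    """Mirrors the selected axis train op: mark the fetched batch processed."""
    self.processed[self.axis()].update(batch_indices)


state = SweepState(num_rows=2, num_cols=3)
for batch in ([0], [1], [0, 1, 2], [0]):
  state.before_step()
  state.train_step(batch)
# Prints: init op, rows prep (sweep 1), cols prep (sweep 2), rows prep again.
```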
training_hooks = [sweep_hook] + if max_sweeps is not None: + training_hooks.append(_StopAtSweepHook(max_sweeps)) - # The root weighted squared error = - # \sqrt( \sum_{i,j} w_ij * (a_ij - r_ij)^2 / \sum_{i,j} w_ij ) - summary.scalar("loss", loss) # the estimated total training loss - summary.scalar("root_weighted_squared_error", root_weighted_squared_error) - summary.scalar("completed_sweeps", completed_sweeps_var) + return model_fn.ModelFnOps( + mode=model_fn.ModeKeys.TRAIN, + predictions={}, + loss=loss_var, + eval_metric_ops={}, + train_op=control_flow_ops.no_op(), + training_hooks=training_hooks) - # Prediction ops (only return predictions in INFER mode) - predictions = {} - if mode == model_fn.ModeKeys.INFER: - project_row = features[WALSMatrixFactorization.PROJECT_ROW] + # INFER mode + elif mode == model_fn.ModeKeys.INFER: projection_weights = features.get( WALSMatrixFactorization.PROJECTION_WEIGHTS) @@ -364,17 +325,45 @@ def _wals_factorization_model_function(features, labels, mode, params): projection_weights=projection_weights, transpose_input=True) - predictions[WALSMatrixFactorization.PROJECTION_RESULT] = ( - control_flow_ops.cond(project_row, get_row_projection, - get_col_projection)) + predictions = { + WALSMatrixFactorization.PROJECTION_RESULT: control_flow_ops.cond( + features[WALSMatrixFactorization.PROJECT_ROW], + get_row_projection, + get_col_projection) + } - return model_fn.ModelFnOps( - mode=mode, - predictions=predictions, - loss=loss, - eval_metric_ops={}, - train_op=train_op, - training_hooks=training_hooks) + return model_fn.ModelFnOps( + mode=model_fn.ModeKeys.INFER, + predictions=predictions, + loss=None, + eval_metric_ops={}, + train_op=control_flow_ops.no_op(), + training_hooks=[]) + + # EVAL mode + elif mode == model_fn.ModeKeys.EVAL: + def get_row_loss(): + _, _, loss, reg, _ = model.update_row_factors( + sp_input=input_rows, transpose_input=False) + return loss + reg + def get_col_loss(): + _, _, loss, reg, _ = model.update_col_factors( + sp_input=input_cols, transpose_input=True) + return loss + reg + loss = control_flow_ops.cond( + features[WALSMatrixFactorization.PROJECT_ROW], + get_row_loss, + get_col_loss) + return model_fn.ModelFnOps( + mode=model_fn.ModeKeys.EVAL, + predictions={}, + loss=loss, + eval_metric_ops={}, + train_op=control_flow_ops.no_op(), + training_hooks=[]) + + else: + raise ValueError("mode=%s is not recognized." 
% str(mode)) class WALSMatrixFactorization(estimator.Estimator): @@ -452,6 +441,10 @@ class WALSMatrixFactorization(estimator.Estimator): PROJECTION_RESULT = "projection" # Name of the completed_sweeps variable COMPLETED_SWEEPS = "completed_sweeps" + # Name of the loss variable + LOSS = "WALS_loss" + # Name of the Root Weighted Squared Error variable + RWSE = "WALS_RWSE" def __init__(self, num_rows, diff --git a/tensorflow/contrib/factorization/python/ops/wals_test.py b/tensorflow/contrib/factorization/python/ops/wals_test.py index 8bd72b7025a..36b483c6d7a 100644 --- a/tensorflow/contrib/factorization/python/ops/wals_test.py +++ b/tensorflow/contrib/factorization/python/ops/wals_test.py @@ -417,73 +417,67 @@ class WALSMatrixFactorizationUnsupportedTest(test.TestCase): class SweepHookTest(test.TestCase): - def setUp(self): - self._num_rows = 5 - self._num_cols = 7 - self._train_op = control_flow_ops.no_op() - self._row_prep_done = variables.Variable(False) - self._col_prep_done = variables.Variable(False) - self._init_done = variables.Variable(False) - self._row_prep_ops = [state_ops.assign(self._row_prep_done, True)] - self._col_prep_ops = [state_ops.assign(self._col_prep_done, True)] - self._init_ops = [state_ops.assign(self._init_done, True)] - self._input_row_indices_ph = array_ops.placeholder(dtypes.int64) - self._input_col_indices_ph = array_ops.placeholder(dtypes.int64) - def test_sweeps(self): - def ind_feed(row_indices, col_indices): - return { - self._input_row_indices_ph: row_indices, - self._input_col_indices_ph: col_indices - } + is_row_sweep_var = variables.Variable(True) + is_sweep_done_var = variables.Variable(False) + init_done = variables.Variable(False) + row_prep_done = variables.Variable(False) + col_prep_done = variables.Variable(False) + row_train_done = variables.Variable(False) + col_train_done = variables.Variable(False) + + init_op = state_ops.assign(init_done, True) + row_prep_op = state_ops.assign(row_prep_done, True) + col_prep_op = state_ops.assign(col_prep_done, True) + row_train_op = state_ops.assign(row_train_done, True) + col_train_op = state_ops.assign(col_train_done, True) + train_op = control_flow_ops.no_op() + switch_op = control_flow_ops.group( + state_ops.assign(is_sweep_done_var, False), + state_ops.assign(is_row_sweep_var, + math_ops.logical_not(is_row_sweep_var))) + mark_sweep_done = state_ops.assign(is_sweep_done_var, True) with self.test_session() as sess: - is_row_sweep_var = variables.Variable(True) - completed_sweeps_var = variables.Variable(0) sweep_hook = wals_lib._SweepHook( is_row_sweep_var, - [self._train_op], - self._num_rows, - self._num_cols, - self._input_row_indices_ph, - self._input_col_indices_ph, - self._row_prep_ops, - self._col_prep_ops, - self._init_ops, - completed_sweeps_var) + is_sweep_done_var, + init_op, + [row_prep_op], + [col_prep_op], + row_train_op, + col_train_op, + switch_op) mon_sess = monitored_session._HookedSession(sess, [sweep_hook]) sess.run([variables.global_variables_initializer()]) - # Init ops should run before the first run. Row sweep not completed. - mon_sess.run(self._train_op, ind_feed([0, 1, 2], [])) - self.assertTrue(sess.run(self._init_done), - msg='init ops not run by the sweep_hook') - self.assertTrue(sess.run(self._row_prep_done), - msg='row_prep not run by the sweep_hook') - self.assertTrue(sess.run(is_row_sweep_var), - msg='Row sweep is not complete but is_row_sweep is ' - 'False.') - # Row sweep completed. 
- mon_sess.run(self._train_op, ind_feed([3, 4], [0, 1, 2, 3, 4, 5, 6])) - self.assertTrue(sess.run(completed_sweeps_var) == 1, - msg='Completed sweeps should be equal to 1.') - self.assertTrue(sess.run(sweep_hook._is_sweep_done_var), - msg='Sweep is complete but is_sweep_done is False.') - # Col init ops should run. Col sweep not completed. - mon_sess.run(self._train_op, ind_feed([], [0, 1, 2, 3, 4])) - self.assertTrue(sess.run(self._col_prep_done), - msg='col_prep not run by the sweep_hook') - self.assertFalse(sess.run(is_row_sweep_var), - msg='Col sweep is not complete but is_row_sweep is ' - 'True.') - self.assertFalse(sess.run(sweep_hook._is_sweep_done_var), - msg='Sweep is not complete but is_sweep_done is True.') - # Col sweep completed. - mon_sess.run(self._train_op, ind_feed([], [4, 5, 6])) - self.assertTrue(sess.run(sweep_hook._is_sweep_done_var), - msg='Sweep is complete but is_sweep_done is False.') - self.assertTrue(sess.run(completed_sweeps_var) == 2, - msg='Completed sweeps should be equal to 2.') + # Row sweep. + mon_sess.run(train_op) + self.assertTrue(sess.run(init_done), + msg='init op not run by the Sweephook') + self.assertTrue(sess.run(row_prep_done), + msg='row_prep_op not run by the SweepHook') + self.assertTrue(sess.run(row_train_done), + msg='row_train_op not run by the SweepHook') + self.assertTrue( + sess.run(is_row_sweep_var), + msg='Row sweep is not complete but is_row_sweep_var is False.') + # Col sweep. + mon_sess.run(mark_sweep_done) + mon_sess.run(train_op) + self.assertTrue(sess.run(col_prep_done), + msg='col_prep_op not run by the SweepHook') + self.assertTrue(sess.run(col_train_done), + msg='col_train_op not run by the SweepHook') + self.assertFalse( + sess.run(is_row_sweep_var), + msg='Col sweep is not complete but is_row_sweep_var is True.') + # Row sweep. + mon_sess.run(mark_sweep_done) + mon_sess.run(train_op) + self.assertTrue( + sess.run(is_row_sweep_var), + msg='Col sweep is complete but is_row_sweep_var is False.') class StopAtSweepHookTest(test.TestCase): From 2eb758397f02e4234e38811c08723be29c83dbc4 Mon Sep 17 00:00:00 2001 From: Sergio Guadarrama Date: Tue, 14 Nov 2017 14:46:14 -0800 Subject: [PATCH 029/104] Make tf.make_template compatible with eager. 
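The intent is that a template created with `tf.make_template` keeps sharing the same variables across calls when eager execution is enabled, just as it does during graph construction. A rough sketch of that behaviour, written against the internal modules the new tests below already use; the `scale` function and its variable names are made up for illustration:

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope

with context.eager_mode():

  def scale(x):
    w = variable_scope.get_variable(
        "w", shape=[], initializer=init_ops.ones_initializer())
    return x * w

  scale_tmpl = template.make_template("scale", scale)
  y1 = scale_tmpl(constant_op.constant(2.0))  # first call creates scale/w
  y2 = scale_tmpl(constant_op.constant(3.0))  # later calls reuse scale/w
  assert len(scale_tmpl.trainable_variables) == 1  # the single shared "w"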
PiperOrigin-RevId: 175738521 --- .../python/kernel_tests/template_test.py | 149 ++++++++++++-- tensorflow/python/ops/template.py | 186 ++++++++++++++++++ 2 files changed, 316 insertions(+), 19 deletions(-) diff --git a/tensorflow/python/kernel_tests/template_test.py b/tensorflow/python/kernel_tests/template_test.py index 8b9c58ac3f7..798bd0fe894 100644 --- a/tensorflow/python/kernel_tests/template_test.py +++ b/tensorflow/python/kernel_tests/template_test.py @@ -20,7 +20,9 @@ from __future__ import print_function import traceback from tensorflow.python.client import session +from tensorflow.python.eager import context from tensorflow.python.framework import random_seed +from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops @@ -50,6 +52,13 @@ def function_with_create(trainable): "dummy", shape=[1], initializer=init_ops.zeros_initializer()) +def function_with_side_create(trainable, name="side"): + """Creates a variable as a side effect using tf.get_variable.""" + variable_scope.get_variable(name, shape=[1], trainable=trainable) + return variable_scope.get_variable( + "dummy", shape=[1], initializer=init_ops.zeros_initializer()) + + def variable_scoped_function_with_local_variable(): variable_scope.get_local_variable( "local", shape=[1], initializer=init_ops.zeros_initializer()) @@ -99,6 +108,46 @@ class TemplateTest(test.TestCase): # Parameters are tied, so the loss should have gone down when we trained it. self.assertLess(final_test_loss, initial_test_loss) + def test_end_to_end_eager(self): + """This test shows a very simple line model with test_loss in eager mode. + + The template is used to share parameters between a training and test model. + """ + with context.eager_mode(): + # y = 2x + 1 + training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7]) + test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17]) + + random_seed.set_random_seed(1234) + + def test_line(x): + m = variable_scope.get_variable( + "w", shape=[], initializer=init_ops.truncated_normal_initializer()) + b = variable_scope.get_variable( + "b", shape=[], initializer=init_ops.truncated_normal_initializer()) + return x * m + b + + line_template = template.make_template("line", test_line) + + def train_loss(): + train_prediction = line_template(training_input) + return math_ops.reduce_mean( + math_ops.square(train_prediction - training_output)) + + def test_loss(): + test_prediction = line_template(test_input) + return math_ops.reduce_mean( + math_ops.square(test_prediction - test_output)) + + optimizer = gradient_descent.GradientDescentOptimizer(0.1) + initial_test_loss = test_loss() + optimizer.minimize(train_loss) + final_test_loss = test_loss() + + # Parameters are tied, so the loss should have gone down after training. 
+ self.assertLess(final_test_loss.numpy(), initial_test_loss.numpy()) + + @test_util.run_in_graph_and_eager_modes() def test_skip_stack_frames(self): first = traceback.format_stack() second = traceback.format_stack() @@ -106,6 +155,7 @@ class TemplateTest(test.TestCase): self.assertEqual(1, len(result)) self.assertNotEqual(len(first), len(result)) + @test_util.run_in_graph_and_eager_modes() def test_template_with_name(self): tmpl1 = template.make_template("s1", variable_scoped_function) tmpl2 = template.make_template("s1", variable_scoped_function) @@ -118,15 +168,23 @@ class TemplateTest(test.TestCase): self.assertEqual("s1/dummy:0", v1.name) self.assertEqual("s1_1/dummy:0", v3.name) - def test_unique_name_raise_error(self): + def test_same_unique_name_raise_error(self): tmpl1 = template.make_template( "_", variable_scoped_function, unique_name_="s1") tmpl1() tmpl2 = template.make_template( "_", variable_scoped_function, unique_name_="s1") - with self.assertRaises(ValueError): + with self.assertRaisesRegexp( + ValueError, "Variable s1/dummy already exists, disallowed.*"): tmpl2() + def test_unique_name_raise_error_in_eager(self): + with context.eager_mode(): + with self.assertRaisesRegexp( + ValueError, "unique_name cannot be used in eager mode."): + template.make_template( + "_", variable_scoped_function, unique_name_="s1") + def test_unique_name_and_reuse(self): tmpl1 = template.make_template( "_", variable_scoped_function, unique_name_="s1") @@ -142,6 +200,7 @@ class TemplateTest(test.TestCase): self.assertEqual(v1, v3) self.assertEqual("s1/dummy:0", v1.name) + @test_util.run_in_graph_and_eager_modes() def test_template_in_scope(self): tmpl1 = template.make_template("s1", variable_scoped_function) tmpl2 = template.make_template("s1", variable_scoped_function) @@ -158,6 +217,7 @@ class TemplateTest(test.TestCase): self.assertEqual("scope/s1/dummy:0", v1.name) self.assertEqual("scope/s1_1/dummy:0", v3.name) + @test_util.run_in_graph_and_eager_modes() def test_template_with_internal_reuse(self): tmpl1 = template.make_template("s1", internally_variable_scoped_function) tmpl2 = template.make_template("s1", internally_variable_scoped_function) @@ -173,10 +233,13 @@ class TemplateTest(test.TestCase): with self.assertRaises(ValueError): tmpl1("not_test") + @test_util.run_in_graph_and_eager_modes() def test_template_without_name(self): - with self.assertRaises(ValueError): + with self.assertRaisesRegexp( + ValueError, "name cannot be None."): template.make_template(None, variable_scoped_function) + @test_util.run_in_graph_and_eager_modes() def test_make_template(self): # Test both that we can call it with positional and keywords. 
tmpl1 = template.make_template( @@ -199,10 +262,28 @@ class TemplateTest(test.TestCase): with self.assertRaises(ValueError): tmpl() + @test_util.run_in_graph_and_eager_modes() + def test_enforces_no_extra_trainable_variables_eager(self): + tmpl = template.make_template("s", + function_with_side_create, + trainable=True) + + tmpl(name="1") + with self.assertRaises(ValueError): + tmpl(name="2") + def test_permits_extra_non_trainable_variables(self): tmpl = template.make_template("s", function_with_create, trainable=False) self.assertEqual(tmpl(), tmpl()) + def test_permits_extra_non_trainable_variables_eager(self): + with context.eager_mode(): + tmpl = template.make_template("s", + function_with_side_create, + trainable=False) + self.assertEqual(tmpl(name="1"), tmpl(name="2")) + + @test_util.run_in_graph_and_eager_modes() def test_internal_variable_reuse(self): def nested(): @@ -241,11 +322,28 @@ class TemplateTest(test.TestCase): v1 = tmpl1() v2 = tmpl1() v3 = tmpl2() - self.assertEqual(v1, v2) + self.assertTrue(v1, v2) self.assertNotEqual(v1, v3) self.assertEqual("s1/nested_1/dummy:0", v1.name) self.assertEqual("s1_1/nested_1/dummy:0", v3.name) + def test_nested_eager_templates_raises_error(self): + + def nested_template(): + nested1 = template.make_template("nested", variable_scoped_function) + nested2 = template.make_template("nested", variable_scoped_function) + v1 = nested1() + v2 = nested2() + self.assertNotEqual(v1, v2) + return v2 + + with context.eager_mode(): + tmpl1 = template.make_template("s1", nested_template) + with self.assertRaisesRegexp( + ValueError, "Nested EagerTemaplates are not currently supported."): + tmpl1() + + @test_util.run_in_graph_and_eager_modes() def test_immediate_scope_creation(self): # Create templates in scope a then call in scope b. make_template should # capture the scope the first time it is called, and make_immediate_template @@ -270,6 +368,7 @@ class TemplateTest(test.TestCase): self.assertEqual("ctor_scope/a/dummy:0", inner_imm_var.name) self.assertEqual("call_scope/b/dummy:0", inner_defer_var.name) + @test_util.run_in_graph_and_eager_modes() def test_scope_access(self): # Ensure that we can access the scope inside the template, because the name # of that scope may be different from the name we pass to make_template, due @@ -294,6 +393,7 @@ class TemplateTest(test.TestCase): # Template is called at the top level, so there is no preceding "foo_2". self.assertEqual(tc.variable_scope.name, "blah") + @test_util.run_in_graph_and_eager_modes() def test_custom_getter(self): # Custom getter that maintains call count and forwards to true getter custom_getter_count = [0] @@ -326,6 +426,7 @@ class TemplateTest(test.TestCase): tmpl2() self.assertEqual(custom_getter_count[0], 2) + @test_util.run_in_graph_and_eager_modes() def test_fails_gracefully(self): for create_scope_now in [True, False]: def module_function_with_one_arg(inputs): @@ -336,7 +437,7 @@ class TemplateTest(test.TestCase): templatized_function = template.make_template( "f1", module_function_with_one_arg, create_scope_now_=create_scope_now) - data = array_ops.zeros(1) + data = array_ops.zeros([1]) try: # Try to connect with a kwarg which is unsupported. 
templatized_function(data, is_training=True) @@ -348,6 +449,7 @@ class TemplateTest(test.TestCase): templatized_function(data) self.assertTrue(templatized_function._variables_created) + @test_util.run_in_graph_and_eager_modes() def test_name_scopes_for_variable_scopes(self): # Test that name scopes are not unnecessarily uniquified (but are # still uniquified when necessary). @@ -374,12 +476,13 @@ class TemplateTest(test.TestCase): outputs_b, _ = linear1(inputs) self.assertEquals("foo", linear1.variable_scope.name) self.assertEquals("foo/w:0", w1.name) - self.assertEquals("foo/add:0", outputs_a.name, - "First application of template should get " - "same name scope as variables.") - self.assertEquals("foo_1/add:0", outputs_b.name, - "Second application of template should get " - "a freshly uniquified name scope.") + if context.in_graph_mode(): + self.assertEquals("foo/add:0", outputs_a.name, + "First application of template should get " + "same name scope as variables.") + self.assertEquals("foo_1/add:0", outputs_b.name, + "Second application of template should get " + "a freshly uniquified name scope.") linear2 = make_linear_module(output_size=2, name="foo") outputs_c, w2 = linear2(inputs) @@ -388,20 +491,26 @@ class TemplateTest(test.TestCase): "New template gets a freshly uniquified variable scope " "because 'foo' is already taken.") self.assertEquals("foo_1/w:0", w2.name) - self.assertEquals("foo_1_1/add:0", outputs_c.name, - "First application of template would get " - "same name scope as variables, but 'foo_1' is already " - "a name scope.") - self.assertEquals("foo_1_2/add:0", outputs_d.name, - "Second application of template should also get " - "a freshly uniquified name scope.") + if context.in_graph_mode(): + self.assertEquals("foo_1_1/add:0", outputs_c.name, + "First application of template would get " + "same name scope as variables, but 'foo_1' is already " + "a name scope.") + self.assertEquals("foo_1_2/add:0", outputs_d.name, + "Second application of template should also get " + "a freshly uniquified name scope.") + @test_util.run_in_graph_and_eager_modes() def test_global_variables(self): # Make sure global_variables are created. with variable_scope.variable_scope("foo"): # Create two templates with the same name, ensure scopes are made unique. ta = template.make_template("bar", variable_scoped_function, True) - tb = template.make_template("s", function_with_create, trainable=False) + if context.in_eager_mode(): + tb = template.make_template("s", function_with_side_create, + trainable=False) + else: + tb = template.make_template("s", function_with_create, trainable=False) # Initially there are not variables created. self.assertEqual([], ta.global_variables) @@ -413,6 +522,7 @@ class TemplateTest(test.TestCase): self.assertEqual(1, len(ta.global_variables)) self.assertEqual(2, len(tb.global_variables)) + @test_util.run_in_graph_and_eager_modes() def test_trainable_variables(self): # Make sure trainable_variables are created. with variable_scope.variable_scope("foo2"): @@ -430,6 +540,7 @@ class TemplateTest(test.TestCase): self.assertEqual(1, len(ta.trainable_variables)) self.assertEqual(1, len(tb.trainable_variables)) + # TODO(apassos) handle local variables in Eager def test_local_variables(self): # Make sure trainable_variables are created. 
with variable_scope.variable_scope("foo3"): diff --git a/tensorflow/python/ops/template.py b/tensorflow/python/ops/template.py index 24ef70c6f4d..98578b799a8 100644 --- a/tensorflow/python/ops/template.py +++ b/tensorflow/python/ops/template.py @@ -21,6 +21,7 @@ from __future__ import print_function import functools import traceback +from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging @@ -138,6 +139,10 @@ def make_template(name_, func_, create_scope_now_=False, unique_name_=None, """ if kwargs: func_ = functools.partial(func_, **kwargs) + if context.in_eager_mode(): + return EagerTemplate( + name_, func_, create_scope_now=create_scope_now_, + unique_name=unique_name_, custom_getter=custom_getter_) return Template( name_, func_, create_scope_now=create_scope_now_, unique_name=unique_name_, custom_getter=custom_getter_) @@ -336,3 +341,184 @@ class Template(object): def var_scope(self): """Returns the variable scope object created by this Template.""" return self._variable_scope + + +class EagerTemplate(Template): + """Wrap a function to aid in variable sharing in Eager mode. + + Templates are functions that create variables the first time they are called + and reuse them thereafter. See `make_template` for full documentation. + + Note: By default, the full variable scope is captured at the time of first + call. If `create_scope_now` is passed as True to the constructor, the full + scope will be captured there, but no variables will be created until the first + call. + """ + + def __init__(self, name, func, create_scope_now=False, unique_name=None, + custom_getter=None): + """Creates a template for the given function. + + Args: + name: A name for the scope created by this template. The + name will be made unique by appending `_N` to the it (see how + `tf.variable_scope` treats the `default_name` for details). + func: The function to apply each time. + create_scope_now: Whether to create the scope at Template construction + time, rather than first call. Defaults to false. Creating the scope at + construction time may be more convenient if the template is passed + through much lower level code, and you want to be sure of the scope + name without knowing exactly where it will be first called. If set to + True, the scope will be created in the constructor, and all subsequent + times in __call__, leading to a trailing numeral being added to the + names of all created Tensors. If set to False, the scope will be created + at the first call location. + unique_name: When used, it overrides name_ and is not made unique. If a + template of the same scope/unique_name already exists and reuse is + false, an error is raised. Defaults to None. + custom_getter: optional custom getter to pass to variable_scope() + + Raises: + RuntimeError: if eager mode is not enabled. + ValueError: if the name is None or unique_name is provided. + """ + if not context.in_eager_mode(): + raise RuntimeError( + "{} objects can only be used when eager execution is enabled, use " + "tf.Template for graph construction". + format(type(self))) + if unique_name: + raise ValueError("unique_name cannot be used in eager mode.") + super(EagerTemplate, self).__init__(name, func, create_scope_now, + unique_name, custom_getter) + # Create an eager variable store only if the current variable store cannot + # store eager variables. This should allow for correct nesting. 
+ default_vstore = variable_scope._get_default_variable_store() # pylint: disable=protected-access + if default_vstore._store_eager_variables: # pylint: disable=protected-access + raise ValueError("Nested EagerTemaplates are not currently supported.") + else: + self._eager_variable_store = variable_scope.EagerVariableStore() + + def _call_func(self, args, kwargs, check_for_new_variables): + try: + vars_at_start = self._eager_variable_store.variables() + trainable_at_start = self._eager_variable_store.trainable_variables() + + result = self._func(*args, **kwargs) + if check_for_new_variables: + trainable_variables = self._eager_variable_store.trainable_variables() + # If a variable that we intend to train is created as a side effect + # of creating a template, then that is almost certainly an error. + if len(trainable_at_start) != len(trainable_variables): + raise ValueError("Trainable variable created when calling a template " + "after the first time, perhaps you used tf.Variable " + "when you meant tf.get_variable: %s" % + list(set(trainable_variables) - + set(trainable_at_start))) + + # Non-trainable tracking variables are a legitimate reason why a new + # variable would be created, but it is a relatively advanced use-case, + # so log it. + variables = self._eager_variable_store.variables() + if len(vars_at_start) != len(variables): + logging.info("New variables created when calling a template after " + "the first time, perhaps you used tf.Variable when you " + "meant tf.get_variable: %s", + list(set(variables) - set(vars_at_start))) + return result + except Exception as exc: + # Reraise the exception, but append the original definition to the + # trace. + args = exc.args + if not args: + arg0 = "" + else: + arg0 = args[0] + trace = "".join(_skip_common_stack_elements(self._stacktrace, + traceback.format_stack())) + arg0 = "%s\n\noriginally defined at:\n%s" % (arg0, trace) + new_args = [arg0] + new_args.extend(args[1:]) + exc.args = tuple(new_args) + raise + + def __call__(self, *args, **kwargs): + if self._variable_scope: + if self._variables_created: + # This is not the first visit to __call__, so variables have already + # been created, and we want to reuse them. + with variable_scope.variable_scope(self._variable_scope, + reuse=variable_scope.AUTO_REUSE): + with self._eager_variable_store.as_default(): + return self._call_func(args, kwargs, check_for_new_variables=True) + else: + # This is the first visit to __call__, but the scope has already been + # created in the constructor. Set _variables_created after the inner + # function is successfully called so that subsequent calls take the if + # branch above. + with variable_scope.variable_scope(self._variable_scope, + reuse=variable_scope.AUTO_REUSE): + with self._eager_variable_store.as_default(): + result = self._call_func(args, kwargs, + check_for_new_variables=False) + self._variables_created = True + return result + else: + # The scope was not created at construction time, so create it here. + # Subsequent calls should reuse variables. 
+ with variable_scope.variable_scope( + self._unique_name, self._name, + custom_getter=self._custom_getter) as vs: + self._variable_scope = vs + with self._eager_variable_store.as_default(): + result = self._call_func(args, kwargs, + check_for_new_variables=False) + self._variables_created = True + return result + + @property + def name(self): + """Returns the name given to this Template.""" + return self._name + + @property + def func(self): + """Returns the func given to this Template.""" + return self._func + + @property + def variable_scope(self): + """Returns the variable scope object created by this Template.""" + return self._variable_scope + + @property + def variable_scope_name(self): + """Returns the variable scope name created by this Template.""" + if self._variable_scope: + name = self._variable_scope.name + # To prevent partial matches on the scope_name, we add '/' at the end. + return name if name[-1] == "/" else name + "/" + + @property + def variables(self): + """Returns the list of trainable variables created by the Template.""" + # Currently there is no local variable in Eager mode. + return self._eager_variable_store.variables() + + @property + def trainable_variables(self): + """Returns the list of trainable variables created by the Template.""" + # Currently there is no local variable in Eager mode. + return self._eager_variable_store.trainable_variables() + + @property + def global_variables(self): + """Returns the list of global variables created by the Template.""" + # Currently there is no local variable in Eager mode. + return self.variables + + @property + def local_variables(self): + """Returns the list of global variables created by the Template.""" + # Currently there is no local variable in Eager mode. + return [] From bc0b26046fa729612b0017815b72a5faa4890e86 Mon Sep 17 00:00:00 2001 From: RJ Ryan Date: Tue, 14 Nov 2017 14:57:53 -0800 Subject: [PATCH 030/104] Add seed to tf.contrib.layers.dropout. This allows customizing the dropout seed via arg_scope. PiperOrigin-RevId: 175740302 --- tensorflow/contrib/layers/python/layers/layers.py | 6 +++++- .../contrib/layers/python/layers/layers_test.py | 12 ++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py index ad4a0b302fb..46b3eeae914 100644 --- a/tensorflow/contrib/layers/python/layers/layers.py +++ b/tensorflow/contrib/layers/python/layers/layers.py @@ -1403,7 +1403,8 @@ def dropout(inputs, noise_shape=None, is_training=True, outputs_collections=None, - scope=None): + scope=None, + seed=None): """Returns a dropout op applied to the input. With probability `keep_prob`, outputs the input element scaled up by @@ -1421,6 +1422,8 @@ def dropout(inputs, Otherwise, inputs is returned. outputs_collections: Collection to add the outputs. scope: Optional scope for name_scope. + seed: A Python integer. Used to create random seeds. See + @{tf.set_random_seed} for behavior. Returns: A tensor representing the output of the operation. 
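The new argument exists so that a single seed can be pushed onto every dropout layer in a block through `arg_scope`; the sketch below shows that usage with toy shapes, assuming `tf.contrib.framework.arg_scope` and the `tf.contrib.layers.dropout` signature as of this change:

import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework import arg_scope

images = tf.random_uniform((5, 10, 10, 3), seed=1)
with arg_scope([layers.dropout], keep_prob=0.5, seed=42):
  out1 = layers.dropout(images)
  out2 = layers.dropout(images)
# With the same input and the same op-level seed, both ops drop the same
# units; the new testDropoutSeed below checks exactly that.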
@@ -1430,6 +1433,7 @@ def dropout(inputs, inputs = ops.convert_to_tensor(inputs) layer = core_layers.Dropout(rate=1 - keep_prob, noise_shape=noise_shape, + seed=seed, name=sc.name, _scope=sc) outputs = layer.apply(inputs, training=is_training) diff --git a/tensorflow/contrib/layers/python/layers/layers_test.py b/tensorflow/contrib/layers/python/layers/layers_test.py index 2837a3172da..ff7f0e44623 100644 --- a/tensorflow/contrib/layers/python/layers/layers_test.py +++ b/tensorflow/contrib/layers/python/layers/layers_test.py @@ -1345,11 +1345,20 @@ class DropoutTest(test.TestCase): num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0)) output = _layers.dropout(images) num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0)) - sess.run(variables_lib.global_variables_initializer()) num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial]) self.assertLess(num_elem, num_elem_initial / 2 + 0.1) self.assertGreater(num_elem, num_elem_initial / 2 - 0.1) + def testDropoutSeed(self): + """Test that providing the same seed produces the same result.""" + height, width = 10, 10 + with self.test_session() as sess: + images = random_ops.random_uniform( + (5, height, width, 3), seed=1, name='images') + output1 = _layers.dropout(images, seed=1) + output2 = _layers.dropout(images, seed=1) + self.assertAllEqual(*sess.run([output1, output2])) + def testCreateDropoutNoTraining(self): height, width = 3, 3 with self.test_session() as sess: @@ -1358,7 +1367,6 @@ class DropoutTest(test.TestCase): num_elem_initial = math_ops.reduce_mean(math_ops.to_float(images > 0)) output = _layers.dropout(images, is_training=False) num_elem = math_ops.reduce_mean(math_ops.to_float(output > 0)) - sess.run(variables_lib.global_variables_initializer()) num_elem, num_elem_initial = sess.run([num_elem, num_elem_initial]) self.assertEqual(num_elem, num_elem_initial) outputs, inputs = sess.run([output, images]) From f89cffd37c88e4d9fa0ee3ac191e6f5fd5c005c8 Mon Sep 17 00:00:00 2001 From: Anna R Date: Tue, 14 Nov 2017 15:00:48 -0800 Subject: [PATCH 031/104] Internal change. PiperOrigin-RevId: 175740778 --- tensorflow/core/framework/op_def_util.cc | 9 ++ tensorflow/core/framework/op_def_util.h | 4 + tensorflow/python/BUILD | 1 + tensorflow/python/eager/BUILD | 1 + .../python/eager/python_eager_op_gen.cc | 30 +++-- tensorflow/python/eager/python_eager_op_gen.h | 3 +- .../python/eager/python_eager_op_gen_main.cc | 23 +++- tensorflow/python/framework/python_op_gen.cc | 120 +++++++++++++----- tensorflow/python/framework/python_op_gen.h | 17 ++- .../python/framework/python_op_gen_internal.h | 6 +- .../python/framework/python_op_gen_main.cc | 26 +++- 11 files changed, 184 insertions(+), 56 deletions(-) diff --git a/tensorflow/core/framework/op_def_util.cc b/tensorflow/core/framework/op_def_util.cc index 2f737a0f169..f7d4166f970 100644 --- a/tensorflow/core/framework/op_def_util.cc +++ b/tensorflow/core/framework/op_def_util.cc @@ -161,6 +161,15 @@ OpDef::AttrDef* FindAttrMutable(StringPiece name, OpDef* op_def) { return nullptr; } +const OpDef::ArgDef* FindInputArg(StringPiece name, const OpDef& op_def) { + for (int i = 0; i < op_def.input_arg_size(); ++i) { + if (op_def.input_arg(i).name() == name) { + return &op_def.input_arg(i); + } + } + return nullptr; +} + #define VALIDATE(EXPR, ...) 
\ do { \ if (!(EXPR)) { \ diff --git a/tensorflow/core/framework/op_def_util.h b/tensorflow/core/framework/op_def_util.h index c329e4627cc..f9661dceddc 100644 --- a/tensorflow/core/framework/op_def_util.h +++ b/tensorflow/core/framework/op_def_util.h @@ -43,6 +43,10 @@ Status ValidateAttrValue(const AttrValue& attr_value, const OpDef::AttrDef* FindAttr(StringPiece name, const OpDef& op_def); OpDef::AttrDef* FindAttrMutable(StringPiece name, OpDef* op_def); +// Searches op_def for input argument with the indicated name. +// Returns nullptr if no such attr is found. +const OpDef::ArgDef* FindInputArg(StringPiece name, const OpDef& op_def); + // Produce a human-readable version of an op_def that is more concise // than a text-format proto. Excludes descriptions. string SummarizeOpDef(const OpDef& op_def); diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index 76477384de1..bc034e1902d 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -444,6 +444,7 @@ cc_library( "//tensorflow/core:framework", "//tensorflow/core:lib", "//tensorflow/core:lib_internal", + "//tensorflow/core:op_gen_lib", "//tensorflow/core:protos_all_cc", "//tensorflow/python/eager:python_eager_op_gen", ], diff --git a/tensorflow/python/eager/BUILD b/tensorflow/python/eager/BUILD index c36647b21c4..912aa4c1951 100644 --- a/tensorflow/python/eager/BUILD +++ b/tensorflow/python/eager/BUILD @@ -222,6 +222,7 @@ cc_library( ":python_eager_op_gen", "//tensorflow/core:framework", "//tensorflow/core:lib", + "//tensorflow/core:op_gen_lib", "//tensorflow/core:protos_all_cc", ], ) diff --git a/tensorflow/python/eager/python_eager_op_gen.cc b/tensorflow/python/eager/python_eager_op_gen.cc index 371df563bbb..374894733af 100644 --- a/tensorflow/python/eager/python_eager_op_gen.cc +++ b/tensorflow/python/eager/python_eager_op_gen.cc @@ -17,6 +17,7 @@ limitations under the License. 
#include #include #include +#include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb_text.h" @@ -100,8 +101,9 @@ string TensorPBString(const TensorProto& pb) { class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp { public: - GenEagerPythonOp(const OpDef& op_def, const string& function_name) - : python_op_gen_internal::GenPythonOp(op_def, function_name) { + GenEagerPythonOp(const OpDef& op_def, const ApiDef& api_def, + const string& function_name) + : python_op_gen_internal::GenPythonOp(op_def, api_def, function_name) { op_name_ = function_name_; op_name_.Consume("_"); } @@ -139,8 +141,9 @@ class GenEagerPythonOp : public python_op_gen_internal::GenPythonOp { std::unordered_map attr_expressions_; }; -string GetEagerPythonOp(const OpDef& op_def, const string& function_name) { - return GenEagerPythonOp(op_def, function_name).Code(); +string GetEagerPythonOp(const OpDef& op_def, const ApiDef& api_def, + const string& function_name) { + return GenEagerPythonOp(op_def, api_def, function_name).Code(); } string GenEagerPythonOp::FlattenInputs( @@ -662,7 +665,7 @@ void GenEagerPythonOp::AddEagerExecute(const string& num_outputs_expr) { WordWrap(return_prefix, return_args, kRightMargin), "\n"); } -string GetEagerPythonOps(const OpList& ops, +string GetEagerPythonOps(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, bool require_shapes, const string& source_file_name = "") { @@ -698,6 +701,7 @@ from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.tf_export import tf_export )"); @@ -727,7 +731,9 @@ from tensorflow.python.framework import op_def_library as _op_def_library continue; } - strings::StrAppend(&result, GetEagerPythonOp(op_def, function_name)); + const auto* api_def = api_defs.GetApiDef(op_def.name()); + strings::StrAppend(&result, + GetEagerPythonOp(op_def, *api_def, function_name)); if (!require_shapes) { strings::StrAppend(&result, "_ops.RegisterShape(\"", op_def.name(), @@ -760,19 +766,21 @@ from tensorflow.python.framework import op_def_library as _op_def_library } // namespace -void PrintEagerPythonOps(const OpList& ops, +void PrintEagerPythonOps(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, bool require_shapes, const string& source_file_name) { - printf("%s", - GetEagerPythonOps(ops, hidden_ops, require_shapes, source_file_name) - .c_str()); + printf("%s", GetEagerPythonOps(ops, api_defs, hidden_ops, require_shapes, + source_file_name) + .c_str()); } string GetEagerPythonWrappers(const char* op_list_buf, size_t op_list_len) { string op_list_str(op_list_buf, op_list_len); OpList ops; ops.ParseFromString(op_list_str); - return GetEagerPythonOps(ops, {}, false); + + ApiDefMap api_def_map(ops); + return GetEagerPythonOps(ops, api_def_map, {}, false); } } // namespace tensorflow diff --git a/tensorflow/python/eager/python_eager_op_gen.h b/tensorflow/python/eager/python_eager_op_gen.h index 250623850f2..f9dfdf0408f 100644 --- a/tensorflow/python/eager/python_eager_op_gen.h +++ b/tensorflow/python/eager/python_eager_op_gen.h @@ -18,6 +18,7 @@ limitations under the License. 
#include #include #include "tensorflow/core/framework/op_def.pb.h" +#include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { @@ -26,7 +27,7 @@ namespace tensorflow { // in the output. Prints the output to stdout. // Optional fourth argument is the name of the original C++ source file // where the ops' REGISTER_OP() calls reside. -void PrintEagerPythonOps(const OpList& ops, +void PrintEagerPythonOps(const OpList& ops, const ApiDefMap& api_defs, const std::vector& hidden_ops, bool require_shapes, const string& source_file_name = ""); diff --git a/tensorflow/python/eager/python_eager_op_gen_main.cc b/tensorflow/python/eager/python_eager_op_gen_main.cc index 9e4aa97ccc7..cd74c438ec6 100644 --- a/tensorflow/python/eager/python_eager_op_gen_main.cc +++ b/tensorflow/python/eager/python_eager_op_gen_main.cc @@ -20,15 +20,36 @@ limitations under the License. #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" +#include "tensorflow/core/framework/op_gen_lib.h" +#include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/init_main.h" namespace tensorflow { namespace { +constexpr char kBaseApiDef[] = + "tensorflow/core/api_def/base_api/*.pbtxt"; +constexpr char kPythonApiDef[] = + "tensorflow/core/api_def/python_api/*.pbtxt"; +constexpr bool kUseApiDef = false; + void PrintAllPythonOps(const std::vector& hidden_ops) { OpList ops; OpRegistry::Global()->Export(false, &ops); - PrintEagerPythonOps(ops, hidden_ops, true /* require_shapes */); + + ApiDefMap api_def_map(ops); + if (kUseApiDef) { + Env* env = Env::Default(); + + std::vector base_api_files; + std::vector python_api_files; + TF_CHECK_OK(env->GetMatchingPaths(kBaseApiDef, &base_api_files)); + TF_CHECK_OK(env->GetMatchingPaths(kPythonApiDef, &python_api_files)); + + TF_CHECK_OK(api_def_map.LoadFileList(env, base_api_files)); + TF_CHECK_OK(api_def_map.LoadFileList(env, python_api_files)); + } + PrintEagerPythonOps(ops, api_def_map, hidden_ops, true /* require_shapes */); } } // namespace diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc index 3c62dfd133d..c57f0a98421 100644 --- a/tensorflow/python/framework/python_op_gen.cc +++ b/tensorflow/python/framework/python_op_gen.cc @@ -447,23 +447,48 @@ static void AddDelimiter(string* append_to, const string& delim) { if (!append_to->empty()) strings::StrAppend(append_to, delim); } -GenPythonOp::GenPythonOp(const OpDef& op_def, const string& function_name) +const ApiDef::Attr* FindAttr(StringPiece name, const ApiDef& api_def) { + for (int i = 0; i < api_def.attr_size(); ++i) { + if (api_def.attr(i).name() == name) { + return &api_def.attr(i); + } + } + return nullptr; +} + +const ApiDef::Arg* FindInputArg(StringPiece name, const ApiDef& api_def) { + for (int i = 0; i < api_def.in_arg_size(); ++i) { + if (api_def.in_arg(i).name() == name) { + return &api_def.in_arg(i); + } + } + return nullptr; +} + +GenPythonOp::GenPythonOp(const OpDef& op_def, const ApiDef& api_def, + const string& function_name) : op_def_(op_def), + api_def_(api_def), function_name_(function_name), num_outs_(op_def.output_arg_size()) {} GenPythonOp::~GenPythonOp() {} string GenPythonOp::Code() { + if (api_def_.visibility() == ApiDef::SKIP) { + return ""; + } // This has all the input args followed by those attrs that don't have // defaults. std::vector args_no_default; // The parameters with defaults (these have to be listed after those without). 
// No input args are included, just attrs. std::vector args_with_defaults; - for (int i = 0; i < op_def_.input_arg_size(); ++i) { - const auto& arg(op_def_.input_arg(i)); - args_no_default.push_back(arg.name()); + + for (int i = 0; i < api_def_.arg_order_size(); ++i) { + const auto& arg = *FindInputArg(api_def_.arg_order(i), op_def_); + const auto& api_def_arg = *FindInputArg(api_def_.arg_order(i), api_def_); + args_no_default.push_back(api_def_arg.rename_to()); if (!arg.type_attr().empty()) { gtl::InsertIfNotPresent(&inferred_attrs_, arg.type_attr(), arg.name()); } else if (!arg.type_list_attr().empty()) { @@ -474,14 +499,14 @@ string GenPythonOp::Code() { gtl::InsertIfNotPresent(&inferred_attrs_, arg.number_attr(), arg.name()); } } - for (int i = 0; i < op_def_.attr_size(); ++i) { - const auto& attr(op_def_.attr(i)); + for (int i = 0; i < api_def_.attr_size(); ++i) { + const auto& attr(api_def_.attr(i)); // Do not add inferred attrs to the Python function signature. if (inferred_attrs_.find(attr.name()) == inferred_attrs_.end()) { if (attr.has_default_value()) { - args_with_defaults.push_back(attr.name()); + args_with_defaults.push_back(attr.rename_to()); } else { - args_no_default.push_back(attr.name()); + args_no_default.push_back(attr.rename_to()); } } } @@ -515,6 +540,7 @@ string GenPythonOp::Code() { AddDelimiter(¶meters, ", "); strings::StrAppend(¶meters, "name=None"); + AddExport(); AddDefLine(parameters); AddDocStringDescription(); AddDocStringArgs(); @@ -530,18 +556,37 @@ string GenPythonOp::Code() { return prelude_ + result_; } +void GenPythonOp::AddExport() { + if (api_def_.visibility() != api_def_.VISIBLE) { + return; + } + strings::StrAppend(&result_, "tf_export("); + + // Add all endpoint names to tf_export. + bool first_endpoint = true; + for (const auto& endpoint : api_def_.endpoint()) { + if (!first_endpoint) { + strings::StrAppend(&result_, ", "); + } else { + first_endpoint = false; + } + strings::StrAppend(&result_, "'", endpoint.name(), "'"); + } + strings::StrAppend(&result_, ")\n"); +} + void GenPythonOp::AddDefLine(const string& parameters) { strings::StrAppend(&result_, "def ", function_name_, "(", parameters, "):\n"); } void GenPythonOp::AddDocStringDescription() { string comment; - if (op_def_.summary().empty()) { + if (api_def_.summary().empty()) { comment = "TODO: add doc.\n"; } else { - comment = strings::StrCat(op_def_.summary(), "\n"); - if (!op_def_.description().empty()) { - strings::StrAppend(&comment, "\n", Indent(2, 2, op_def_.description())); + comment = strings::StrCat(api_def_.summary(), "\n"); + if (!api_def_.description().empty()) { + strings::StrAppend(&comment, "\n", Indent(2, 2, api_def_.description())); } } strings::StrAppend(&result_, " r\"\"\"", comment, "\n"); @@ -552,9 +597,10 @@ void GenPythonOp::AddDocStringArgs() { } void GenPythonOp::AddDocStringInputs() { - for (int i = 0; i < op_def_.input_arg_size(); ++i) { - const auto& arg(op_def_.input_arg(i)); - StringPiece description = op_def_.input_arg(i).description(); + for (int i = 0; i < api_def_.arg_order_size(); ++i) { + const auto& arg = *FindInputArg(api_def_.arg_order(i), op_def_); + const auto& api_def_arg = *FindInputArg(api_def_.arg_order(i), api_def_); + StringPiece description = api_def_arg.description(); string desc; if (ConsumeEquals(&description)) { // Skip the generated type info. 
desc = strings::StrCat(param_names_[i], ": "); @@ -572,7 +618,9 @@ void GenPythonOp::AddDocStringInputs() { void GenPythonOp::AddDocStringAttrs() { for (const string& name : attrs_) { const auto& attr = *FindAttr(name, op_def_); - string desc = strings::StrCat(AvoidPythonReserved(name), ": "); + const auto& api_def_attr = *FindAttr(name, api_def_); + string desc = + strings::StrCat(AvoidPythonReserved(api_def_attr.rename_to()), ": "); static const char* const kAttrTypeName[][2] = { {"string", "`string`"}, @@ -596,7 +644,7 @@ void GenPythonOp::AddDocStringAttrs() { for (size_t i = 0; i < TF_ARRAYSIZE(kAttrTypeName); ++i) { if (attr.type() == kAttrTypeName[i][0]) { string s; - if (attr.has_default_value()) { + if (api_def_attr.has_default_value()) { s = strings::StrCat("optional ", kAttrTypeName[i][1]); } else { s = kAttrTypeName[i][1]; @@ -625,14 +673,13 @@ void GenPythonOp::AddDocStringAttrs() { strings::StrAppend(&desc, "."); - if (attr.has_default_value()) { - strings::StrAppend(&desc, " Defaults to `", - AttrValueToPython(attr.type(), attr.default_value()), - "`."); + if (api_def_attr.has_default_value()) { + strings::StrAppend( + &desc, " Defaults to `", + AttrValueToPython(attr.type(), api_def_attr.default_value()), "`."); } - - if (!attr.description().empty()) { - AppendWithinWidth(&desc, attr.description(), + if (!api_def_attr.description().empty()) { + AppendWithinWidth(&desc, api_def_attr.description(), kRightMargin - 4 /* indent */); } strings::StrAppend(&result_, Indent(4, 6, desc)); @@ -650,8 +697,8 @@ void GenPythonOp::AddOutputGlobals() { // Prepare the list of output names std::vector out_names(num_outs_); for (int i = 0; i < num_outs_; ++i) { - if (!op_def_.output_arg(i).name().empty()) { - out_names[i] = op_def_.output_arg(i).name(); + if (!api_def_.out_arg(i).rename_to().empty()) { + out_names[i] = api_def_.out_arg(i).rename_to(); } else { out_names[i] = strings::StrCat("output", i); } @@ -714,11 +761,14 @@ void GenPythonOp::AddBodyNoReturn(const string& apply_prefix) { } // namespace python_op_gen_internal -string GetPythonOp(const OpDef& op_def, const string& function_name) { - return python_op_gen_internal::GenPythonOp(op_def, function_name).Code(); +string GetPythonOp(const OpDef& op_def, const ApiDef& api_def, + const string& function_name) { + return python_op_gen_internal::GenPythonOp(op_def, api_def, function_name) + .Code(); } -string GetPythonOps(const OpList& ops, const std::vector& hidden_ops, +string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs, + const std::vector& hidden_ops, bool require_shapes) { string result; // Header @@ -738,6 +788,7 @@ from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.tf_export import tf_export )"); // We'll make a copy of ops that filters out descriptions. 
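With the `ApiDefMap` wired in, `AddExport` above writes the list of public endpoint names ahead of each wrapper it generates. In the Python that ends up in the generated `gen_*_ops` modules, the effect corresponds to tagging one function with every endpoint listed in its `ApiDef`, roughly as in the hand-written illustration below; the op and its endpoints are hypothetical, and the op-def-library plumbing the generator also emits is omitted:

# Hand-written illustration only; "MyOp" and its endpoints are invented.
from tensorflow.python.util.tf_export import tf_export

@tf_export('my_op', 'experimental.my_op')  # one entry per ApiDef endpoint
def my_op(x, name=None):
  """Stub standing in for the generated wrapper body."""
  return x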
@@ -766,7 +817,8 @@ from tensorflow.python.framework import op_def_library as _op_def_library continue; } - strings::StrAppend(&result, GetPythonOp(op_def, function_name)); + const auto* api_def = api_defs.GetApiDef(op_def.name()); + strings::StrAppend(&result, GetPythonOp(op_def, *api_def, function_name)); if (!require_shapes) { strings::StrAppend(&result, "_ops.RegisterShape(\"", op_def.name(), @@ -799,16 +851,18 @@ from tensorflow.python.framework import op_def_library as _op_def_library return result; } -void PrintPythonOps(const OpList& ops, const std::vector& hidden_ops, +void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs, + const std::vector& hidden_ops, bool require_shapes) { - printf("%s", GetPythonOps(ops, hidden_ops, require_shapes).c_str()); + printf("%s", GetPythonOps(ops, api_defs, hidden_ops, require_shapes).c_str()); } string GetPythonWrappers(const char* op_list_buf, size_t op_list_len) { string op_list_str(op_list_buf, op_list_len); OpList ops; ops.ParseFromString(op_list_str); - return GetPythonOps(ops, {}, false); + ApiDefMap api_def_map(ops); + return GetPythonOps(ops, api_def_map, {}, false); } } // namespace tensorflow diff --git a/tensorflow/python/framework/python_op_gen.h b/tensorflow/python/framework/python_op_gen.h index f485044c5af..4d20888dc63 100644 --- a/tensorflow/python/framework/python_op_gen.h +++ b/tensorflow/python/framework/python_op_gen.h @@ -18,20 +18,23 @@ limitations under the License. #include #include +#include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/op_def.pb.h" +#include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { -// hidden_ops should be a comma-separated -// list of Op names that should get a leading _ in the output. +// hidden_ops should be a vector of Op names that should get a leading _ in the +// output. // The Print* version prints the output to stdout, Get* version returns the // output as a string. -void PrintPythonOps(const OpList& ops, const std::vector& hidden_ops, - bool require_shapes); -string GetPythonOps(const OpList& ops, const std::vector& hidden_ops, - bool require_shapes); -string GetPythonOp(const OpDef& op_def, const string& function_name); +void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs, + const std::vector& hidden_ops, bool require_shapes); +string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs, + const std::vector& hidden_ops, bool require_shapes); +string GetPythonOp(const OpDef& op_def, const ApiDef& api_def, + const string& function_name); // Get the python wrappers for a list of ops in a OpList. // `op_list_buf` should be a pointer to a buffer containing diff --git a/tensorflow/python/framework/python_op_gen_internal.h b/tensorflow/python/framework/python_op_gen_internal.h index 92237ac81a2..c1efbf9be22 100644 --- a/tensorflow/python/framework/python_op_gen_internal.h +++ b/tensorflow/python/framework/python_op_gen_internal.h @@ -18,6 +18,7 @@ limitations under the License. 
#include +#include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/platform/types.h" @@ -42,7 +43,8 @@ string DataTypeToPython(DataType dtype, const string& dtype_module); class GenPythonOp { public: - GenPythonOp(const OpDef& op_def, const string& function_name); + GenPythonOp(const OpDef& op_def, const ApiDef& api_def, + const string& function_name); virtual ~GenPythonOp(); virtual string Code(); @@ -62,9 +64,11 @@ class GenPythonOp { void AddDocStringOutputs(); void AddBody(const string& prefix); void AddBodyNoReturn(const string& apply_prefix); + void AddExport(); // From constructor arguments const OpDef& op_def_; + const ApiDef& api_def_; const string function_name_; const int num_outs_; diff --git a/tensorflow/python/framework/python_op_gen_main.cc b/tensorflow/python/framework/python_op_gen_main.cc index f681daa7e46..61b1d02a5e8 100644 --- a/tensorflow/python/framework/python_op_gen_main.cc +++ b/tensorflow/python/framework/python_op_gen_main.cc @@ -22,6 +22,7 @@ limitations under the License. #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" +#include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/io/inputbuffer.h" #include "tensorflow/core/lib/io/path.h" @@ -33,6 +34,12 @@ limitations under the License. namespace tensorflow { namespace { +constexpr char kBaseApiDef[] = + "tensorflow/core/api_def/base_api/*.pbtxt"; +constexpr char kPythonApiDef[] = + "tensorflow/core/api_def/python_api/*.pbtxt"; +constexpr bool kUseApiDef = false; + Status ReadOpListFromFile(const string& filename, std::vector* op_list) { std::unique_ptr file; @@ -108,6 +115,19 @@ void PrintAllPythonOps(const std::vector& op_list, OpList ops; OpRegistry::Global()->Export(false, &ops); + ApiDefMap api_def_map(ops); + if (kUseApiDef) { + Env* env = Env::Default(); + + std::vector base_api_files; + std::vector python_api_files; + TF_CHECK_OK(env->GetMatchingPaths(kBaseApiDef, &base_api_files)); + TF_CHECK_OK(env->GetMatchingPaths(kPythonApiDef, &python_api_files)); + + TF_CHECK_OK(api_def_map.LoadFileList(env, base_api_files)); + TF_CHECK_OK(api_def_map.LoadFileList(env, python_api_files)); + } + if (op_list_is_whitelist) { std::unordered_set whitelist(op_list.begin(), op_list.end()); OpList pruned_ops; @@ -116,9 +136,11 @@ void PrintAllPythonOps(const std::vector& op_list, *pruned_ops.mutable_op()->Add() = op_def; } } - PrintEagerPythonOps(pruned_ops, {}, require_shapes, source_file_name); + PrintEagerPythonOps(pruned_ops, api_def_map, {}, require_shapes, + source_file_name); } else { - PrintEagerPythonOps(ops, op_list, require_shapes, source_file_name); + PrintEagerPythonOps(ops, api_def_map, op_list, require_shapes, + source_file_name); } } From 6357bafeb80523c45bee21a19def146d221cd295 Mon Sep 17 00:00:00 2001 From: Yao Zhang Date: Tue, 14 Nov 2017 15:16:45 -0800 Subject: [PATCH 032/104] Use Toggle instead of bool to make the layout optimizer name and usage consistent with other optimizers. 
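After this change the layout pass is configured with the same `Toggle` enum as constant folding and the other Grappler passes, instead of a standalone bool. An illustrative session config using the new field (the particular ON/OFF choices are just an example):

from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2

rewrite_options = rewriter_config_pb2.RewriterConfig(
    layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,  # was optimize_tensor_layout=False
    constant_folding=rewriter_config_pb2.RewriterConfig.ON)
config = config_pb2.ConfigProto(
    graph_options=config_pb2.GraphOptions(rewrite_options=rewrite_options))

This mirrors the construction the updated `get_config` helper in `layout_optimizer_test.py` performs below.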
PiperOrigin-RevId: 175743440 --- tensorflow/core/grappler/clusters/cluster.cc | 2 +- tensorflow/core/grappler/optimizers/meta_optimizer.cc | 5 +++-- tensorflow/core/protobuf/rewriter_config.proto | 2 +- tensorflow/python/grappler/layout_optimizer_test.py | 10 +++++++--- 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/tensorflow/core/grappler/clusters/cluster.cc b/tensorflow/core/grappler/clusters/cluster.cc index ead44de1e2f..e2db47b758f 100644 --- a/tensorflow/core/grappler/clusters/cluster.cc +++ b/tensorflow/core/grappler/clusters/cluster.cc @@ -57,7 +57,7 @@ void Cluster::DisableOptimizer(bool disable) { // Disable Grappler optimizations. auto rewriter_config = options_.config.mutable_graph_options()->mutable_rewrite_options(); - rewriter_config->set_optimize_tensor_layout(false); + rewriter_config->set_layout_optimizer(RewriterConfig::OFF); rewriter_config->set_disable_model_pruning(true); rewriter_config->set_constant_folding(RewriterConfig::OFF); rewriter_config->set_memory_optimization(RewriterConfig::NO_MEM_OPT); diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.cc b/tensorflow/core/grappler/optimizers/meta_optimizer.cc index 6204a81f805..eb04bc6e9a9 100644 --- a/tensorflow/core/grappler/optimizers/meta_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/meta_optimizer.cc @@ -71,7 +71,7 @@ Status MetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item, optimizers.push_back(std::unique_ptr( new ArithmeticOptimizer(cfg_.arithmetic_optimization()))); } - if (cfg_.optimize_tensor_layout()) { + if (cfg_.layout_optimizer() == RewriterConfig::ON) { optimizers.push_back( std::unique_ptr(new LayoutOptimizer())); } @@ -175,7 +175,8 @@ void MetaOptimizer::Feedback(Cluster* cluster, const GrapplerItem& item, } bool MetaOptimizerEnabled(const RewriterConfig& cfg) { - return !cfg.disable_model_pruning() || cfg.optimize_tensor_layout() || + return !cfg.disable_model_pruning() || + cfg.layout_optimizer() == RewriterConfig::ON || cfg.constant_folding() != RewriterConfig::OFF || cfg.arithmetic_optimization() != RewriterConfig::OFF || cfg.auto_parallel().enable() || cfg.memory_optimization() > 1 || diff --git a/tensorflow/core/protobuf/rewriter_config.proto b/tensorflow/core/protobuf/rewriter_config.proto index 8f3457e97ce..eb74d4b1c56 100644 --- a/tensorflow/core/protobuf/rewriter_config.proto +++ b/tensorflow/core/protobuf/rewriter_config.proto @@ -30,7 +30,7 @@ message RewriterConfig { } // Optimize tensor layouts - bool optimize_tensor_layout = 1; + Toggle layout_optimizer = 1; // Fold constants (default is ON) Toggle constant_folding = 3; // Arithmetic optimizations (default is ON) diff --git a/tensorflow/python/grappler/layout_optimizer_test.py b/tensorflow/python/grappler/layout_optimizer_test.py index bc9d9104473..9ac33fbb4ad 100644 --- a/tensorflow/python/grappler/layout_optimizer_test.py +++ b/tensorflow/python/grappler/layout_optimizer_test.py @@ -88,8 +88,12 @@ def loop(): def get_config(layout_optimizer=True): - rewrite_options = rewriter_config_pb2.RewriterConfig( - optimize_tensor_layout=layout_optimizer) + if layout_optimizer: + rewrite_options = rewriter_config_pb2.RewriterConfig( + layout_optimizer=rewriter_config_pb2.RewriterConfig.ON) + else: + rewrite_options = rewriter_config_pb2.RewriterConfig( + layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF) graph_options = config_pb2.GraphOptions( rewrite_options=rewrite_options, build_cost_model=1) config = config_pb2.ConfigProto(graph_options=graph_options) @@ -194,7 +198,7 @@ class 
LayoutOptimizerTest(test.TestCase):
     meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
     rewrite_options = rewriter_config_pb2.RewriterConfig(
-        optimize_tensor_layout=True)
+        layout_optimizer=rewriter_config_pb2.RewriterConfig.ON)
     optimized_graph = tf_optimizer.OptimizeGraph(rewrite_options, meta_graph)
     found = 0

From 1bc367859c6dc3c3ab17fad25198f9fb25132e2f Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 14 Nov 2017 15:33:12 -0800
Subject: [PATCH 033/104] A few profiler improvements.

1. Op view proto copy uses too much memory and time, optimized.
2. Add a hint to use "bazel-bin" instead of "bazel run"
3. Make proto string parsing explicit (seems no longer throwing error)

PiperOrigin-RevId: 175745677
---
 .../core/profiler/g3doc/profiler_ui.jpg      | Bin 220483 -> 190596 bytes
 .../core/profiler/internal/tfprof_op.cc      |  18 +++++------
 tensorflow/core/profiler/profiler.cc         |  13 +++++++-
 tensorflow/python/profiler/model_analyzer.py |  28 +++++++++---------
 4 files changed, 33 insertions(+), 26 deletions(-)

diff --git a/tensorflow/core/profiler/g3doc/profiler_ui.jpg b/tensorflow/core/profiler/g3doc/profiler_ui.jpg
index 36aa94502a8c3de7915fb0e388c861cd706c3af8..77346e61ae971725e163c561a813bb6c0153ad89 100644
GIT binary patch
literal 190596
[binary image data omitted]

uR{qsXD^*Rp)+1fY8-JnHb; zUVzfzH=CZVI&2c7aS;Lytqwn>3Yi3vD^tztBV1;~%g6ceqwM?&%JZ=S>;Z4$Vx=o? zJ_SgFSD?<1pcbaJWfL;80wKL{W30L9hG&nv49R|RrUh4oF7xkq17A5V1daH4CQeOr z9Cg3Tn$-LP&h5_eHSF>w8qn=0d=-9<%}R&C`#oQ;?$JgbqT)O_g@stzdu4&uaFp3dvb0UKrfVvrb-+@DJDQ&}KS+ z+QNC4nzpTA?LD#6?s?ZyZm38x8;aci=h&HvN2HmNZVio?y<@x$kSW%M`FV&fJ|MAe zzX#`)tW(;x0$3`vg*jcs%z;2sv@n*WyRet?!NlQx6WY%NTpvQo#HLQpHv#W`iqtJ$ zc~L*?X3C9h)Umc6+dmzA3{p?^u^zX4r`|#_M2IoH#Wbpb9zIxb>wQ&acadfe-yfQ?NFb@oxQlCJzryB2zMS|0> zK;cid+i4^0@VVSRcMC=*co}mNDn_=F?G%AaS+f5M12goyewKzn)aG0{J zNPSJ*eiu$$9yb??O=#R=yF|he+0ZcPY&Kq{`l2S3NOu+FSzmp^VY2dGm66*f&ibkJ z<}c&rU==%}s;mIoQIS$~_)_qi!9*3*Z%H>$*X;4RiuZ!rB-FNGmr><4HOGBJQB5UH zRmWE@LDV;SjSA?3FF(+O|YR%-i&0R4*80&Z`PPdI;iDX zr#$zau0b)Q_-El8!92BJs^hXFI2-eHMa)Lk<0<$ys#Vtj#5G%nSN%GdAvzCmB@t{{dpRI%TPn2-IDz#z_#p^#9il-ch0j60`Rj#qpf)~FmIL7QKM%4>c57;?myY5WeL{y6L-oaUdVw)7;wng!{6c>W zJ?vuHXq6epjSX!9M+(qv&ElLVEDupGXqC)}<>zjr%u?nxtP9B?3H9r&_rO_?=-!mc zbC)A0`W#v+4+`^qCa3MG!)bz1tzXR3)ONKf`rSPFp3brs+>|^BbDGkt7q{$NFQIzc z1TAeP5>^>*WP6dhd$kziy%G(y_wvj60N;1#`#SwPMh7UsVBOAOeeYgrpAnGO>Lx>m z3ksnsE~<|%t~5*$O;Kdrnbr`xJhTLs!+B477GD4ln8ZF+0o;dC(~B%e=g}lc_6PTi zlExpvXS@6%u2k9dA%jJ_(7M_uY!7RkOrC6N)`i&A z2Se%(8msvTpfl_P*UYl5n5Tw*ZrkC{@O|L&3L;Io=+9J_2itX=~18_VxO5EK`*oQzCU$Y5cbU z?NG{^#>(g~y1V6uo_y!;lGzHXN$&gn03%p3E4MroL*6)b;d>tZ%y2Zt?G-K(n-qG6K1x6}t`(47{VqhM=YD(smi{K26iM);UtlB2 zSD)MtKf{s-_369D)wQqYEo57taD8N^ew(q-U0LLyeGk9Vnw0qZyE!k6&WE9W_B0$@ z$RQ=LX{{6DQqd6&bm-JoTp1^S^rE#3;*XvrTC@c*k2O-PqzOD4Wfi-!O?84-A6Fz< zMm!YihRgQKBqW$N!g?m@^~mP+Fm6W1p>oEdNjj{F`TyE4aRzbGAybr< z8Bhl8TfoSP{#_##_!N&LA z0~8j>e?4!0@+SlJy-891|;XG8IlvXNt{p*6&-QB-sUKWRW#UiwE zCG5)Ful0Py&hF+tSJJ7)gOgHu*!e4;cZ2sw_OLDRZxAD^lacnla`M{Daha(qGOiIMr3r~By zu=cBQ9WB>9Q9!2-@M%$)A)yTe#XzAkaXzE0&YJ~I)(ZboAPd)L%nrywG zS~|4`v_Jm(E0bK*a5LkiXEce{7@sJULKTF8PmPP!SUa2M>80jNUqd4-I(K>rg|rUK z$JJ!Mr0>0?9X&gxkdiToLz^UeK+Vps^viFyFjEx-~Ye?$5KV|?TvbE(GY z3lO(VXK1p$$2lqbQg#0pKtT}kMg`Hk40W4TPnD;(KXfW@e5}+Q{v`2bkr7|tIw`}K zE+r-WvZE=KV*k)J;m*>POKov6NBfmih~xQH;0H1IAiQ`3Y{Q#v(-+Jj)BzSp5bLfKI#5*I}RVZ8Y(4U;dal*$UpRlj3hKVGd* z)P8dpGxiZr62kcwz`B2U6YA-FBk^1tyXB}N;QlS(i7nb!`3)@$QBbu)LTBGBGr~4e z$@G{=t#c+z=<)JioF*MDjDTx7f8DSpjs^Kk>w4WlgXYP%qk~kc&{POhTDC6_qg6f! zZ^xM}Fy44QUKN<|h*|IGXT|Y-r?PiSd2c?a zIvv6YcF{W?syxF;ie;$qcPZ<0DSgMX`R7gJqpCF?G1FMmj{w}%XjJ_U^s z)i}3=R((NRBR*j4i~c-ZbEZW*^^%JO(x33rZ_Me{*Wsy^g&H=LeN#;6Z%T2yemD@9 zuf^w^bay~|bDW2tgrpTO{Lr&AE_!exo7$N!=H3@pUo*|4^cNepE1&XERJz9BaE&@) zm+Z}G-|26no2Ykw4J(uAz)frD4hZetE2_TQf{b6Hy#-9u|GEV{l{GmUvr>M0wRxEW zm`3lk{^qM{uefl;u}V72B877_yFhZl^2Xev0D*~iOJ$~;Izt4=ufXn@XgTMa@`ruq ztglMfs_xPlC^$dnNT#ZeU5J-DSaCfKq0BnyTM_VOzwwY!TI!}rNs9Jdkk>wbSKbnF zaxx;rbS{w*nDS_lsq67&Z^K=byJTdBfYU0G;Y~t-LE)}BUd0&l(WcfoZ)LIcUdls# zPWXd`fYikIM5A;n&~6DOjn@jgLkFjNu?;Yhq=l&|ZT}^-WgMO!_G)X1x+-!g7CuY9 z8>jJgen)&ZU>11tn4iy(mTA<2-IAQOESCFF8lUQf3>G3Mgw&dl;aMB>>l*7~nVzCH zC5lGHe(@C>x+(T&?;I78@EIs=R3wK+e~4PQO1ZfD5!QZAjk?z`xmGh*U^e%}PUL#} zAmTyXnpxfK!!Z>%T+IX3@2#^p@}z!?3tlG^%v`G(Og`PJqjC>+#r)cPZ$6fQz=qWo z-Pm5U(2AT?4Y$!dOuY&+MJC8jZ6WOt(bq*tony@ewDDVQTtM!g{VJV+fNR0Fk=JO| zx=O^$Xnxzum`Rf-G&q=w8#oM%RDM@Z5g*c;RRia4R?%AnHvzF}XRrunp$0@w&Nh5@ zGU@Y7@@XA(Pt;d2d$wHTjf?L5oI2aL{s+?=T6`a9de+aDa7FDa3u9@(S2YpV*@b208l$vH1n}}U69oMY$h*LIzjtr5{`sFFW^lz)n zOn2bkLT30;_>?>)~l|W&& zNfA#Br^NRU%| z^|L5Mc}U7iUH#@AzzHaVk#64tX2WqxKmPQM54@@CqKegOtL9FdLc&a7Q%}&o)1m

&6?M3%oyc#azwr+$h>-s3=_XEP@x=*M|q>-5-HNhXP z_!DXVOK9=kC!rm$9mO;us#O)3wI^0Se`#*Ma7|i0zrH5X((CFxZNEVdz^IO1c^p=YJ z8#%rsr#B@F0qRG#Z`=#O=NIMMreWc{@f*GC(a&4CKVld??ldNSs6R?K4{IxxKc)OF z{0G$+i!j(k1!r6epvA{f9w{2zN^-@Uffm@KJoE#Q z%`uX5>H!yg-xdnLutxEa!U<%|@d&3i#@t9zl?5j#lFwb)=e_Sn{8`z4k}tQAC~r1Lpv zjs+P=)x~lE&!H#DHTKf`c~K$nGcR~lA|=K_#{UTFp}Bjv5$$lGcMD*@1zdB>uNY-q z-G!~Up!+IgH2Z(gv?WWB%HNg3h9cCX)dnAf3Hh_M6r{BeY)V@hZ=!s zJn0)sIxoaYuyy_9WEHg>^f(`#MbZZy(|M(Zx{LNQq)$T8cQ1$CP-}&u!|(Lbs(U;u zzfu%*2R34jfT2eaMRcFc^!ZN9%BRNPV(xA@ZUH!guxOT}ALhTsfCVMo0)mrUx^mDF zZDk1bYD+O;9)rdK-dWoq5BPaRdj^ZGmEw7 z868?7&GbjaY1wA57hrT1v-A_8aTV8coXD2Sd7zxFi+fGbexn_2&#qbQ-*n?uKhdp2 zpMwK?OWzf;VqYZ3gJ3>a2{6f=@2f_!RlM(}^d1q3e@M>Aod+yi2B>}}f)V@RwK3m2 zhIWT#AJv>IuC~h#7S*SRIn$fB6SmO-qn}3Ye3=p4D4-^$dqNmjheq=@gG9!JY0Ta` zopusFX~DWuc@6UI5Q4d$3(hWfr>x5{2G}AXD+07Wsb*8?B?}r5YpQW#vO0AZ6zWN+ z&PL1E5e;MY6=V~7Ydpp~unxE7U3vqf3?f2uo9R42po1Kl3}?43BNj&mNI_3<3KTc2 zUaETYx3zxfqDi_lE%x-I<10Tcq;S%2k+m$@=;yx*9A}~E`qA-yB%`n+JsQM#%ir`2 ze^2!IV!vyUs5hvmN{H;LfB4b+8NJ|o9>qxNCIJi*CHY+q`LMqcgsi`n7C_`*H3VHN z;ZJlpA>Ot;f-4dUq3L3&4r?tX0>3w#Luww`mY+CKiz&^vcT{V!vUiSa%XHwA^pf}4 zxX-Y;cL}4Hg@1!%7~A4^!WR2GA=3SOdhqv#1Df9<{7=zP@vjluqb1)B9q5F0nt`P3 zM2QOgt&k2O*3QQe!Ycx}yR-YH9WA(!DJk;|SAXinTts~DfwQ^9Oa~d~d$>0@)gwIi z5CA#|_OF#x{&(7ko*@1M^Q8YD(2eCcx~(qZZb|J=q3LEXF7-9T*`Gf#&nLc@-!WZf zVt_pVJ(UNq!D}oWjsGL)Hq;LLweqhj{EsWxU* zBK>zK_Sj1pBGQo{f3W4)&+3|m^|mWV8`gQZIRyzinNOa1zaK*WlS14$xe~y#yic&oBaWoq(=%@tlgwzu4ipzVlir5$2*fJFRx*}7Uj9jPtcn6r@5#f*aZ}e&ZzDRjgoqKdO zD8l#P5JkSPb9%b;g3#HiFxPDHvcY8zz2F&FQc{u`cJE@yL=T-@E2UT1V0i>@MB}ec zc~TU;Ps6JC-dPqxe3d0k=0)CC{m+S(nbSdsC7p&7`EQ<7LJj!LW)1NVJL2&@=T+w{ z6o&CK(5s#gM^iVLXvR=0nn$k8!FV)P+%E|DU87dsBaF zBoY7V;zE+dpu*<-Enuh=^_0ps0#N+t1f}nV{IA_xKpc)q<}03mOE3oxA+OLr!3u`< zpYJaIIiZ~4ehZLmxg<46;l!-@b3*6OW&M9zuqt7^+@X*F50jjwyY9%p9HC;f>((f5 z>$4Eo@k!_t*-4dyAxWh3er3Nzi8KGp%4${I%UqXtb=ZUn-`8IwhVkAb!8gT7jBD8) zQ=8aW8Rfvtq5P*~rc~wiwWrQyJC5J|cgcs<^0QPgA2K@>C${XC- z?rSCg$hV26v|`S@BWphQ{YvIzk_UZbyPN?50*DEj6W7 ztKyve%#+*Dul}B1=@NEaKc8)}y)Rc9Xzve_GwjA%&A>%F%hsUy(5ErHJyfkmEj5jl ziXDa{W4~!JHn?v@FWMzV9oI;*_QSh+x4%gGrL( z)Yr$Oz##wq>9V@JuhT;Jzcc5v?1Pq#YQX48AM5xePDP|H*m~3x+o0n8`y}gR7rq~3 z*N10TjNeKw^p6MV9~5VSYwAB8E$&kELanH+ou!_JZ`kx;^Rq?I@jhB(QlVPDT53Ba z+-0Vjs)QkoO`plnrsG7mntLCl_DxsBR_^}_b>D;)_FT94r%MaG>(53sU$k9)ja^G4 zP?~I%4Z4^dGk0vPc`w^oeFBW*9TCKN03=<*QPz3?NX!Xlzl3kvO&Dg;k|K${Hwd-z zl?b_?R{S&mWn9JsQMkC-2~@A_9guLMKQ<1z-J`hOS1=dwK5Vj(UKfH8%I$$(3y zx8*2D8d>~TQdNPeS`-Ixb$PDVh#i{@hZ*F+Ho!UQ!a^8hU;YkKbj3MT+;4ErC#;pr zH7}ctwox+26imHU=G@9%u5$3d*n97&CZDEZIEac$6{I7eG?k{(lon8FB7!K=i3&&w z5RevX1Qd{}AfR+9f>e-ab;1HBfw76Mu=t zX_aw;0(&=9A~7Llz*D_F4ObQFi1JAL!h0?E<8`B2#{Uh*+-Cc1`RSnzPX6s|S-CW%%=KQU`3vh> zl4tsiigU+xZ&C~Ate<%&a!2eERROJ-%otS$#LRAgDHjV;N*PZP=tOfV8tcneuxw{|fwW4jvT zW@Ucl#a;VV} zapu9ty=_HzRG&6TQ8??^WbTdDa>QWMDp|+HvqyWJ+4h>8AftD=+i7N7@G*;7NN?o8 zJJE>!L&|Q8REqHAxy{744tVlfbZk|D8~LmThu{%mI2ARA{RRJt5Zf~JbJny^dbW_z zD%;yzvV^m>`UO>7hiy1zV02X(<4h0scS_X?Nt<{M9S?)UT?apnJiJW&0^!6m&mY8E z70U!O;u$|jFx*RVEh#s&>KiyystP8pkN5{(P3&US;f)_0TKc`aNt_iuxe+q+isFK~ zLe@-uX>02aYECWU4k|~OTInKCVjs0bS#Av;;nZ-@z~w6^HePKPk>{K4K*ChYFqIuP z5hr>kPv+e&d*HUbF@E_;?m)keX-GW(qzwnU4z-`ilRt>GxTeWD<6u;oUFfk>k3kP zPOYdbxu^Kmyd4()?23VW@~58JBXJ47M{B60NAJiMCbEolzrt819a75CcnMSwb!Xn! 
z5?LA!)NemVzur#Q$!#7u#qz|ACQd(|Jl;ZL+(d{VilwFFq8U&cZKcr6wDRT(ugw<- z6wkKRW(tx$63dckzGO~i7M-AT7tE<+1XFN1sI^`N5}T-X|scklGim zAH~Lnak{m%mhI>|Tk*?}Wcok&9-A;aYG(cVz2Z}Uz403t;t`PeJbUp-<=eX-{d)Z=pqYhM>*pI8u&A2EGEs4`@#8o#bv!WNy7Z50k5jiWYcRQHl> zz8*2j8a-v2gTH)7*<3H=(+5-PXB-e^7JRv8?L3zz;bMlTT->Ngs7S&~eSN;r@oP#P zc$2S;*gH9x zZ-I+ZF2qX(NSF~qlizJ>TfJYu>C)a0`xIT!5K2}WHnQIrCbSS{Imvx_HgEr6GH28X zZQsSKc=W^D-E5;I%JI(BE`-zEkunTi+@*#`{91g4sr?m%;PSry79CU{^9@d3bJE8c z(qTUQ9>T3TXrJv`cu52uSD_nn$#eAh@Z`earN!5OVE>t`H;>>vxd3SMash;zHa0j6PVRvjN{%D9;GT_19~x7em3<+_K)aV zn$*_icc(W-u3T>(xT^AuIS4WP}-3P|oi?uQwSUDEaToJE9dpCT1&aiabKAG5Hb(>f8x84lV9TOb}> zK`6@vN(G#84~C|E`C-6yI5I=(Oh9za`Pd1*eum1+{s$SKHQ3uj?wa2$m6s zl*>Y+Egv>L%?zwyZj7qrBp(ZctmF!gEe5P+dz9I%dYNbz8syGAD(WAP^|*K8QF3Ss zt*jf*WZ>y>$M-KyNNpx;oy04#R71zA*!*S>rVFO;oMdQ(1fz;hu~ZIlJROc=<)va< z8ulNiE`s^Ptjaoww@1{CwX7b!G%%haaTOK5r^+!e|020DSR!(vQte5?^M>>A7Qvg5 zqr!QSrdd`!RR@C`$m{8i28_O(jgJ%$Dx}O8_m6sXIPTVs$gPg#J!JFINkt$~CBMip z*96Kitn$)urq^I6Gx`$CL~#jgDN?;bc2US{BHWSo@=U(`sW19)$LYna;igxLj8B&g z`>P$qcwHUUs4EhJ6tr<44o>?}%6Csz!EuP1bCoJMj7poQ+gL6?i>(v&MO8nIg44V` z{!9G6DiIDcY4tpJZ}er#+mwv?nfPIikB?o%n`DmyYuuwDwyt8Y3(e}Cun({GxYglm z3%vHP)?Ug?7{1y+TE}y{i2}P^v3+~KxXOv>FKlg2pR8Uyg4kD(-G_{j%)+P86vZiWfchrTR&WK8!6DD zy-`V6d|9_?=E7E}=74fGXZ4-Wvo&AJd){&K0!#4G1_8>)S(oF>M>DOsilXC25rNXQ z{^Byt+sbm0BNBUM2&k_poifkxlX>G z-2Km7m7mm76&SRLXB?c27Y{wTZ=I2Se>cY@6hb&uTWv7xXM%XdCh{VgaPdlMhj@bA zt+11%R_=$_KP9!^=1EZqj=DtM#3S2Akw(ZElYfE5Jd&ruMa-1H?egX>i~h3ecQUN! zOLWI1_6EJ9;OpYs#mYxG3dxHN*>yLqL}xZVNT8|^Jn8UZ&H=QEA9 zjBV1n5~j1~Aj_0D<=#6&$bw3lQ4n7(sslEL`RX8h+yloOG1^Ps?d;pBUZLH29~arr ztR-wzZ#`=~PdCj|TTT&Ld3*Gctj^0>W`n6O+9&qnc?=EvYm2$5%r**nTKT=TsOn5Z z9@EsmgVKx0x?$nACMWZ9`Oa$CI$JJB!{N`&EiE1-p4HYHbPRKudg}!3XC7YO0(J+K zArao>O*c3re;DTwe!W-xS+VhD(z;%ykLakmuVMCcfirv9sV6pkq-ZL{hAK1(8blf3 z{oWS+1|jG91{uvHPbYl_4#}jsq^-^2F~uJ6VQO=VL_!mKCBRVzOaN*c3LF#$Nq%o{0SZYv%2|zssr8r(gmGbn<4?t4~w}(#4?R|E}LLN5^-g&IXx5l@!x%e1rT$a`ymm zavIprh9`lOY!ZVN$E|Nuth5rTfIloKnb5hbw)@hW^SLS{)Bm7}{jDUd6V|9$||1EBc9m;lW5d9Vo@F+z=jmQ8jloBY`_ z{svjcH;COVY=y4;hmfLnJxH{mYD|++ZH_y{EB!${IdYO;EP}MJkt9ym`5SnCC;iK> zYUW82AHk-}d{@yOF}qSkyGdh(A$&*riu0g4$(I?nzKv;!+;Ro3ujGfl+utBSxGf3I z3cGLwKH6uNTq_)%D4fX8S>=0C)UKkT!#|1w;Xe8s!N0KInU8i#N_{UGK0p7@Z@{8g zoc=+s9S|+d2aG_A)HleUv7H1sfJKU-n+FDg9Ib(8=pRp$DD0Uw=r0-*btoF7DYhxZ zhl!NoAVp3(X|M}ewRv2@1l$iM;N4H(1-e!dbap7=Co2dL`Ppc<_pdC1(mw?YbsYfw z>|yd;U()toyKj)iEmSCA&6sKE3VSC8PzIkSf4U7dEtP@V8EA#0QfnmOGf-s5Y>{>Z za@WJoyw67QvkjsZ6#GXx=x$IA(r_2%7w-QBwmU=f4+1-r0BUC-f8x3$sD3-RNBuK8 z^xG69KX=#f?1QR-B?BQrT>Z&hJHxYmo#d;Vv@@{byMC8NTOp?*B3=eUwLA4@-`O+<>;xdUl>WHVCIEtDtVwRI;5%Nln0(K`w`vfM$} z+QdV6;f=Rl6U7x}rx-k}sUX+dARG|sL<((9ZAJeRX2us zJ+TMD>PgEKL+ME6Et)6?&JiGSr2Otjyz!A0eG>5;5tD?_1p;C(@cvXbqE6XczzcDrKAVB~~3WkM*n!V9IC=n;C>&BDiE zvIDMmQ}r&=TqVd%?Qpc}psG2Apz+PWJvd~HfbY#qqVJ^FoV3mE8qKcH3>WnLkaduZ zO30k{N#g0V?wsA$W>&||b+5?~<3rD)$_E#gq7)+ad8w)*DMWH-*!sSK5p(_MOwFZBC3Q zF^U^_^uK?3GFtrD#jLvX{WG5}rY-k)2o}FZ>qqG;^4|8L@LRmz#pE*CL&j=9La#{9 z!4Q9A!WoDG+o-ylpyNmTXBUv9}b%@lp+kEVLsG{{qk z5I}w+C-c!CI=da1g43+gU?s87n(%ZH9Y;2VRyFoCYUBAjKdZai&Fb%~?ASQG@oGw? 
z{lZ4ZW3ik0RaD5i8~ErKIb1Yvi@%L@W)wvBFzDz9=3*UMxli5{ed+*r z9QO))Yu|eHnn7q?XeuE>^Ps_OfO09m$lX1Gjs1magiiYKqz!$N&=`PHEq zRu=aXq&w?+NT++udY!`Q2CVIC_dj=#t33T4{kr0yF^{EdIPZC98Sm?DdEI0r z|6(1d?!!v`QDdIkCpqOq6)$G+&E^>=AKm5qGNBx4b2V@1a#InfJu*iFy7r_ze9af1 z7y9api2+f%T~@$6U!-)#M);1()|CijnS)neN~w0!?R{^VbSgPpDw1eL;KLg>g5w)2 zEvB_>=y1_hiFm+b)m6K;#DvgQK-Gu$q=rHzpM+MF%U^zl4;)er^5{{#2 zk#RNJaF;hG(=qbAuhlJ`^-Q%EZVU3zRUhz*c;HoLTS~!cH743eAOvk;vj}1ucz%$) zF1SIG&5V-=(~-*NQh2vR`r~66CTA+Vs?Sye%bl`F}US1DsNhXC~kD|9aHv80y(UM zbE^(-z1Be4FL>NkEf*`qM|B-uYM*x7cd-@1nY3j_RPL!_^E`)}SPLHQ!b$ZLufMc% z?IS)m{7~ZJc=@K)fl}p=eXuy8bNxM~;eIIA)rzDL3@*$Ps8sV+@&x;7DR+>=Aw%zT zy?mssCJQ=5-G-i7Y%%JRt^al4d-#7-Z{<27ysaZ9FE4On}M>q#964;bR@aGHl1$e(v$dXn}@zxf0`-owp`8Dt(Wp)l=V11RyUaIZv_8C) zygosiy&G;n1kyB-tsDmKpi@JpEmFKD!1GLd=nCfhAS=z_{I zUCTR$N61#p|A$LSI2P+r8v=!>p+5Y({H?tcj0n zf=(pw`T2Jsce91GLAsEh`Rm8ePs_AHB-yS?nJoP;R<6RBo0 z{vMnTw$BXe32dml-3u75CHcUVDxg$8%pc9)iCC_L5jD7Q#YAB5*a@5u21-V10>zrG z$s8W&4AVh^Qz(J~=@d|YKwy6TH%J{&bN^`PeA_=2XNrPggU_;a6@j_8Kgl-5AD`;v zuk!x0t=n+-RgfQlabHg=v`^pwoqRp)sRj8V$b!}Ryoa^I3ELZjZd|~K@RydCS_%vl z^T}>7gMw3jyeiTa-iOj635PZ!f{eaSlUf3n#ij*YK`36y{idS>XuAX3U8m2PeuH@J z^e=6cc8^H4yNjPt{Ylhc{JmLfeNzogUi$LXp@7V+;2X1ZBI10 zt$aGfVxQ?we%9-CfM@Ri%U{scLdMn{an|7HkLf)I{}tH(<~xogPhH>Mw*teABSnyw z>!gdA{iv_*U`yBKs*|0>>WM__hGT=n_YksgpkH?t4Qe0 zYF&AD=a$l(1-FKuE;JdZN3ox%@_7MbYAaAxV2{k2qd{Ie}=_&bWvm>(SRqsWJcj$e3#9^)f zJj4b9aTe%!lUoTrL}>V6`Z5`9%w1#jPICVc21F{2$-3XYFD;fyp4s1j>|uwc$*m## zA%#=>yGVQ+Wp)GZ?Y22H>yes_Z&!nT|p5olpP z=V+H9bA^k_)_Js5FXdK4$WqQ#<#V6Jd}61kzaJ#qIZXEN{<2(Q_-*rV5Mx@>{qB3O z22qWxG22eiO>&Tq7F;cKe|Nd$`^vP(!GTJ_T22OVJ~Rzvm2_3^TsbOoyya-vbL;4~ zW2Vu^==SYR)a2;N{m-r>qL9IJk<_MwpkEmQv!@v3chd%fvuc{M*TCW8?EG!|ZuE8} z_#%tJV@VQ1Y1dQoP-cUuR#74n*vP&}!Iqmr9pO|HT-u z)Zsp)oStvIpZ`6@07E3lyfL%IsYw(24g>}CK{iy zO-4)FTMdlMVEs8{*pRfc7fN8D0GHM(CEZ$E!6bN1k6PySla8Q6q03|I=DK?&tTTNLPY;@qMNgObbeUk(1%hQ(*Ox1v)p_j9p=zyxcPNZMXdZ)AM|P6#gKglRTVOe#{;Q3Y*4G~H zuuoMh4iaElr$CLX`jINsKn_=3V1TQM!Y6H$Y5t%_uMQSQ#I=0 zD*Pjo1Sl--z2bU9m-5eVTz?n+$oYRDs19T6pyqn-Mla)9((azCF|OC0`w+Ky$e@MQ z`4D$y7*B;fz7zCrfC?kwYMT8FuR zgNUW3l(o)t0iKRP;o#I2@?LrP7I^e7u5XI zvEW?jQ>{Z%{ibnl3#Z?nC`~xe=aK&w_1yj6OtoUi6Br%0_~%G&sIgjLg>3@5Q6h{0 z91Coa3?-x8&(Jq^n5h_&EVSYN$#&h}PvKwV@;4@cw9(>|54~EiM+$rG@AIl7_h>9u zxn8Q<`t)zbD=)Hc=gbj~R50psPF4<;rSwA6{H1uwQr*kN8x^OY361LB{?z=;IPFg6 zi_Lh+^Kbvu+wue(J8F{Hh!)va(k`j5sElH$)eebv8H4<*ey?c+SBC`H=#T)}pnw_0o)9anHZ! 
zWt(&QM}?B8C!WPsno!})UxrgBv_u$cI6iT660t~#9aVup;^lIiyelU4;&BMooeB+F zl3f4xIv9}_i1GKe`ALj@gX;Q`Rsm8hbV`B#+O-Yn3YgoaWs^36@{<5jQ&JfRi6T^p z(@UtCRFV@JA0rnKKMk(sRG^W+4Bu$m`}Bsl=C(NMQ~3WUbrjBd-P#TQx{VeNk zpQjs|_s^o9j45t8h~fa%^eWFah~aCxt`Ku#N5%9D@{MOXO zS)cGEAEQYh97Thq2`{GNZRcumf^Cw4N3P?1?2VhTA&!mqCHMtZ?=Pq2ZJ^}|J?B2& z%0A4thinb8tMN==E%!xiU@69ji8`L43pCmlp*Okaa@ER3nS4yg*Ot$VEFrHa+yvSIqBpljOHT|(e zh4s|Z3+~b}Ub7H7S`$!>|F6abh`eIINRT}$3?H!w6CnzwULK;9@;;8%Fvg-|Cu^2I z9L(sNDh<5M8*%>jhe{g}VZLRB_a~oTGav~NG{y#Dql1`0Zd{gCFOHBPfuyEUV4NKp_c1H~GO#*%F9s$E8-@dzVd1k1br|-$**SbQq>g`ZP?9@Z6V) zjK!{a!D=^cXAO8I+0x1l?yA;RnFS*C{m`-mNYkF;W=;SDvz>v?KR>8<;Do}>hIj9v;g(|=C+J~_qAuzCI2y(7*})KZVCc=ZWTmB<_2 zLzLJ8#2P@t8n~L6@~Jmp&|g zG2{<-Hnnhgz#E7wSbq?>_rktPE&kUF5e8+>4cHmGp z63;PALq?VFIF5OJ>0b|I`Q%jqJC#w5V%#WctHweLkP(y#cWg}?xZr4Didn718K+5Z z(7UykO_taeo=P)OKJqv_e%^HLZtu!8kxwANiBeCC_)6Wi9 zzMOk|kwXus`%YN%0>KTN%Z;?|*LYjpF2Vj*aTF=rpKGGZ);DO}9k%_t^_~<%v0@0h z&wa906p^)aco6f@dk@n*^=0u& z{E21zIu0L?df1^=4eg`yy_X3W_dmg2^l)|H56jOOY+-mN+;M(iJeuqFgYyi&*IRh3 zUu$(V-KWj+RKg$ct#Rj@EXaPF9u{Z88f`UBb0IvLH{vP;LazqdqnaZuH8$y!HC;IS z_~UUjx+>sGqF@DOw#PHObY#bgG+!nCYtLGkq)xnlVz?0B>kC%VL$VqI??NdRD{)n= z>@x;;gO;tCrEV^+)!xn&e*bzos@dtm$eX~%K8o{0%oa*GS`fyBzdBVX-5ZeJwn7P_ zye=40&IP-f*QX~v+4HeLb*5}0VS-)%KGUO8-)c+pVImaZTyJnm`sLF#bTk9=;ZMQb z)Wt!PXBD?Ts9+7aaFM$P6_@w|M_#Y;E?S-?7@J<+|HW!ED(CFGShKVtHT!!0Jyyh0 zQ?sSOs5@2-9S1bB*z-F{49HKkfxvD{PB1YHy#{?ly_TA#Y3_JL?P$@p!6OM~>ZN=v z&)n3e6nSok$$mLawyNSU&)1s3V1woK9=TS~k zuxMD!JD{w$Q@^6{Q;uu59VRv~7F$q8&?sHY>ZV?Lf=TE@8Hw(1LJbvZ!*_|kw?Mnm&xcW=im z$O|`jcF>KT$`^L-eU&P^r0ucbu%~2;y2FQyLgBR}UgqONW0zkue3>;@E6Yz3-ua-@J(VMkI=-$7@*w1*#!ouztuo9b3|>50%^l)BQIk6_2e1minwgx1hQ-7kopZDg3ZpSJ3$RefQKj zUKTRNK^rQB8CN`PO&fVe_qzMd6&Ux&rFHt8%@GBHUlceRa_Uy&IMJsznwyRmTpA(? zERKmc!KS(S`?7261DHPUF=!{#l_y@#lXI)N1iIU+1aDgV;Z>mhv}j7-62cq z^||w3-xxhEJbU8)fYOO$!F12HU=t8d%UQP^|4P}W(d4e&mS%<8|1LkHM7sradb?M(10lpY$W(Y2pF zQvg15JnUTU^Gj(%SykgYHL4dS6=c`v)p7X^T!)KA=|n=YO5N%Nq@iVb{Qj1g=&FxX zLhIMbZHu1i$yJ*7R|XeI-uIG1;A0!)yoZ(5pV4HX_}cx+_hQcYH8Xe)?jJrQo#+Vi z{Qy(MgFcWf!40fk!@@g?YBSg2Dn?{~-SRMdc=m}4~Ks}xJ>fg=U{hdSJ@Xq@90o=`k=KwF*j zh%?XeDiy*boMW9ge+R?W&MIcbm(z`!7!COvnP9;t8xg)|PpQxhZcIT6x-D-mU+24V zKUrd1Yy=a;jSuzD5PeWHJTO48Cavo)=TiE?nVL(-5n>-*smSeF4LQ!pYEgU!r~TuA zS*?{Fu{8mh7m7WH0i5MByu(;JxYglz;jEgt%{kQqT)>*eqs|)XhNwJvk}c>$P}Z7< zau83MnaI|Zyvx@8MA#q)Lm0XN*poy?-{O@L{jhG#wXU0XzQd*+7KMQWCjEnh#m&#` z$owsq`g)PiPJKwG-ne$3Hm5ZV@s!@Q1aeR9<_`NpFA%_jcToNb*gl5u>q&C)HT3Xd z$odeFny1RAI?nlkPFZn(C8Z)xAgJ+c>uCO=1o?i&-n<+MxUtAk7q7f-Jl~egSry(V zbsP4Vl_!{?BJE4zLU@rVin@H(IR}DAH5B*Q$b=}>A-q!IA4}=@VnCwdWkJP8SU!XD zSahIx!829LNChsd&z|aW*J9UB+}7&3zMTk@?9=n{3Ic{r8ww+7Vsq3d<$Gy|+$kCi z5%h#Ry>$j&^=ak8AGxf;7F37$&X~#s-a8aR-gRi>4#F>y*RPSeFC)-X5Oi!YAn9i1 z^OYp&Ua7N988c5Vq9$67H!ex!jwA0J^_>4e?T)GDA|F7yj=&yz3XU3!94Ra5r0-b2 zn$R(OTA`LlN|1YfN}TNT#Vt*O-C}i*$;qN_+hSoF(vmu*V5oe@=)}U5nptQB%NW&C zLR<6t5#LNbbGKYM%CWVW(MuidS1XGXoXtw%ay`q+W~U{aQjZn9xn$$Ve!HP@!&|xW z6WAdik_DE&LsPYVPMb%&$0a!>yH~RZ#V%vQdPwSO`OXJz6Z^(5`vP4~6@71SK}pTD z9{u|UpOF@rLOY2Gc?W*Cuv?g@>yc~NFPkk<9~LX$_0f(``%(mBS(reikp9AX3%>aK zUO)y8g19u|(l3EX1LC--1^5qKcfZ4bt=BEdcSLm8JFv%a1yQL#*>&nfGEFdvVelde z3GU9`5#Sy5df^3_4Q(+$Cc}i3`_iHWO=sGTFIV|f zL2lNDlsO*A>Br!1o63W`57X5dG>p36?0-WL9Rc+=3zI*$43$yY4}Q%CbDYG~2#GPVLa;_8-lG_*UFM_}nh<(4qhor=$h{BuZk|Yr$%ige#B` z-N3>Dv84d9jp~n(0dqu6T^DBRVgAqMatC7UH^kZd(n$op@|fRMr;?aN0J&s^f1(`? 
z(Ee8t$-hAWJ^EsP6YQT_pg%evZG9Osi^1NRg)P@~l6D9P$Rq!~l3x%%+SvrC6#b++ zs6-yjN3z1dKwfH5=z>1;TnBw-O8VYsRL%P?5ZV4P<9q+^J^fE0bU+;ff7H>myN;sH zp8L|R1bykG->3jKd>Hj=hk-P|s~Y2g^9o5!H+Rsp{ed1}uYUo_{|WN52Rk4!JGD^$ zsAb}05m^ryp%2~AsfS5F1>jAw<_6Z{r@$D2tYg+mtJFK|D(|4x337Btr2{H^Sj9s8X3a^lh@k4yz3ZkkCNd^ISi}ac0q~Jv zRv1iB3RKls<6#iEXedkN^_v0*>xnw&9eRwk>bu}(A~=@3zIR70&&{gq*# z^`X~ueF>3pO`FgoTFsUs0)eWK(3Iy`7w&_MIvlSRAQbsJZ+}}1?^Q@JghwHa?aPCDxhkWYwhgh5Y*wd$zH`+uf=K9PNC zt36Dt_LkPZ*^`ey#vB%UlJLIvltW067vxu_&42el<9p&2V-2Fjx%Q%{US|bIM26(c zP0NJ)0#;>8cf)(RImV7sJ9l55BSCX8b6{d?W4kY}iF2)k1)7Ku9*sf|idJXN+f_=* z#Tyh&jfRfp2s+S3v7K3Sn7ktvZ+7oY;_8AtE6SfF(S-nFMT;yEb|<|XUFf}nH(Ed2 zA3wGp6tg!&)-h;ftC3AAP*y3=yHDEU;Y}-9Mb~tzYx<2gs?jRankSx3IZ|ad-aF|j z2hw-I=)6UU2g66YLk=4wodzX~`kaFmoN|ReH?bB~$|(0*eS|1~gGi1Ghl$-(To~f& zCNUz>dckUKLM=?5#`Ya(eONpQ?Hk&!K-}5;+WqJ9YT|j8pi~#v|Lp`XESJL6D2}y z?w>J4&@~|@L&vdoL4I-+ox>%zq|`i5=?)UBCG(knWbmkbS|XRf8e{xLtK#&k=TVoV zmC#{I+;k}9`J%s z_TFM-ku`lsWK4yVcnDCehFDFucp`TNfg-!!;%LEfeU4eCvP+FJ&&-u&fNjo+U|z+J z?7dd?8n+&Nr#JsAjmW%HD`TspaM>p)@|FE%$HsH%ovpQAuzD#I)&jhlZWV9Q-T})s z(y2uC_X=}WhwY6W76hkcj9uR)9VYR5GWv~}rx)iHBwzH{&Jce1=)M)3TjxZG!4NSO zE>VrHLqrceENHMszk@Om&vtrqdh*~Rk@8b|1Wd9GN)r=ZNhc3i zFz>BkjBL5Ge+)_9bpM2$jP1}x_Rm1g+U8UYq9zcf8>gAB?hWk2Zb6*veYzC>L3Bknf z8H1?@MYig--ykI|+u%w}XW-|z^vUD*(O*+ZHf&U*;x#cmdK zfH`+sTo}a4bD6#As??RJ>}5U2X(0Dbw3UOr#XA2Xk&7S0|DZg#8Sc<8JU5eCC#5{W zefPwhXT{wMxOBQE@Jw;G`EYsa4yE;nL@(^Jm-%Rv;n~CD zTY&>t-GlxTRc?WOS1nW!I#AHrb|@((Q#o{i@OuPbxTpr zr!MmuS!6}}#6tSPq6kkxd4N^9x_b&ZzjoHzKVIa$41!Exum;mfL)|Exv)fKGz5f?r zT6P-$vn|KJ+xi2>(z`sLM1z3tBx=nShZ+OjghDEb=u!?8%4mR~)r-wp4B>p`_hhg& zMQz*8Fe1kg3g0(tnM`9yVtg4e2QJ1OnOp{mUE6`y-+6Jh8;bkhCj3>(ec9!Pp}id| z;rr)$dm?6^7#n;x(xd|C;cSb0Bv09!bC3A+-82qywu+|N38L#X}Ls<8yX9jIy*?U=W}sJ+v_oiacP{7%_lFl7J+VEbc8 z{ymVFGnB|-0@wkHHJYv}Px?6^U0M`kI~6Of0;Lq^@QVW-Q#NN&gmXL0RNQ$@fuj{r zjJX$RvDNj!if|O<5*`Yk``+pzV1!-`aBCv;E3bA(@#W(e;|Tz1!4!z{ro`nC&-Ex#hKr@eh1l+%pbT) zReZ;_U4f&KC>Y8>N1%6>72PpgJE5M>&zu}8Je^jyS6{rIJuT|{4 z;`>>vswY`$A2yZL)=c`k*ew$0S3P&EdMf`~UpcsC{ChOzG)$DK zb5mgZ5(f9ZBk+ur`{%<6yI<|}ZFhlk_Y>sLz&kil%y!pO&h=+4D8kJ?u9sl9^PTjd zceb>h+F<0~?u=X>P*^9x*3r37Y+r0z!~UURN!vC^ zPWNNvW=|y3z(|aF0W+^qBSSk3u)}t}7gs=P%paY}1f2nBl>dQ-@qdAa!Sru5fK(b& z?!q+j2d4Vd^1Cq2S>gH@OyS^=mg_ap!{VR3ndi4Xr~=5`3&{MdJAVRwpY;J9e;4~_ zDn<=

Vc4{P%s@|F3}br(u5pS-e2~OGW8sLnXuz5HX>tL2p3_&hqi@FtXh@3yesf&*4n%!KsOI4B)Fk**LonVUT` zbfU|m+xhdYO1duw7mmM{%%OZ!v{vwe#oVP?bnl3ia~A5o>qzT%xGronY_@njiIsF* z2|FB=#31hMr{0x^_y(~rCnwks$$iCqad~s0^%V6-i<81Hv}^A?(I+P{5@I* zK@QG-8K>IP=_wU-<>0&$k{3d`2G{$-h0R63%~a_#qpHe`$#eatZQ{f**ZKZ?L1HISJi zxpGgMU;aJl%%?GMh`8&rLqOfvOBpQ0tBo((O@NG19~`0Gq(zMYt z!N~@haFW#Z`P+kD(@Aie>6OcZpTL~;LY54i99Seqf`h(ePN>=oNN|vLctgMeoco<# zd%aM#!r61Qefp^`m}>w02olFWz=0F5n5C;bc(i}7SuRH=wf#iC%-5NYVG#7HcgPQ*Ax9syjQR5$7p^T40s>FhNooTqd^zY-{#%OT04H}o z#wpaF-=KdH5OfLVPWyk!yNkRjWm~UEn;*m(DWOZZpKx<-Q;^?VYj%V{v>3>ZL6pD<&5;$f+P575X%`l)wsP%oWc4zac>K@ zq!bNKEVc_r%DAFZj6y7$TZc-HN~!g7IVP0GeE5P}NT-f@WT;HfXk{DPr8V-HQBQDW z1;TVyjpx;!0(;5_;{ z2BLv|HSDT99j4_K$dQ|en%QK*Rq;*2y)jE!ekS36Dgm>)!_=}ouL!<##+A5+C$2ez z3eknbt)}cPLaMXVA@%nZ%#<5(~-9y18hR$H%E$6kBk6+Q5| zXt%@hlN8mmCNj z);^BFG;Uw z7Gj$CEW<%>4K|+*-2|GUCoS+~f!}H%fZ^eelWIJPj(}-2p~F)Iw-H9St{3AL^0Cvm zeJ5+0l)7Ad>FOOg>bb8MQQUbbc{XxcEacFXIBgggT`?CuY&*Y>O)j^ist9euvRv2e z?sOABz{2%)?hYqC+d0}ex!QVe$D+HJAfO@{fU{qxyaYJic z4-@6fD!Y^0WzTqIYg%OwnYdZmW!l)89OlhC!y?A3z>#;AS-|C>eZf4mKYHihhM!HB zSRpg7qN{;)RbClSQp@a19 zHtmftI$SV{=wqm3N@5nySxIo42qw`ZSM+5iJZ*Bnq*$(v8wL(dlONL-h|p^fhS2$I z)z>uk{kx_GxaP^m)IBH#bD0}1HkcesdbgQ~enqe?B2cVdO=tM*b6E8b!-?Q|hH(B1 z9Y^D%d3EzipN6D+i7*f>O08bcrV?D#e5uF-J}-htRF)a3Pd4-();Fjb5V~|cTiQ|Z!pc628Bb-VLo#PLMG@kJ5Hb@oML6b@S!AAaWH^V&d=BE^ zIQO%U>hA9Te7?W$^}DX;kLQo)y6*dmxBXswt+m%$@4feW?X{n;`y6(MU0yL8lFqMJsB~r#&30BA40<0*JL6^=U5hr$WGAvIBIkV`cu~E9siEM?aru^v=5&(gv_6>x`$$b z8uW^M_Y!e~)1UjiJecJp%Vo;*kvq-G^S!u(uc-1OO1*> z`ZRZk*%Hq$e#NOMPEZ-q<`ci4BYRthV!HQ!@utxRIFlv=Hs^q&10zRS154W>D_lY+ zJozl+s1`W%JBl?8OS;Pm1rr!6BtU%9*3g^R$eaX)7RoT?=KT`{cy~w-U66WljEC4%t&G27jyejWfn~sXV}%zsWW;P zE9oPiQyjSIN8RKvh_`J_p_*{yI$b$bW?VYjebS5-2FnQgY|``*#+KdPQ#h8zhES z)O+3FU=1Ga@M*K9T{i3eQ{s(YGqdU3yYCuSJKiP5BTiozD#RPL#7pM5?OwV{$Wtt^ zb@%Zo_KHmFzlX67rS>?VxW(&ef7m-@Kf8xbqrf3nYT#v702XdN2i8A(vSyGN*~PRF zdp_su>^`*w)Ow02 zL1B1*OAEGOjxfb#weA@nN19UjrASz&NV>O8ynk)<~wwy`3s zaXYFti;-_%e0-Jp5vU$YfoKZNz3Mh0VFgVOEe)6`#k>h)V8K8x_X zG&;oh6Dd>a7|TT{Sgxjz@;PDZ zOaiYHs;82KbbHr*6kIiW3i5^51tkjvbSpA0!b)>mK&zC{3xp#?^wERMJe?6A&=>;=L8r+wA&F|EP==LpxabJmIV+s9Bj z^Ko%q=YitC6dkXNbp4nt~3RKSpyi`zjwQE`m4T=)i^v?Nq~LFw+m+ z#COZbFWLBbpU)9Jp>g@+?8<7~CFz+*iftfOhVCGFeSuF1kwefA4)A|-5K5va-omql zEPXe^Q#HX(NfxK6`+4TxGFNdCMLg&eM1L(M+zI4qqd6XLeNbKZWdCxABLenVou;j-E5uG5MS7&^3leTv zWKLR#bR2*DoEt=-K{qi*-zx)4qW&M%r>i=l+ULr!8XP9dyv_f*4hNwulewu<2$Yd}{9)yOc z94dQ*LU|vncDIRDn+&@(yu)qa;X?mGR0A4cqGTr8W2&f`YMclUM=K^~X{I?mTmiQ= zysqlXpw`+g6Mg;B!-Sn+aiL^HMk%t=Q&W2M)!>7}6gwTH}vfF&mwxvJGBnQFWPlA~2LpT^lLOj-+tU)~ zKoLnmh&J z>AK{~)@J2T2L|RG{)0>mc`EZ1kSV#8Lb&cWv4#2dCV;!iITLZeUMXY?6%J?xe>~C! zCcpBc@D=i_IlxU9K#n$bt)sR! ztA5QS0Z+%Z$fNUxJ)DV zN9jPXes~*5Qlq^@q5_+uQkQs&^UJ06mPXRf!CJ-=WZ7MiC!2gNt-M-wMCJXOKl~s5 z*0dRSe{Lgs%uIt9`Iya^WBn#2Kk0G`P4Ab6s7Bt7<2hDtJb9Eo0dk?1=L*7K`gqt~ zO>{xpM5uj%wOrsnIl^_JzXQST;sU>n7b$;?mF@gwkRpecKX8p9C|Z_Ane(B6V={oh zS?X+WaBm;ir2=uJp&bm`_{z_?!E6YWCGP~L(ND-O-#J&hZw`(t0i!eJ`P&p~#YkRb zKNpxr31GVetTw5|z=$QYq--_-m?o(0C5hU?{CdU0QI!H*a3@TD&jLPi8W3-=+gD>CYp=MINd93%`pHS=pa159 z(kUB1|NQ^uW)FmDmSGb?X9s?4!rU6?E32#xJ$!FA6#X@G>vZz*bD~Tk?LqopZ99$l zIa8@Fua#yK_+wv)4_3hh%BR&Ql8vg0-YHh4iI`nAP~tfDbc!8*+*b+D5+K>l{tany z4!3^4BX_PN?Mv=sYuz@_#7Brm(aRTvDm^WE-`RSWHyt6T37^`iGMLbjFN;$ZoKlkuf|H>z3wrk8A zFLXAl6MdglGD;RMo@sx1QomO9@d^Mhgn6Zq0@2{I&C_6yFrgLcvrxn|PO#x-w(D~& zH+(Vi#aI*}{Vg>niSL1b|8ogd9!l9QZC@mQqLpp<*}XbpLSnWNLcJ!? 
z6pWCk`Q99=7p(EVE7(OK(ftDNMUrXY^GRaoQ_i-cq&iNojMnkP`0U5a1;YNPEk+zR zWGqdgTni8VI?FS7M{4jomM1);YMf`%5uzuKd~MU@7L*F8I+ju7dvQ0h?({s0{`-=r z3En7doqhLe!=|qIT%VTW)-}V8*6BwL4>CGNW7KIYR!=VbKtkjNFwP^1I$_P|;gD1P zw>=PQpkdh<%{SJl$4H+7Gh*%KuRjcLB-L(8SkG+ry$D~4dH zJ2w(Q$bY)BzLO&0F>&E;WvsLnjgoNcBaLpo8*mt*OtyvzH>1Fa9qPXYe`d0OM91g2 z)&W=cl*mj~YvHtaSA)P!U=d|)I&TQGYP)a!*<0~OhP^e)q9|~HR`jteM$o38T|4e$ zgik!bx;N*h`u?|VCHvUvs$*%Gp7_xL6HEy~eN)JS?-^h|{D{;_L@B!;s6c2Fo;znm z%d6_nj5x1;p{=)0;(<6NAIfUOIk$_vwq@6bnK+)ZLMUfB&s31Bxzo9_m0 z0|is)ipNsps>)wUtIB({tjfU4Of`cp#eXijboOHNKGz{I`C!D0fpXkM#1nvvkCN~P zJ}h*b&K^n-1fQoU$FlU8fLSJ>b3_RIZ=Gk57`TCn_xOOXO8pfSG1|P~#4S-52=19z zG;mR)pDqf)(BlPmsY-v+mKxPl4VXO+Xa$FWJn66TtChiA&&qUUYss~LJEuD_zp5?* zum#b`gx;3owEC)3_d_$jQohlVAI|#qI{Y86XOQT@^^72w`Zp_OKUfJSv$73X;U`B6 zaN|o2_{1nL{jFt>b-mk_P4EUztj=$7K~?c%WR}V50L?%CV3`51?77z`Iq2z-$S(av zrYQx)MG%(sfWz|-yQnf#C&RGX$D5M5am%@H;rHe|x$l(4&bE`v07mSAQdg7;P3pI8cOriKm>GE}1{_z*%4RZmrLi3PFP|Q-gQW_p*WXCT}uVYT)xj}Vl6!FAIvW~h4sL4FS>bRX$N9Q^Gnc}K`exd9yS2`UZ zJQjFXVxEh}81cz78J;|M`1?6Q$_O1A$7<$_?%{G$&kkHF9bxD9HP^mE0t&q~8h`_slvxvaD0`&;iCOv$yd5 zq7M|)t3e-+dPtlO$!xv(r3Df<`e2ef7h@0eXrI$(IB}aM@pVcre{yyLSX~@0dIsu} z|1Dfgr5Gqr2yC5Pp0ES>P+yf=WVS5vYzc&*>Z=N@rxSbw59-5LL6yT$uizhp2y9_G z0USR;J%xWe3r1!Y%+G-r4daS-4?Y&T)XYk)$lI_T#3HoE4 z_k;ont1=?6QF8r6poI`{Xyp&W=ALwx23ycCZ79rfJPR0vZXy%eP(hCZSTSH9DTH%m z`vAzUKSTi^Zfa-)YOkWBH=K|lS{+=RODtNpg5p95VL%dF($PIhAb-*Ym@pCeQD9>R zpjzZtFi&+%!gIHbI?5(+a*c=$qzbw6iJwEJT~1B;?L#>OltpR2O#|z z{>iS?2hd`57&SW#)|{vZa6CEx&R(M|DH=lioThQV2KH{pvUrpK?Y>!c^$kBbUk?hz zDY`%8<->CSme=v)EFl9M_RK#e#-fmmV68bPcc|H$auOO`oicv zGQbsIuiyAwy$##Bt?R%Iry{fnC2?j6JHE-+t&rqL@`1oJ%`!ooUmJO%= z;O;{~A%AH_s8^y@@kO_Lwg)%@JN6j&674o|grqrxwnV#fmX&M%u1DocNKv527Js|@UO z?6)ff5duNb|7k#>O8|{5K%$?Zhwt5ZK;|hD@Knb`<_WCA$vp8^I|x#S0G?!;034_v zj@#o&zvxL>+cB#lw=K&3kIp3WVh{lMQY1^Uu`(}ImQ(0I53fUYJ-WE^lY=FhAI_*M zKs@FDCv^a%1aa$$7@*Gii@F|=82StXB>oZ1Gz!Oo$Z*WjJ&`AVxX}rC0}~LLd`-fh z8wr&Ik>P~PWk2su*n21lNDDy!5GPcSGWb89A_$kkJn0|ADt-|A;~|+C-+vGT(}#aO z1;nKOAXW*8ITMt>?>+qK^<-k)d&GVYK5sPukCTc0`4mA=F8go9%2oik6F)>J(_*(E zOPTa*&;YHc;OX%{p5|y{I(5{C-R6qig|yl}R~-oWn8vec`8poT7ua&?dy!_LV>5h} zz7MsK2mgjlJ?7jBU*#hNlj?`Lx6kG^l8$;MHVw(Wet0t}cc64jw@c(q@cH{ECtX`|~zB2Pu z1D`v1gcw^t*d^Kt*mJeMxfHF$sULNNF-wypmnU+ml_|_U4Z2{#uV?M&Z2fgq6o7tA zN_R>+pWnUGsrsEY~l1XzR{?T+GSbdwUo!iXZ!Pdl)=tX=;zK~T(IPoTh zYr?-PsxDawVUu`QRPkcXq&=YDYz~EdCtze^+P9j<3qZJr2VZ8 ziu@ILcyNZMc6ph54)vT9^6KK(Hyjs^nct@vMLHe&VwCas4uakOC4g;V+*^_*Hk%XJdn56UO9S=lK=!i6oT?&Forb!=?2Pk1a zPFVp~+0S6zj2z6=GryGiTp8(c+gwwX=Em8JW&v^+@~X=7{9IG#B$`&T zbo^ZR%e>S#3g=kwIBPb=89{gLd!Aw4;okheC8-eM zT)7-c7ws{0@v;g-WHo0FWnv%EIbS3zX=$wV_y@Fh)=~ris0S-&qZ~1w|53I*4-G@| zY)>8=_hEbK4aB|J&S^>NdpF;B?DoAEPd!~iA?y+nNSV67-0puYx%vx~T4;b(f3z+Z zGR_s($aT6zF7|0oBHSeg5!O~X6%pCtGnFuoEccH>MA^7 zm&XqTuznQhh>4Ii&;EMP`Z0gP`q6W3^5s(-w+VMVr;PAsbtj&F_pD>fC_>vZBfMl; zMC@n0X{D7E?;t$9yS#I2Y((9x*9O z!R1g#L7DZ?`ozBJapBzYCC~bovRfft+7Az& z*f-<9Z#Pkq!}r24W~k;8rn|(!5La89iDVg;(Xu{1<>v8GGNOF`{=QoQ&uQH97oZ~& z7=^e{5qV{241cix`vJ8)JHEO~dK2>_@{RoGz3XmVK?*ZaIAaghdW2F;D+>Ca!693q zw9To6>g#d({k&D7+ZQo4$-RAEA1}0(T>dthD|^ymwvNlcllvGbx)GoaZ%jB4xzKW` zTwBXvpzu2@_WA3V>Xor-ssgM>)qI9?z&54|P6ne`i4kKU%(jpu*-qhGpNkft@xF@u z(l1O+l};f;EL4p=S~hKN%lhtn%49|AfYU%QCu?vx+y`^s*&8rBrMS(jFlTnzgpsD| z0)$A@X{t|O9=OhfrOhjW#ZFwWJcG;=6ow_F)V?Sv_mpEUdaj5{E85L!jW~JeK(fu` z#jcBaY(|SKxW6c_;A)b&-}g+1T~E{xHcE;}bK8_*agmdJ&~d+y)taXz@Ypncng(C! 
z`osUZi11H~{(li(t>WV2Y{%YR5z3ik<$R$5qi!g>C@#C1>QUD;xO&qM0@50P-bu3+ zP@JMN){#Ed>HZmD(Azcw$khe^+y8a(I0!MMN@n+AD=}}zVVebGQOi|^x#Tr;CS>tZ zx$o;B;Nk=Z6BXBwpYD!H4y$>96UR=z-~Is!|ND}X8%7X!$OLp&f8gg2N{qd~k>h_i z`){urNnra7|Dw2hic4h%*}=wQxq}1+!-loZWSY${!vr*Z79ONy(bORs!s0T*jFFMw zPwcbohHtHdwc7`9{v<(u6XYbcCWz$oEFp#LTDSwNBWjxum*2bGZ*+^I)yJN{^}QkQ z5!E}URy&dauDlZOXi9K*#ERJ0sAi&7J;gaKW-Wtu+Oj*pzBt)Iw{3UfwKRo7-aosD zQJkg)bKUQT-_Lc)(U#Xg$a=1AQTf1r zp?YR2nGlC$Y=;U0n zV4{$#xAR-m&nj6C7IetE6f?s#A)aYowf9cFFK2#M^0L(M;KsXHl?sUq{1j?~sdQPs5KN~)!$Z}& zm^5%{vvizu^~d7=rM|;_Uz!sgc#evHYMOkYsj%Rx@{a@Zw=6h%GH^p7j&D>w{rotq zVobxY&MiA}^QR=MpMJIWJi6!T7?_g$kq|gHpK_1%shZTadMIrgHBvWN-M;3Ll(?~m zyx1k%;$k6-8^(%-g5%|fiRLJLM23Bl0~f>Ka(ejUJyPqDAO*Rh*^V1umcsi`!k@l; z|3+0loItR}T-zw0V7lk-of_5JB3HBN;urI(#$)xfsVV)IF69jCu|sM{*Iqx`H#>fU z5I6-rIB!8yjfaLnFoGp`Xt0=+cY$K#!FTG7^=Bna zwMuUfH&jnBdPTaAianFNH>^L0x)XGGy&Cae0$m))B8dxX8pN{+W6W5_)(&5 zp4T%6Htr_Q%UUuZlzqMvAtJ?JURhCQXLXZNLJ)Zae&Ln$s*>4Jb2_YZ_za?fq)>sU zPi))dGmNTCrnX9uR*F14RdFxIbNr^4gc7D;iv1d|LBfxIN>Od@+Sz z%d*4Qe?nc^>fE4f<8(XQs44X%7UE_giBv=w{>WWc1h7$vks^IufIWl`TpZX>RKM>= zutDfccg1CfzksbmPuI7Fk4FpX*3*sVh{|M{5SY4cCTxw#JI0No;fVvk@+DiU=fzq>rQ*F zTQ_;s%>J3ymSw2GQ|*$i=ivff`MMN}hQ5<}y(kC!veBIonEdg#9hg=31E9!$?R>6u zu7qDaGy#1vL>Njg}Os@@xxR?W5 zB%Ta+lzi5lVOCQ7Bym4o;F2N+@ElO<*{lc_%ugJf{b_prn>SJj8Y|iF*N9^%+tsBr z6=O*Ii4*}R@e9?m7wbit6%4wAN#sQ}s)Hoy4a};$iF52ww5K9#1K5Q*|vC&~@dJF2Rah+se;91Zeoi91+?r5oH=PdNVW2xzYd+eHhmyRC~Ak z2CiT;*h!f2h`v2+g%SPLYV8o8W1gw4Xplt3;kJGKG!K-3(5?SN*Vd3y*cWLN|1uYh z9Tr#j-W6`0X;GL}HDh0pR{7Gq$6M)2lt|{NvDFJi^n*eTHbq)O2Ud((>tHlNt)=zm zfV=|Lv(gUN0*O{({qqJVR{dU|O!5>PGd?jri3ga{UPU*^8rX#(LGc;CIgnZ%4y%*0 z@G`~H499sIsWdPnH`!cG(fJo$?K8hjn6+!Eb#1{SVMf;!kY+4K9fS}($n|bRo?1O3 zcyLlS#g#KZYwWvNLy5-!HK0H6!awtnjuA;-N1_Hq8A z5F-o+c{yUmo|P>@je2HqLn-!!--dOwh?;pdu@g24R8eQs;ubMUVL6wdhalpS^e1Fq zkCHxn3?pdhy-W=Y^z$`Q|kLHBOjhmirp0~Ni#rjO=B>=L1?@pp462T4< zJdS9$bB%j#ZBOu`SBNEc&x1iJ1z)^OF4lM>s zve5&Y9)a5jlKgM~Fl)azbX!|LCH@o(FTJ;&(!}Z}Bu3>BS9OK0w#F4nhNXtF$|)yGYtu)-gUvUdL1t$r_SD<^kDrG>8gZABs$q`lO}_ir7UW_hKh<%d};%_9vc$dh8FlD&cC53RRT!Levip#iEf<0budL%>1ppNN4)e*tM2OzLh7f z_cp~Dt&SQkHrnfJqIE0p7q7>mLEK15gzpehQ<@t&@LzHdcWigJ6u6oZ&b}dZTu{EkGHh4BC#u>3%ju)WGr*p@~9z$c*Wg`3kOcO+cdq`ApP{VwkkZ+ZQb>@A zbZ&tiS~0iK3=MT){E$$c@sUQNz4bEpFtn_-1H7zbKsmA-#hR0c_k`JLEFzLJV?$YZ-A1P)~`|i=6u0PH^B>{tKJOQM~bhL26k&YGeuNHOY1W$ZN6E}V50KFIEf=)F|)}5WIZU3GisOI>%wqrzyCm0L>z8|o5f+AHmI))Y@H8$FAzU0=AKjB;(?F+1c^$h8ayc*1Cvn?z&g^(Tpi@EsHnSeHfVWf{;bk| z>8%Vcwfl0iw=>_!=u%ZL|D)$QI940X6PSVOiSTgW32Sg6q`=al?PD39x9eCFl`HDr z7neSXS7~)|3NW^I(qiCy6uSTMK=HH{(Rnw`baHV!sFSPzR+x5QM6oC9K*FVr!{Sa1 z$|uB;)WGkO zJs0at*|1J3d}s4M$=2RT-0UP%`YJJmdf#z8(*A)41^12R-e+L#5$3!@g0xf$A%YYs zYm$w>A|HQ^>8SRrQjWT=iM_hD)|yhvTsFz6cma;lmXgXWudc4>kEpQ{eNoo~Ym#2= z;Bi?y6eSpdJDH0u<{`{t6|`E`5NAb*QZ6|Je<3wA|IW(%(&r^wQH)|!px6@L) zv|ow|*}sxi{R};>EqPhfl%m*-je7bU1g?D3*v3OQDC%gf8n~2GsBn{yrI1A%uw0!9BOfriWkzwWG(?h9!zUuHd>MkyM7Rszz4>ZKvbnnW)J=BVu-mlDhpk$E?XarT83={!0 zvhqVC4N#IS$cuspv%ghTXVy#fnWD8>Q0o(}(Se_NjD;{tMMPsq^Vo0HCAW*nImTP? 
zuU-)@kUYp2nYNYj<-_^Uv(Ams6Y?s~&KP_5OZ7R8VAI7S14F$f|5C-h#JF#K<>;Xi zO4IbJ!sDAIoEZ*}j6LWa4Ynr6yVJ>v0DH@1MbO288X5*D%+0+Aj=wa+m8cqbd!r1J zw7ZA~fEpv*8p*E!Wc=@d+!_Zc!2)gU-rD#zp2c?Q9q)WhQo2YRFnB{A!5e@mhd^bO z^JLl}xplM%h`M76tbr{1tx`Sf&k6-Pvv42jfA@P9@^40tg$h?wqX)P+r-W$Sz3 zukl3ptkY3@*^FyQ4waXOs*krjg;`CpZoS$zVp3(+(9hMs+86COuwEnq667g)&cwsq zQ|j?xnbbXqBpsl`7fGeZ$In?fg@MRqh$7HKS`adOEurL=7B&xi9`;bZ@vagh?wSDJ zmB+%5m) zF3v!Za!`KvILUu$Pfo5qBC#j}Bc!FKFqGoE&JRjD#3MkRdqlEH2N3uoq4Y-zI!kQa z#9cIeQiH6lT_DyH1ji4QWvZ`B^>%Cf_aC99cJQ@Ld!PV5qGP*VDxKku8d++O6mmz1 zKmKa}a^C{P4lD>}m-~SV)d{^D;lD|PdPV;?e;rOBK8U*>-Qan*%jXFIV7P<&7#i{M9;z^&-wqbAH%4j|84r(fooB?(rcN_zdLUgDIQCX;D*Q;4dV74&8^xXjW?Y6Wt6P_j8x za)dt05Ei!Kxz9`EY0bTpcM>;mzxQ9`p=zXi?thlhi-Q;s$!kxB6&-E6XMKLGxVemQ z0oi?2c5VNzGE`mu@jBw^bTOjI6ADWZbcJzsODn~1Wp>H3E}7Xj?ZiMS7o3G(mu2Qv zB2I_CfQYN=fHT%DDc=_%>t|BwZttjp#403~JS>E_iv4Nu?j`bglg`z|CMt1NZX{8Y z;4tza<8Sz7Uk27VO12E*O>a&xE-4N@sM(FZK_kP*(IADn3j+U#NHP#TvX}gR!N1>M zfc$u$+C4Ls&NOyb(@n_H9mYSqobMWBxzOS#p*x&HFeRxBp>Sfm2ZmF1sbYO?{-TIn zn?&qJuyAkR9$aW3$=aQtAD01jBY7pgGI3QSH3^)vqn$GqC8o_^PbLdh44o2+c4YpX zBHlFeMN@A%g%vgJOW^y9!t!Y=>6O&OHFzkTrcQuR)It(8XWm$CyjCgXlkK3pu!>IC zh_ydhhKgZr&o>e`H%GBsH))K1K&Nsm+smoG3ATaV%ebIq9qX6BIO`iF8S$Vg+4%ER zYS$r({bj#zWt7tY4NU%#Ry_PEC+pQy%F2s5q(M6327Uv`Ea`$cW(TH_Y3d_)z^wKHn8)%WO29`z-Vz+#P^I8@2qyWcg9-sh z6@L;u;TOxjHO0NVD)|4?`@hlO!>duYmU&Z!{Ry_eRh=Bk@BKM6exda4BTn+U_gl$= z-WkZJ1xu&vUTH{=erPKsa!gjA!^uuZB-a>@x%$iEg2JZ8IBOtS5oubeD(cLoJfWB| zj=ZvnlYUB&rxXvWy5bQ{BDT zh!RsIi;7c=>P0h3L?WM5G+jOH5Ut^qu1>IAccPH_#m8m0I!nbKusZPvQF?#9yn2NH z3;u2K8h6U8*)W3Vjs{VuieQSH!a6^91Gq4C4E%%xvk7b{KFjn=)>zcoH4#kparv&o zH)_vc`M$f!y=%!3xWrxR+-z5On8<(+em;iP3YTuKYdLc}XY=|MkNGCtzL*}yhcw9p zi!8FHD^J<3XJt*y4Ras+ZZEu2%8wgtf}`naw;u#kA5tAxJ5aP1z&*3`;i@{n-AZc)t?Fu8HjDBRChD+T-dKa*?G&PmV|) zzq+PHQ!l2T#Nf@ZXqNcW`8LIY&|;YnONU@i3c5sv;aIH56$iXhLSI3?(WBXJ(>p%X zdC{+5q)5+nc}m|cC49sSVTQwMSYUV^9bAK5vgq1m5R^`&p)=DQY@P73KF@D(x5LKx zHXRAlTb-QABBL47>8QNW&|_Y(nuFNqW@qBZf!-36e^o!>0KCQ_;}3VkNFQYRQa z5whONz>jye<2on9%`x3S3AtPKv_OC*?ualx+HLG;_RC;isRJ_c^#_ApN7D%+X^n4n z{K2(5(fmPQW2pu$9qqU##lOg(XYW48@|#GaR<#e&%}s!xe+Xw$7(<3yd% zZ=;%CB|)n(B?UZB(%PSU$4j|3r*J6>sr%qdKvk(Be+DQWQR;P7Yfk-xO}R|1Y!z?* zQ&Bf>nNj&_mDPUm9fKm9q8R9G9$`T}&+T-R^tF9M))x9i!dAW$uEhgEh zhVRbKjixt^QH=)#(RZLDfHZW(sOnzIG53xRS6{e0au}?4`4kU8hN+Yb_+G|b~yHMaMyICXCkTpgcJZD#1w6=m> zx_X4>w56fP$GlR_%-~kd7-Pu_W05IFg;gkv-@ovPYeE4CoY4lIkni(3Nte7;>Qp~)`}+{h2232jhb3rGz*RErK3W|3!x1Xf+M;MaEZ zg$#9WkBN|42QwUPs%{)cia0sTzj-CdU27%w;@IMv;rdFDwA}SX$2*R=zbI}-^bKE2 zR11)RQ+j(u#fYmNsS=nGjCZbGjqqYx+T%E3rM^?B$s@tKcJ`4}leS#boX3iFURHjJ z)hT79H*bw$tVG#^h+7<1&xe>&7B{VuC5Orux)057>RNPKij>FDPD^jZF4)t!G$4*z z9L_%48*KjVGFRa9;rGiJl z_Y~>o9y$ln1@jR6@@XmN=CZ=2we`>PXt{a19S!%m{LYqN<2;dP?1`=buDf9ra(Mo1 zj{W!nuuv?$@u_8F?Pln*H=7<~wI=_tQ=jx_UfoZcH_hK(j-spV1=#$~K2ld5d+gb} zU^VK8y_lXLn;I+Vd$~9BMaQYgNo&(dHpa#2D-XhIaTzY13(k+XrLuNozCh{9njED| zqb|XhUP^bLF5xxp;OD-#`ZY=_RHQ^Bf2u>}bxv!K8(AsRDTmw&oj7m=O zMM6tlO4STUYEUn=-g|Mf)y@DzQS`vW8=!RKUJohryr=*qa2th+orqe3?6(H7naQ5+4hPWH+m)q{$Nr{C_Lk@PCh!XY&VSA3zK?qIzVHQQWHd2J|*C3b$;11tUP z6z}xCEsk6(bko6)TSoLBaR}8!3qK2Ja8*8%?n0%Hx76W;&Y26|YL*U8HPL5v;y#_N z=3E&4mMPItsuTE7CKy(T=_k@*gS12X_L)Ay~;|i-7)u#>9Z-D%3B+GGQ?}Nm9o)*#s$PCW7Jb zTjC}W)T8#E$;SqX%J-|>wyoACECBdLB4r}qW5o;qfq%;DIqm0JsoGRu&c2!pF0QWG zUSdtf=3m03lINlHtl2RzetVJLOrO4HrlghzkCI|T8kC4n7Qi5hrFRO1q~WQ(ByPyn z7&_mMgE*99soXrYQFfypYQYz~Iqz$`a>)>6GJI9521KPM_B$eJb4ChNd}Gn==vHs1 ztoByP`{TUrET5v;>-k(-gvP*vm2IGobNjs}XPFv$>nbc{%p3-0PjP zpM?xd_pOa}f1sl^zkJs@czfIXg%-?}Uv4f!>+`fLf0*0CL$+xNiQ}^@JDu|m>)eM* zO<`*7+hP<8nhFWV{Cqpc^2Hn1MdG8Ee9U`XY9HUd$RD~1FwL{y3AC7ESPnWR*rX;l 
zZb-(&(xf9#sPSM~TNB>2#rf!{Py}7n@ziyu0xVsSA(vonRqcQ;b>2rGmxEdF%~f($ z59Gv_bcj@jPgNPP^L3v+_T<7u04SR2017yJ>fmVW;xHmAjk;d!H(}hm#zU(@VV?z} zX1lsy_jR1^xb)sh*5toWK#7Z zv&*?JJYHj0kF#EET!Lojq|#9jkm%;v3dWkh)KLx z?19itV0-n$lV5{3B3U*u3j0$OVhU9UT%xZP)o9y{-d2 zbR!qw>p~9jO%hnFBl?{G;I!y67~)NTklemp4mgJ|K?u9-f3UUC1BOX{q5%GnKHO2; zOzyw!m#se{*r>UoGrX>Er<3 zYGU^&Iu4{cz*fW5IssKKBNLCQz06QLSu>O^z#(>=OGigXU#RD;P)b$GO3_*>o#MY= zvIM)8X{P$*kxL=^*Rq9Dti5(5YY6&NLzZTLGyoOb)}A}2{BF>TBsYNmO@Z9NFJX4~ z==R@?-5Z8{Ga5ksu!bTHpyLUfN}!;+tY12o>fUDhF7|Wi7y5ww(yQOe&wi=(+n>KK z^Z^C@tI3>vfh*~$pVm(kTYoD^+3EYoqyM#mj)3_evWNm%{`Cy;Uz$Mp_EZ2ifcKR6 zc1tdJ_cZlCAdF*{&L!FlUUj8_Hv1uqz1fq0S(zbdKG}0x(k5=73KVNAk7V!Cs3;TFQv{4 zQhxI9{G*Bb2spC9^qu?H==JBM(w*NCttAg|#?Jxfwg!lOqXGC&6VL%B(70H&35*2s z%XWNvf8?$`b@!p^zaE*5Eo$VWA&XP-l-?RK5 z_szPhx@_Bqi`oWth9lzd8TMLNL``}pW>e4zDjhq*x>M`L`Bn(ZVNsUp%=}t3k`F1a zDb-_qBxZz#LOn3x(8K!=+4A5oI189MRk2(963#9fUV3(?DoRyzQ}oW+50}(bj_m6? zbfIV9$du4d*b^-)+YKo z_iYCC2Q0o(-DN6qf_lfs3YA&w&uQ>cBnxH#0ZKhum7MLEajVb-pR7TVRp9fmO97B|bEDntH(qrN^YfYEE%YvPv+7V*f~%S$&mg8`&G)HQ z;4k+pqEl&9^?Ku+wlc zGDJt|jMpuVoVGz72Laa}6%1eJbSCZxry!=>QgHn#$H4JNxe3BS7f^zDVY(Ai&ukLt z-FTxiqWNnXGDzjl&PRk1(g7sK9Rd3Skq@MSCzLJ#^2w1!ux_; zSI^sIZ==uCkR5*divq*G3ef@`=lZYD`Ht5j;)7XdSFh`39LgV0ihl2t0yZ5RulrsC z2R|0zlT58Y3Mac_CU&Vnyrd`u1%rL7U2y5AipJ**t11$FOrOC<#xDl2L``0n_}ad6 z|N5dddwH+%!{A4K6IUt*3yQ3g2SsANYCbb?U3Ow-%;YaZvOk`lE#ChON76nMRiFSP zEcNGPwHU7(?0#~qhKp>2oYlWL0MRn&H37^zm1n@tvJj}=3_b6FIvjnqcV)%&*7!zA zCv*2{iraBeTPiMlmkQ#<{hV*6Qk`BCjpw}}=84)t1Zb(~9RJh1x<@N5B0J8=aj`od z!xjb~vm(R`_7+HwkV!a#QTqO~^kBQ&q6uYU5Du?^tRHC&NLb)WbIfV)y7wL7H#0?c; zv@aWvV2X`Q7i81y3Po-m(yj(YL+iVi-kkSH9(TpSKWAU_?Y1-7ZOzeAL{-* zoa!$6ABT@y5|YgGP^M%m88RLvQ<5q3R0$zd=Fkx#b5vv&%1knkCo`Gnd7cU97>M+H0@9_Vik7C3>x6h@B{$jm_O(C(Lk#d?|U0 z)#rcXRcfOkURtxS9tc6eiJx{Pup~AZQTIP(!CZ$i>A7lgy^{ZkZ4RxcWqPIruz2V0CwrmZr)Q!Z*|$q^(<@ZB?F_>=$H#W zm`v7NYswAzKHHQOIGYQ;2Mp*}3SHHVXnQP2EtV3&QDB;7@x&?5Z??oev7qH;$>Z#% zb_ZIgi6G{-(P4vNZ5i7G?#Y@gL-JYKC>a0_+vjJ}U0zFl##{M8(X_PW3W3~C_&mK% z`UPql59AS%EvLn=zA%SmV>GQ4%a{zp??B!2x-B8`#Ve0l-NOo7%%Unbi`TeJN?Q4g z0yi3*qnIHchs?8v{;2gyv@ZV@w?uFw@X{qZP1dxx?Qs1liEB;#sQZKcHBk3t5Zp|K z!m6Bd(7$jaAjfJtUPZR3dxw`(tD9W?x!WZ-4y#L>&mOIpvXP!&r_YaD?87ilfqP3l zgXLl>J>;vJeKTLH)ZwL9dfc}nYHuFC%XP8w?0(T})`fF@c<-Y)-T#wr2^9ZFNdLdV z{D1%7|3BUWLNoBaQx?WM{qSx5L8X7LWhnS{lM~3k`dDbZ@m_#{4cedzP z=|H466ue*=VCyDFnAB7_u2{`~gJeyX&ab}XR8~a=SL6A|>a>D-`glP>6 zR!d7}`GSl#z`-cM)8>^dy&{{O`17Ha?&j`N*8J0{TED;O#RuFuaERC9#C%f+N(dIN z3DArS$lVDh4R6}eWIo%$F36^Ud;XyIiBSr? 
zOneyB5`^e z%xL5tCzXvyP97wWDF*AiAI7XFIz}|yOV>B3pN>$NadN!Z<$dxRQ_((@hf9po>2z9t zb{Zn|4J(z^4Yr*yt3^9wHhH^Q;NQTZsdRc9(}w;5nRcHjdG#qXHWCX3J+Ca@h!iiXu~ z>?S7qwbsk7Gt^|-mRWD*D_-HY8IOY2yEwd>j48CiJg}wu+L=pVBy%>d5PSF5T<&^l z=heJw5CWqIw%7x1LSeql2!9Lc)-t%xgx0>AS+`!gLG53dQp_R02=0Ps?!Nb^(zlQc z4ps`u9vyz_TYX<)vazD3yV5BhRW{P+ zYD1cV=*lKWi6&AB*&DCk6mKf>*D${~6}9F`BRI{-F?P?>XnlPHN%+Ssug%S`|7QB& zO5@j1w&~YXVQ;cJBpuWP5zr855!?%EUZr#^AG!EqqfmF}LX6e*Vr^}Li&>B%*YGHo z@bI{h&%2c4D}Fq)`wB@?odFT|}15$@cje3ELC=nxo;pv>-h8 zHXV0^HQWMv%3y`~zEHb5{p<3Qf@nU+3bbrx37*zu2me`8p7(vMWtxxeQeRqbVmuKQ zD;mQvTO;mz8&v}k1bg%rYGqBru0!KXbeH_O2O-h^krli*b(wo&_*jgOQ$aJCHJ13G z!bC#NE`Rloc7VlX;iPwwl7ApS#aMmH?L=**pbGsgcem_!7uZSzEm-eFvOKsYcID)l z@GC>==ha-x*sGB!Mt;>kt(9iEL8>RdIqf0!Rva6=Z5qi^jY9|e^onwjh%|uMd*5v& zP7P8mTI`4iQ6ee3B3K_MyA+q}Ta1idE>$q+nr}ls_iUlRLLAog#V&?`8;Uk3QMoE~ zVZwyqEy25o_2FE>HfL5chgAu9k``fgimq3hEwmf5R|HR0s{Y0Hwf#f$s7dr4KArr} zq+Z5s#FfEOL`?m%u4Bf9oW39W)_Jt+weKq^e+ifvS+-8IvTK|^7nc@wrIB=(?NOUB zl$onZ#QH!R{bWP0SYe~+L|1NMe!9NptI_cW_Flp61_o7qiR+4(&dWCi@A){Yx(Nc% zF{!K@w})+)c74iqvH7KH5pI)(SgXWOK@snK%gTo50u)$xO?MwW3y#y4f?9--Xmo9l z8_QJnIVnq_(0aUW1G~=QZdMcpodSa4B!co3g_E(sicm96^ht+Z^qiwVl($83KD3h`ZM4U6A5Lrl|O)h8%j=WAB8fN z?TM?}B0nC!*UV`hyGRrM;7qJ;9B!OBVA@@la;wuHW>KLmH)7RpHt$G(m3L#Eo!?!s z=i;a(|NT>HXNjxA6>v({?cV8cXf0v{<^9@|+sccJ*cPVOZpSl)r%UcDgl7lSbzL`C zB~uvX@w(TwxIhaWM=}G!mD5>lm!5O%6dbMKn3Z+t%(eC?mxl^3=Bh36+^Vgmx3l51 z7%kP-h^13JmBEWO$X)GXy2yBt8Qp3ome{CC$y2yhBVvIza?vCkdGo%UsZMam7Zc*Q z3=lb*C?aNQHDhsC2UIQsn>STKa(1#acR9$3vAw3yPyoB2bJtHjOfZOnE!$Jd8RxM= z%ZOD*&}{BAkr(&6?+x18*vz}$CmS6#Xtn&LL=(y8{zisSJcSj{{Di-e)V+vllc5WF zClL5+=h9q9Yd@}p*u(HPExK1k%AEoYCW~Uk?~%q_=rGJLx=GoS+HSenESgO`EY(3lUw~j}DiYvu}^;_UBd_CGCF8i3L$DTT-D>m7ilto(S zo!t;{+#iL01Q{2wqDQ#%1KguvqqB*JPmq$_vm5?9S?*St`~DF=Jdq5YC4}4g7@6--;a*cH$j46g%@cm#~5Bu zzM*A1O$R}JgZxLcPWGY_5l*Ner>v$K_oeMtPq9@k5r<(T8HVEGIuQ~A*QWUSu*&@P zd&`DGiw*kRyf&VUJNkP<7tVi5P;z(W<9HCdZgH-}Azy*ei1wP~*>mP2m0kGRzj{j^ zg?|)*z6rX&vo(vcLQ49FY%8;BcCq&s<~ZCb?auoJO;_S6g2f^a|>SoweL;v>+VPZF-rLFLR~fO%!2DssPhFO)tra zPPr|*+1;U0F5|MVk}FNW$WCh~GHwqg-U_YLvf$%i&rZ2ySk#_ft=TIkdqIZOq_lqe zF{PqLlz}EOD=MPbLm7#G4bkZD21{?s)SDP5mW`Y_b#t^(>8+kZtpO|TsY!ZDdU;N} zB$9W~kFMV!#>cv{+Mz$~5>~?Cahv4{tQHCss6iWd79^EQxPahG5!`$z&^{e&)ymB! 
zMC;^0*RdkV8rwu;4p{AIF2eILQo7 zYgL)iDx=`WLNj`bIe?l;0#Q_)M=_`s5zSFtUg*{;RMaL=c%@c_mW#)xtV!HqqMT&S zYGdyZ9CpKVutJIXVJ^GyE!JWAii63$*YIgAH1Nm>^a7Ui=_Cl#3*g4KVPId7PJ-cF z10yA~;xNw>U=mB9yF2hkpDbelN$fm4v_+;2H67L9|LM3suK%4~+XKA=EX)SBu>}iZ z9vPh6Isii-7K?d^y1b6v#oqgxGJd#$`I3mp@_>DV$bxm^vCx6NAS?lgU)y)^vE)GO zF`D|=f$AgZW8wj3a#ry$54;T>gzoOa8^cVPHa1`jOkX4?C%!@2vi?U==6_VKnYZTt z4YG$l5>7i-5N0)b+&t_*n|Be2=YQ)C^8;{Q{&xdMse!l6zqfGmA58qu7V2UL;YTeD zJ8GdkstU9k3xaHeBR4w9V0Sope(Qc=7Y0}tD8c-x6?cAg1O-r@?|K7n#H>KicJ6c@ z9<}Sh=!nbQ<8EMT{4OZT*TIDE8*+ncutbA}_Krf66hH*mdnh-< zP>#+MT2~TFo17nuZ8uc`JfyWH^VT=WkKV)%^+6Z0v)FGC5p=&?Oi(#|Vp#1%@_HW7 zI!XLrM)8-W%!6V_j>;?^m+681UY(VOOc(6mZ0gp=T8<1Bwtn0WPSnIZpai?eO5i0) z^siJT02SCGcJ-J__hAvZUg$g=Jd#NgiOla4!>{>bwG&!g#Ju+nQvO3ftSzCSrQoRW zNN;h$S>eC324Aa^2du%5HGT9+80H&f{Cn>~wpISaYHu9a0zF7P$osC;CBJhg`#X18 z#t*CYZg_lMxMN(lQa4cwU2B)edQ%%8>Hn((BkXOu{duoBGqbC#?^dOmLJprDXTd>s zVyl>kn74r~tU zL;r(N32OZZAyBvc?++kW@_!JDiFY~LB>N5?0(TT-Dt5dNzI5)WcSk|*_WVchs;rGG z0fEDI503aRo^vw|?)b-}I>M8>GaEfB)1txY7bPj1F2-^xG3;yYoM^qV>QX z4Bw<5^{4#tltiNZU({eFPSD?p{r&{m-uRo?Y8Y}L?on=GG_K<1E2N(;1c~gIES*%u zWZ@pbexsXx5OQ;K*RWT8X2q?G`kd*@kN74IElI>MN#G^WvkNXGz$x0y!>viICqE1U zQpR#b+j`=S4XWKZm@9rMd!dG2C%IY@tF6(LqasbZw*fp>qTpzJegM1IV z9}y_-wp+S3pSp79lGWvnq|kkOVGn~=`Y!=KU*4%UNnYiHCk_xXQwE>{yj5Lhi9Vd|kuT)>L6fa4`TMS3EZKv9zSNTn}saqq&lR5n1=>bwv+z#{!9LDX%iz zA;Mu`2!Dk3=)YM4EOAo>tQy3XqfH_@4(*^}I{9(W-32w?lk^P&_6i@E(K`^`GQ1Go`1bnoDUqF~InYhvKv`T4_68{J^YMpTFqv1D8kq zineq6Y6b0b_)aeOw8|RJ44l;CiG@(m&^+2?R=i!~CgQo3E#2Og<@cnMZG^N9%}OoQ z_E*nBlFZ*0b%rtt6(Ui29~!dvoVCCScwyq{2I}Fv;*QIOLmZK(U$>027T9vScHrVX z1Nh>UnZ%Q5^ZF7jFIX-;-%u>ZPF8j232y{o!ak}nR5&bzOqfRnBTGcA9BF{?-q0< z0WYY=$YnkD%TPgB;o6p;!y+uJ`2g;oAkP1yH5FaGTf6@uY&M=qZlF@~>Fz+U(WALw{_ zzIckkEiD*sH9Nz-;g>blE>a((6*9w5Gi347H55c}3bq(&k5yO|kp8OMcggTeM|h+r zJ3$RVthNf0_-j`~L+k1i1P32ECK5iJAgMyMr$#8J1?(!ZC%&F)a1!V&dW1D|^b%=3 z$cy$uB|d*jhbXC|uQEec%1pg3?NdJ$Y{bx$BN*hUQ!@2Nm(9gY;4*va#Y@7On9<^I zkVlCxMh5Ft{FxZ4yFK(0P9DGl9ozX1j3bELv^qL0ZnmGJKGp4&wPos0|74I9VLK~T z;((08lD0F?RFyPMo?$$>q26cXxJplx)?*$pON9)u_P)O0xtSjF@pDrJi&+rfm$YfK z5TS(lo|hy%<$b(l>pr%l74&f(P7;#Tt;R{gT#%qH5a%56iUONE^z7Dmif zJ+|9f?n;)q8pAEt!TU3V9mzJ$jrf%v9E{XtR${c1=0wf zF>Wzp6aHaDzr`s_Op)7c5_xdC4^1L49VFgx;y**ktwO; zmQMyUa;=oo^`%d^IBN@sf6g=&pjKG)yHs;?&7aN3H+4{oa?~}q9`*>M)Agb6Yhkls zJl|%|mka8rBGyNi=VGqd)#@5%Pt)R^y*n*>zRC{UpFn9$So~o`{)L8tvbDt32{Ly! 
zLqjL(;4d#}+7gVx!DoO(Cr?tdgMJK2lM?JK`u?hBF~T;;1pcW%xqG9Kws21WK-Vzk zaF#!MzVPCwY(ItZ#<5v8Qh-dxnSR0R$OzLI%#~tsb>7k*;AinvO!2+%n{)43O6V!o zvHXw{;Vo9d=vLw+(vtRUsCnW^9V=mxx*1-dCaW3hK8$yl*N7R~)L*GHwqC8>+UB5a z>}-Z{EbH3M3m*;k2&yTUdWV)r*qiFB=v0W7c13n$lp|#6#u|j$L@tF%!6X9Ig(_k# zv+Z9!A8YrCM2AgHK6g6EJH5g+JI>c?fM|Sn=1q#F8clGdi<$%oa49iHd0rrlT29`H z*H38Y5X2<+@ce*bKs=2{;$Ut_s7;lb#Oh%uV-y{&M4k8x8Lr4P&P7~SxRv5ce>y0R z$H!Y(5bBSm>yhAo_b>vzb+ar;9&xxHMC2EM6S;WTDx%tAUX#$bQ;~ULp*kZu2(L4= zQWs%d%fuqZSQ^xja3)c;2LomHH&Qukp*51qOX12Tcv+%#?h8#BK3}RZt^QvHQ)mZ9 zRN1T~nF{^GC)!>`w1RIvydrb#-SBd6Yl$m6ejhm}qWGvHNC=Mh*?8HpQHTY;ljL1hyy_o&Jk z@f%9|FhPCZoU)Kxn*!PPLe1x*TZLK~(p8Ig*pFHupMfF%)K^GTs0b3JJ)Kqb>sfE4 z7jK0MUr32De|eLYIxxiU-Iu-4?9_`hLVG1v!x;ANYzzo-t1HF&@RHQ)n8%X+@rBNV zTko^&Q>1NJOL3B-TE$ahL|>4VXFV&i9V!H~&L(y3q@S2bjkTD*F@eX8uIz^XuqaQ; zHUi$N2DC>GBG`aS`|>OL+}TPVdG3ORDwiok4nt>K))|fvYCmQpFv-5P;AgNEm^>{O z2JM8@Ek3)f!f!9ibk~S`W&cQ9&+Ja;zA^tWa&3AtKukEwae|JCrq9OqUFgP|s`pn; zS@N_f;XSka_2r3|=5OORNsRtMah=iZ{sh_!CCF-5Y|f z;%2T1lINA0wZvO`cYWHn^yu9PF>nDWRJ36deNC{z-=Lcn9rZyU&97fxUQyk9PeFDr z#DSito9Gpn8mTjX7`LkgeiP;PA%J+~)!F)u@YnGdjx9gEgkaB27p(8)<=g%SAsNw~ zJEsLxf4I-3XBZHoEtQpD$UtTYs>p6pw0QEGHQ!K zA(iX?xi;U&!&h0kXG5cys-;*Amgpj*GFoR6|1w0|zoUCoY0cPX^>`qzB7J0G-H{W@ zMY`zS@bpu%my}Vz6vOh9b@)4UkTG&Sny=`gU|qRjD2-|0fW`Z(DuUCg77F~O=|jTl zi2QYM79tt2J8GbFDX=8Zm^6CkD6_neFiwTZccLGMT69$NEh^s&KNF-|E#F*IE&L?ZoX%WN*$ix80oCfETf#0FK zSaKLZi&Yl^ zxYii0j1dP%6o*&7L2!Y}AE!>6JbJAc`s*!~A8)Pw^DU;IZ}Fv+micwy$GEAVQZ-Et z_s$GcSGftmg>LZ^j}m@4f2&Q$Iwmh2E8>+{3~i`Rq|V!#R^4lDV&SGCwx zZO)7PI%6ovRJsVYWUj>q$KG0&{Z88+Ndaa<>W--czTb$=6e(p7!C-z~DQ3ArDRE5Pp z7;w}1e6Vg3L}CBlXpqm&m;|KL_?zv89pjMl{-HNKQ^g-c^7Ct>*=|SAVbUbP(1bO9 zf=yeYv#{QYd;H+slU$=C^zD3qOvO=pJ7o`WN^tFOv1J@JfA_cMo7WjrO2`Pq(;D2| zP+1mcIuraC1v_R#-#IioptZ~UIUORanOvP;mnVQN;vbD=M#P|xVse{fPs(G3mbWz- znxcS!ERF=UoQOUa&^6$~`%64ed?|3|;fNSlP^|p8m>?VxjfVYrIy&M{2Y&MUfKZx0 zI4n4dihm?lK*{y@moh+5iU08uNQRgI`s6s^{E_>iXysnfN&Q?Bz{LxyNM7XCNnD4JJ{pP*Qe|tLzT$b>T-P(jGJY zqZ)8VqY}P1C7UHJzlb=`+_W|&O1V_WB(?7z!Q!}Fnvp_43ibeqmiGo|P zqmuF%4x^(+EF2ZGKe}aD*;d7W4XZUtf^|74105~}2w0#TlDHsQgzd4)u2QJtzXF0c z{q2>bM0mFfK?HpZIQocIHfZQAkKf;D3`5@%{P_YY~gMfuZ;4=V}m9|K#} z3M;}jhf>M2UJ57Jk=&MFG=l#Vg2p6?1T#jOuV5#I(g*!!Jx}HH?)}S;h~F2J^ljrO zNugVvCIcq;vGsjd5C?j__``GHkk<4y5$rV<@#++fQI-7dqNU;yLfb+0opa@deT!r? zXB4>RWNEdg9NxCfN|1Iuc{r|D=V>ieRIemd*=!OaytE>B3aa`L*BinA!WN_EG~F(v zUSOpZEjZd8Nkfjqp39~-Bm(eN!CECRcHD`%>+L*?Q4^hQ#d}iS7e9QxVoocu!yIqj z{=PDSB_A#q!cttAFw`hO&)4;41kwn35t@?zSz=ZN#GD9^gzck%DZU}Gl>z~vLQKGEgAV1MWxMBU4zKXdam-b=-h#mx9P(P6Jo{=`QSk%TzC)*^E)=i+1a6I8B2(IKQ4xHImQs%bRE5h4E0`CYm>} zvz`SeH<0vJV()j#M_L#QNgLR-PivFm8`gQ%ht(58Mw3fNZE1{Utv{?p1=x&er82(r zJ8@5t<;LbaGhY8MjgKJ)WMVrxRjl<~Ine;{=+6DzPqd%!&26>aN3b`ca+;;h5)Ppk zaE$`z3MbE6+8IyeW=Liad^G-$K>R*d&ZW}+Qw7zVk0J`x5GI9;DdbLkjh2%5WC1Br zQOlj7Aa2T#u+BIJb1%D7oD?{{Y6jSl*^n!S{R_Q028G@JcSpN@p5M)>%{Q%K-N5Tq z3)gw#Qcpm<8e7pWX5|=(Fwh^BtF^lNPT;=c8C3H*$U#+>)Hq0IL0 zoOhibl?eaD^M1}$m34f9A@1c{S~kLMu|EKA(#)LbR@C{R8YqFFIodYEHB1B9=`&Z* zFG$3wR3D{sk9WKx;{nx{(wzK4T0FBX(L#cByIwW=@@$Sqqn_VK4>44ik%v|{Mx9Kl z^>IsD!-s39dl(el6iEjk?uCw}vv`BWXtvKR_l6HMh5zSq6O^Y4rsfh^AojS*&RdPO-Q_)?_1kbQF#wW_=~547&3IuEI?ZCo8?c3=r={}S3a(p zj#p=gjVVFH1z`JvtMQKk@)V%00huZ;awB6wmb*+PF@6W_6st zVdQp+)XOMg)WXcfgsp%kv3N2-QISuSZk%ytZA_2^clJ{%^hO^1C1}M|wAQ35bqTqp zu*UdG?F2{pXMYIFf*Fvosf4sxZxqwCwO?XR|i4j zlV`JrxrBO5z~Z-hXVcc{RKI`g+t^dWubzqX5#+8wV zf}w{AT!sRRI{B4m_na)8PG_2!N*S+zc(_YEHOmUMM4Qw;VfJ=>QJ8pUY2>NT`+D7! 
z{9a!5krO(4`yUbW1|fH>p9(}xKX*EpX^D7kndGKpPRpRyE`S0E+>B(&|*SJj90 zKX<3LrYqW+1R@PT#j+8y(u>S_OX5yxJwx(O!+a~{**pE6s+5!~2iCN&BXUb-6l{x# zIqI|R$0VXczh-AnJP0})-mE!76WQQN`@hGRc7TJ~<#z{fRWz96*tQy&QH0=UmWqHkvbk z0IX34k=nYwX%9lePv z&l&C-r}R!#kk>PjubNZlxP*{(E2%*3N9jFj_^u87F9CW#)EgLQ$AkvB4Es#AK5qm3{YMiOOrdR%$qqD>#lndyXP_OUoIpUOL+kCLH_x3@rPSz zhUHKHP=bg5^4dR(9X)d=B9+T5vU5d;aj72tr80i6%hM_;)pbC&4pP_Eg@4MaLi$Pj zH;C6J3?yQ+(fj#XXgFRN+yu6sz@%f_ia<0cf<0Ay+QTx7RS_(13y}Xnl%HmSaR`lxZFYq4Fgyl;Zh4iVLVD{NEiKjy}%;U%+&YjldW$iGqKu z8T3iAT%*(mc49qw1yhSYmMPiUU?wlOcylT4xi)RsqqU&_jNJbTW1AskJ!5!(+Gi-p z6{UdEON}frca4*;>g93v_Vz;WNh#gSDr~w2 zTRsKQ5XTs1jC;w!2JmZ)`O+)cYE7@={iWj^aUplnSu#7xmbrwv1jQUf60GFt;V#%x zw?hm;a)Q4;Pv#d5Ih`JolA@ecpBb$9@ zIn~I4^Gp3Rt<5ZcHH??LwSztM50loP(qa@5{=O-Q>UWupmga=u-rF2%Va(;7ExS(M zdQV(B^$VrKD?%kEh$OEBH-pRNj+l`BmLjUh?6hTsZLO7S&GWJAlvA@w5N#>keAfid zvI^H5D#VqhgH)Mq*&gsHNlp6-XW?fc4+(GIpZA5+Hg_^M8!5}pov%vFjuc%;I%8&wTU*@IMBHB= zz~I0epS@nKTp;!-q7rfpNr7X$m5$hmE^{8@p>|D5w&F=q<|s( z6_6n~a7Y-y@YxFH+#l%okIrmki&RYIo{N7G+e;Le9-@OgS(?sVqa>Usk!Q~)W)a#? z*74V0@CY5RJp(TS5u?C_FQCMOnDYvkKC2(o7 zcM+3ot{{AK;`*0soub!O4CZiEx+SOt-905Z-Z@cp>DS?X_DT<(YP`xrtH`KwUEr!{ zZe$>2VH&|6U&;4zd_taEA?l>vig71x+gPs1$44Uc3Xr>Z2AN0$H?2hZgouW*9mtkq zx$A6>o-0lBU*dL#OIRSTG&w8((;F$%#Ta>L`V9i~&8z=$fs>Vw6>oNCX2SysVf$VB z9xuW4Y_7^X#b2q^Ea*w&#LvREFNS-_u%?^(om3Y%H4gS{VVVZ9^nzMY3>!JD^AwZ= zoY!PPOThjq;p|heJ#7F0^8#5DSnV|t48mvA7JD%atpHy7$G0*+zkP)zqlGU%XMP1D z>KX7@tLeZIFM-XFV8MWm1OUU4asBaaA|f0RkwSbY1@=1>lUp13bCVcNY9e|^ZBot# zYbhv2*qJHCtVf}8f!s;~WJ2L)5BSe}2ZY=5Puy!(>+cer;6R6)PaQHTYiPUj@~}o+ zh%x5Vu2Vc;??b3KKQ4d!%Bv;-Q+5{AV#csJz&b0BnS<3TieL_E_P`#FQnt2no)YZ$ z2|&Qz@6;b81_)ct_#&2Fp~ZAt8Mt9Ggt^T@pP=DH7YAAWyx~sbbs+;V>&1gp^G$%guO`pFs8uwBJq8+mY zo#sfyy6XQbXOBIP4!4??B-_&IpXg~)E(mTF+{8a8-$ka#2?6c$!eP^*ibZZo_dDwD zD43Dzk50;bP@x?6?XPMRQLnV1P``FQJH5>;i2n5hg`C~n0AIX4!X1VH);q|U(&Fq& zjK;`>&MgCZF&;1BfeS+|_#0fzz7JuwD!=pJrDmY*C&e-1+A5ttu6JC<=Ws{=R+n9II!{*)Vsh4us7)WDoJzRqR zU1!+W*t|WklM5q3=DKU41B3(KPgqi4K3P)eyZBmvR7oP1Qi(CYscih?RZYBrslSvB zfbVy?AL%u30fAs>&bb))u!!+iEARvZp+!;3gP2VKyYNR37-Kj}Cbv2!vFPK`qihRb zjE5g0fQ~~fVGqV(o%|SAfZ+U9SQmrVQ%!}H7V8(HWo}>mphowf3ai03vaZd**Wl}<-02u zkBO3_IZcLXB;irsa-{dDUvcZCt;r3;l7s7_y?NVj5RVctqNM5ms3Bx+BUoMO#abmx z1Vs0KbkPO7iC}G?CHt5Gpa?SnKj}6Ko8$%721LD~vo;y9ajdzh46_odB(Jfxkodq6 zM^3ewgPJ&aH+JX|q~_QLwB9r%*@VaiO{A(0;J@K6P$5Oo3YRP9FCHXamBj{glp zZ3IrD2&&)x*3Jyr^i?HEH(K=##YmU-*9ira_@hGL{GyJ?>)@FL8Jjx>)9n3 z(lHV7zS06b%Y1)!T9I=0$BYqW~e3Wq1KCv_u()?Ii%_3H2Kjw@>_#h&+bNmJZf+7VLIz zF=Gs!vko*|T@J+bdtJ^880E$dkXcY?!BCN5?K-~EDiZm@v?p2TacsO|`hg)>!+DQ> zYhix7vgqx~{n@XXsI41qU6&MR@1nWR3ttQQIBSK0v)-MP!FFd2KZP#1<^4&E`Jb|c zBH||jVwBtVSj$z=w|7YPX03ndkNkORRESY4@*Ctz9mtEU;WJNwMgLVC(nQF~G2Xc9)V51m4^N+ST!fGRtX@@u@*b_BChX=s`sCECv0d!gt zpm9S=4mZJ&|J663+X;;uWw3YCeza?Kpqd74TmMAP8x_-D!79taA{Qev%7jm=?%Lsc zlr)-P{YaO2;opZWX!P!{Mr+>WqS0*VD0F)eUh%I6;vTHl^LL)4e(|JMZ}ScEwpB9Uct=B84Q#M*!LI~i_;&+V z^{@Pao~`{1@Wdf`$EJ}J!^8Na5@Jw^>d9}AUnaHtR}DhXK=t@Ps;@hqQfq$I0s;2u zUmM1Z_~-<}$sV=kXaS%IDk1u<5}dAImi3y;2ZLm}Tdr+^Qiz@X0TB!j?{AFVd5m7c z&Rr}N{>+U}qqQQ++#t=!`R|-|!fNY&u?9HUVcwhm$$>N@$^T6$AQojMtg9Z7?Xg(! 
zWq*m{x7ok}j>s3{|5dyE_hLs~@_#Irh{$*PvzXx@)a3u2+W#zO{eM#ohR84C|J4bA z$zy7;A3Q4lJ26n|h(}=B{_Z+v06I>fO58o_e;jZZ`EOlrFo@v+afDp;zE^*wmCV3; z|F6Y5F+3qEe-vZ=y;$!rMfq*u82tZMstLmr|0gZZKWP0&%l<5;4y*kF=I_OjwC`&S zr{DPcZCNr1)?h^3N7V~|7u^44F)*aR7yBV^SZ(VOHQ%4qpuZLlzf(IdcIQamuwyF# zsP#h^eG`w3BE;!GN^t{X{Kut!^4NM*>L)GZf2Z}&Qi+HHCzZWvFtZV7+;?JQ}W1Mvk868#l*s z8{CAwZ43k&NXNDkXOvfl2uD?MJ7|cFgC{qoJV?Jm@@v8B;}%_&)v7Tu&(65|`lhFb zMyU9Qao#vGxsQqCr`~$DPYRr7j)lJbjIWZ`;6NOw=w=tC&El~oPaT=lY1-1nw37x5 z{!i?0<{MkE4heGxL9v1J&J}6uqhpMj$$ ze}T&*O}iMF#_zw~ZYS%Ou@o8R&StkkeW{7(g^B&ebB`FlzO32(rHgwwa&xsjNwlOlR>M>9_Gj|KvE%~z)t6`N zL;bv@%nm=nP28DB0IKn`ODm7FNV!yMxFz|bRr?2Vvxh*3JSg$5d>+FVtxVEkh#m4U zCy=(!J+9pSurMnVd%HLLu9NSz&0@L0dSB_QkO)YgAi|~I_yve?A20(Y;IBzvv#Au3 zjMFPzoC%rD=C(ikhVC-Te&PO=xCf6fC+w_C(Zc+&LVdP{;C|h;nD+_x^UV6-Eu_J& zz-Zj;ZeY}jGzo`5XUa}|B+1vWC?gi6=dQm5yU9aX5k}Jeo#LZ=FFv0ueLks3(kAiY zatc0_>-A|54B?_6iI>#nPe}H|@%6-PgVjg-z2jDik9v{|`%DtVeM`a~%b;4GZQVQU zThDJa!P9xs#hmH^&B9zn+T>HcdWW9r)a^I(GL$z-Ls#oAx+;vZor=N>4DXn_9JsV& zjEAl%Hwh90f^#YjKuuSNJeX%rIp*OHH)V;` z#p?4)q`u?^8+MvfsfYK`)HuP=cT;Vc10qJY!qS z{YVOCx#3dIfEPaDSzA#&@5S&{V_XS)muW*fErf0blCs=#gSc`~m^xBbSt?Dk{<<1S zQhi2q_MW{3f6U2iw~_+~k9Fb^io^J$+fy|Yi2|X~ z_*64Ih1uGdxOZ*$HR^Z~qmAVuJO2Ka`czf0yY?; znhKS{T<5eUNuEm)LA~jkVYNhf*l-bV>ujNri^Dmv8I$y;^9(ncWECIOyxhyRbK47* zb$`%~7$KpSpVsxzKmGK=OB-cUjP}*4d%<(pP$7$>g4jp> zi{Bte5la62#}U%A9?d@k7apOJ;6(A0U317*&UUR^1Jeaksw9D^&~Ng*is-JyKzp(2 zouKC02ug?e?5Axb*m&mWsfhJwR(PS|(?PH`Tav6A1}2s&#U!^JJ_BLh)H@Cfm3m z<)0Din;N)R-jiHX(p&6ZoX7ck7k<%);2HfFn~J{64Pkg5fAwwi`zO%#;QQqij*c3V zt5x(dar(5E_s>1c$>|^0*%&>&%W5xvhvxHD8fLJzDe8t+WrF?d$`Ls3rJ*j{XliXB4Ax3;a zD2&c^QLX^dR-PD`vw*-{LVs~zV)wkLlf8E9Q(>;j>tf1SN42Nh@wsQUhvA4EN0kjr zI9jX{xak!BVrk)+B>+=Bo%k2VabL5K>%E(Mj%|dF)Hs=EE+>+CRB!MxymfJS<5t{- zExUS#XYLKgggeUWj8cO)(K08)r)At->^Va7(#G4#-}3f|;>DFP-+L}f?yb?zYWY#M zrzI>KI<2>qIMEM7IJkkO0f2w}tq1K#iUIUsftYz(J|GoM`tG4f0`>5$C-<#xBkSu6 zE92OqM?!dIR7{V{Y+lAusnHxrFN)WGU8=@Gat2m&jiLSZ5ZmQU&wy8Vy3##Qdt08; z!h%f}-AFD*5s8Az6st>rb&H(U;q@d2Qty@7`JD-icl#@FwCJ%w02){{-2;MDL$HVW zokv^P6r=Q>zPCF4kt3Q*B*NgbqE`%zk7igRnO!HwzR;W8(rA{^PK7aBnKC`MmCrm6K6z7pXkj~Nx7nU;okt?{MB){eu)x(%wP3r z4~g{rLE6SjS@z`m*{x1R;v(6BO(2XAG*JRBGpru;bu6^}17SOgB67Xv)x z5i2Wbt7t3Wlkejvy){g?<&j>gyP6=bAWEw#q8&K2yvM=sFpuUb0lZ02V8;N`_r`82QR zt}w2)Jne%MV(ua7)scTKO;8x#f&2Rw2W5PIG-Bocwc~N8(oV?{kJd7HC@FO8n z4-P8q!eX8FZh2??Lj!u9V#6j@w*-^YyYyaPhNc5%s&j!Jx}?C|e^?>4QeyD~ih;h(BLy!|Lm8kmKpz!GT}AygiBGuA)B} z`7!pG{~>^BJ(6brR$w#)X14Cr+Zm?%g(5G=Qb;+`GvvNhHTXqJn^cfPe%El0mY>A%g-U zS#r*4hB&}5`!%S0>pth5^ZxbjUH84U#5FTtbyanBb$#8{)m3s7n^7MrO2Xd+?|~5V zU&*PVbliU>UN9gDo9@P&G+Ks`>>l4}i*LdXv9_pRD^PZ~lpu6^kL>0J^r0 z^|C9rg)w+T=qhkhXo5L*305<5U1n4re8U+5a4w5w>|XZ#pA8CoIjO*D@bUZ2L#z5&%s z^82e_Uva5~cJqPcw5v$=ogioi!=K`Sj`zb5Hm3@Llbl*b@;ihY_zL;E0akr#1vs7o zLj1ecDX$<`r8=NocA~E-MCZY_43W3Pzp(;EiJU4Mtg+-P{3QLw)X1rt<4MW?I}q5v z0fGFE z;5vn&kRY_aOddB5G9lQTuw;Mc&Ger(Y1Fd;iq*ARo_QpfJ|dA+W_o40yXOTxZITnTeb zQor_PflfVaP6Or9482gtf;L1^KpJvs*{TrBRW=r-tJFTrdb&lCf)=jMp#^v9xEE4oHEb?U-^v7ku=R-%92o`NFK4kJ}VV3!OPKCLhKlWWmqtX zr{Y67o|jQ}?>$*iE-5HMg`|x|m7e^7{F=uf`3G5sil3f&rgRW8=0jl!FO1?t%~b^Cjh+u@C^LTGjf zDlF+$6RU6JEXN1m;I65uikfE=eKZyOsMsyrXUup5Tu|5sHv!+rgB9J zmdCQAK*|lGneC<-pkom?&7I1Z9MLfkkpD%?SS$ zXX_YRTYkRua_6%opax#%JFJ$j2*{TM!^ zLSdR^#Uoysk$cUQ^EJW_j#(nU%5j`p>>S(A!h+rjVNVYHVroq+?C;AOlUveU9Up(W z89#w@UX+Vgu2Yc~;=0q##G~`C=O>T*IBWQ?yd)RW=xj^DiL{FEm3(+*#8}nKym6;9 zn<}GDw7r-ZX>5V^>+>7xspV(DicHY$R7@m8)k(Fn$LgZ}fBH62hdMYah$6g}uht{}YS0!Nf(l z_};{QH43G?SC?UD6xN=85o-=XEshEH~~vlZ7WHPTsF_20WZ zm@SaXy8i$j=FCsMA`*XFFW^Y8M)Xc6(kp4)Pp{o!+w 
zAvGp{JC5GecXV1~It&$O6y;>_in3-Ayjdk4x`lRp@3u^QIbXL*2D~C|aZdTv(5f$n z{zJC_#p@(iIHyvXw4iVC?irWK){FqF=JvVCFn4m9HF1r8uNMg|CYx8*2M6^n?$5~! ziJm9)zxs)-c0w#F)l|t4?D-WxUCC&=*%A@d@p5j?_u9HT zzwf%gzUutwHT?iLx+q6qYZ|=ywmbOsv~8J`(rJcxiZe$JY&^109&sqxHgbn_U8*(} zwf!P7OE-Hx2Z`G2S?g*VzAyVaP-!-{16S+yD}SaVMV8~4k7#*aBfOEkhPobyy(f)o z#T!Of+9}Lo)zxBjt-_wfg3C1ks~}>ks=0b;m!i*qpy!W#b-IxbjHBR zwbqnj`JdxW(g(l3DX`;IGFHiGi|KT`g1v}E;RDp;?Z8>2R*V1ULl5))L*MSX%suzZ zS5jIQaGS%o5cmzoeW12`5`9_tO~2cU(d<4!VJ0j}VGK_mw&F<@g?($I95o`+A04~# zbf1QXM!mDIbSrwChqe=T^*rvGw^%8Thx2@CYnsK#x1ypl#Z<*_? z8nYi8Tu64W3Js|3Wp9mYExz1-wVo^@V%1=G^Vzfw>&~h(KjjSmd$+j;rP+;Yofve` z^>x4g%K=!}K4Ms!8)rbudY^x+)aYM+va`3~NGK<`;QTWAV?VZcaK>>N*eR-l=t2;l z)1v;{HxO@3W1eF)stwzj#GoMHGVrLjcNFl`5uSiPDRK@@JFYPM-a6nUHW4#ga++jz zAKZkP$@+M&R$d)5Rd3wCB-`v{*>{>>Q?p=px*)yo(KKkOwtmEaXa`QL`M(Db2T&y+ z*P4818EpPQT(8}*JJ^bQ4}mZ;5GyPf+>Mzm1{@z3f(D9v$FX3OvVXz@AGHv79*C_w z;y>xcY1?GPD_xi~>}cX;{aDB;Jan41cn4gXgj;0I4G0x+w!la8 zP0F`Dqiqtt7OdIPwK}{Eo&cVy!48a@0c?^7@TXxcf8s{DSZ=iac@}d3#2O32MEMWq z0k@tfl29_ae0dXnanNYH_51U1lxiM)F|_S?3v?b!BNe#v8f}1JC2gUbz`cB$NnoS{ z;|g+jo)EZm?jP;JJ?aq1-}q(d29H4(J^#dF)##6zxLWM3rNV?MgY%bBq5+V9e$&P8_szmG3<_PAGDJgiC172H{$Jm3NXXsI|DUpx2ZxCQNARftu;X+=Q*=S6-$`PFJ71gVC?L;S z$KR2!f}_}J@jh^e0Zic2PQe$;<^r%Cjr~~(`vQs;&d$fX zbVr$)8IHik_tpZw-T2*Fk%R$dnGT$XO`XgcE&01@6P6Rnh+DT@_T!J9n}d0fybN;d zwl1Dd(QuU74$!SxhaYoPFP>`YUqMPY4`!WyK*&%nWdj0GAeGRe)0qom8-;!aAXV1{ zV^IG;0N?^qmOwTj%PZl@JU=URcM|d8CsFsUbKk{3Dmc034*Fb3r4kz+9jK zb0Le*)8XM0cCElUstAL5os{d)78yc$6%bcO@&|CKh#Gw#MffT9>60#9(U2?WDH z@T1`0H~~YSet9ku>;eK7e|fGB@NxZ$4{PMoB%;{y7ph>~^7;S9cn8Nk6NCbSS@n}D z=tll8DDRw6KN&0@fHm0z{{8KX($zCUBdfs(Q-Bc$fon!6UH2cqoQ!!I2D~p%UK&3q zB{uhoQ!o}7f^Nd@qiHBA2egPcLL*LaoU{Nq)J}0Az=7E*R)E74ym2J?01hzMXR@%D z!M0Uv8rc$7JoAOymReVwj!F*MZ7Lo8=wB}<>ejLJJ9*t*Egt)3rM^7qqv&CR`_7V; zaGpmzJjz?-s&Qax)RBSjBgdEMt;_Q%?U*whE|-1X8fdDqy3E5p zUTRCcuV#G*FXz`i@7-TZAYxU^Q{KV8CyJ5$>evzJJFEWkRwhwa9!fgMTxgSFyc^Vh zy=C=C+aw%2HtVd9+7yBC_O3^zvmw!A6YfZL*8_ehjm8fF>Z8Td#@-lu+S&Tv;`TX z##r62b>AGz&|*w$KEqW$iy5C;_R#Bbu(PVA88=3Hv&>3%Y41@!PD!&WEpr)i`FeC? 
znq(bff;@lhn5fWi5np}g(Ve5zWX4ySS=NuI4xI|k@z~%(J07c{qT0pPrKb8>7nRv~ zhMqi7Ts2B}cYJTf{dSkFPBd7+{koy+=fH?~cW2AVXNUY}7r(t+B@ug6_LhD>-zu~0 z(2yEzb5T({AHc0ltpD_v7Yf@L1$771j-zw$7u6_QIe1W#4tGh^e4M)>lfV}nU zyim3ZrFR+!d57xxsV%GSXRQ}5(=+xqWRKloC{bfHxb-R2KZcH-cJV!rq8u_5{-TZr z6;clhQSRs$Qn|W|ne^|>f8DrSU!3(=qN`qUI+m3-daSxALW|Fv?VIjO1-*5hSg*$3 zZgR>lL4S8qF5syZW^(dD?qrkDy$1 z5Ia{J93DPSU6ELQ{^(=aqgR(kaEjy-=#&rQTF*Je^k`M967k!tO~-0E?PmzyxGmYNszb&o*r&a(y@qzq1NE=_um2{5g~!Df3UPy*l`fU zS8-im=~STgQ9dBZN>UMVxUjA5cN5XFkuUYW(9LZ=qv~BzrR&YekIfG{Be0%UiWww+ zlYRk2Pyzm1k^)9xD%A+1oafl$S>cyS1cW(;J0tr~ zx$+Z*wij@7HBvoKgYuGIr^Oh4l=5uPW5fWmxWe>B+GvC=yGII9fKrmOvBDKBIl3EO zx`4UN7qDM=gHRiegPelN>u@Ry5v$yltIO1GUpIi$_dSOqq~nSGTS(l8w91o|d-;%c z>{%5LLY{n@)*G$&S(EDzlPa=NHl--My+1N2zC9yTdPKn;vQ80xQLdNiZ48G_A?C}3 z_z>DC%Qc=@rG;KGq_pE^sfX1%AJVR-(m-XQb%;z?uW{_kO}1QdTZu1B=k$TZ`;$Llc zYw=p*MKUuCgkDhf>q#~+wy`Uqy4u@`UJ8DlWoHA4ACqc~XPC4h&#CJKd)8P7$_Q&6 z?)C{)TiEm}#!E>k!SBAH^=;Qf7^2%{c`ge2I<$Jqp%SuCe)aUBq44mamqL|}p`x1o zWtwJop%>7WF|rSN&Ri7b>72M(oo-aJH7V?6m4#-757elpqkK2D`!WZFGNP>$^K3n{ zZU?J>li!11snqDG$$miR!$Qtug(2ey^W`h-7C2YC2}ehPD5GFkyw+H~iISYW#V`s=kiZUYCp+T*g1UgNIT!B@JT5|UmX){&)f4fEg&IYsqxC-)31j`Piyn-iXcOOFp9V z@}4GZ-batiLp8)=^R{KVw+T)1Cut$vhshW+5^}I3`zd`zG#fL=uX+U(Cc;w%sWJlz z6oTWoxXo7Uwj-k|xhCSB3Lf5_}%4%R%AEm_Ie8u_s>X)x<4{|b{ zb*HbTp@Q1_67aq&?UR1=gDoKgE*Ni9H;zTema_B-@Q)OG)E1%jnlQfEeeN`+ep zf5TDHU%ylUB)RTzH;os;HF@Q4{(PAJ6B%nqAo;}DvUW15Emx9?tM{e0*?F4{nL0xv zsw)orb!~er%^(AZ)B#5Pup2|Vd7`Oct~A_CMn?fD;M=_b`%bX@ok~^1D1#W$*-Z*p zxW)&NN#ZznCI-P9^a3pbn{nlgoWS+c*qWd0C^8nfW=#B{Ec|nbb!c_qfhDoT%x0Kl z{td;RwcRdux$+MsUsHNZ6Xh1ItC}_SMoWu1tG;y!*H3KAzcCFri;kf^o1BkUPIZlH z!Y~a8tF0&U&8~?Qj~y5dYw#UE*SGcVwSQ|YCTt9X%E*9Kb2#DlHsDsttOG(`$Bz0u z+XScbP&ftjh#^Zf-<4i&QI!TB$i{HH= z_)LxoQa<{4ygUK1B_&ShJ!;2PB31L|F3ouk*2mlX2P%mP4n_*VvI)*WK`K2M>JnI& zL|Q5veRB@d3K{B)u-*xS%XNC-0;u|12UncsVD(WnH z8hh^TT$Bt%WEsNbX_12#qM2YurYG?uP1^i-b5~dKt!;F7JhE6!OJ6B6%H#^YyTeiK zUtPgn&sM7gYP4peR%b*iZQi3c2Kfo~Eb5qICbrr~B{lAk-JeL0nbA=oG9??8yUUm@ zrW&SPsDtQk;#F)IITsd^w&HG>6lVg7w!_=A-YoJG74G3Gd}^hMA%p!jYL}UWsL{+> zy3c`nkv(E?9J$&EzhLVWdR{@^No7d%k*@F6%U!hP<&1vM1TTci?iZpIBo%=<=>jjF za{{m}Rfjr$)MbohMLhA-?OQdDpwRHZd)!uzhEAYH8mZWGi zluX#>M)L$^QR%=#q*>6&X45{-z4+EM(gVU>pFao7*=S#)7r`Q-z77@3g9k{9)FRtj zx&!tP;UVDqkGx0#2pB`TwEIHW7~YFiuNTqtyzi^LcFoj||B2Wa?@To!t7>X7r%-X> z=Vv8KhZ#6rAj>#7w6n?i{Ajj1cJP*1054GAIOxGeA88|0Q-P<*07kGP=t4z`1+!?_ z>K|2bByL5{4dT`)dI3g<@2?ZIRfvY71dS-|{(RwM z4q3KU*?6ANeVV7$B~q_=Xu~ebt0hr~w&Mpa7lI}BU$W8wvJz1va1FdS%wW(b^FWp- z<8vm0ZIOM28>bs;c2xZW15vT!DIJVAEy}TQrnvseL&9Tux`J-ATx>wpWjJYfnJVEO zT6{oKI8E0=&on!I#A46>3aL!|-E=nYdOFeTPz_}%HDM17nGqStXn8V^PZfe(Q%%nf zbf4{K_p?Fw!RlWU9KwDd#uL`Ccqx++!LC6d_oWf$eEP{dO%1@v{{|x-3=nyrB8mfY z{Ep}*`V>eI=))-@@dWTn{%=5(z(awb{QM+6Cu}MQ*upH^n(O)B>bJ*SLzF54u3K>S zF==_HkA2nQdEBqV?^f7iEOCSDWvR3M%Qud0^F~L@W-J&d5;Ug>xPm7Ja-yf!)+`xyIqGDcM%py!Hwaui_^{`&(~PSp9Zkc4d+^}jR~O((`94Bq zJzyi|4{e6-Fad70z=n*Y2efkerBw>9`ga(}?=W`H!TaR92$fRsvp`w}VkGdmI>P^A ziD3q~!hf>Fedh9IHPj7HSum{QqTfeM9)bu&`GG{ScHhT67|S;qqPYY843xlMv>ABL z`(WRh(kS#VYrpV#(D4B0jL*dcw&`%-3opEmwYRy#eFk?`Srgdv@8sa$C2$EkO?b-A z(F#t+tw+qp)F0M}o6f=-|R9g%i$5fG6!{P&Ld zuL5;o`pNH|bMBYh(5&L1*c_hYPZnT}6on_!2WMg@(vOg#ijU0y=Z}hLR{GysVQUAM z0D21lq-XUzJxdb}Gd zmcFvVHJo}(6t%&PQ{K03Qc@C3SFiw&#-*Qm@@z6`no4KXM_fB8=+D3EloyxQ3QQ^# z1%K%=_fjxy zMs=RIcYbT6P`*=y)Ga(aKOCj0;~#r>(`g}Yi>ra(mp-Owh7hEHD!20BF(XeGAcK~H?1fx5daN(U@{FB{@==0~8F6Yru(0gfWlN-kWN)2?{>Y$*dE_wrQn5i4VZ`)t z7?YIYs7J-(A;=YaUv`J}#G(41{tOnw&{~-&nx_3a_JNEop<1>PkE3j8HRYZ54H&0z z&(I|#d%ZhN^CpSPbU67ilZsrwQrOB@H4bS@AAJk+Vg0jN?RaM&Gy2M6p;YRVc#)7M 
z{q8^*CHe+pwS}3vs&kT`$7DL!=Z{iz-i+XGjf7Nm16*dZS?{0;Y zTR|v2Wf40le!WjoFcirwGb#CfifK(Df0|y0$&} zc=5OzrD!An?xO`tr?4qLEN06(ESwB1&r0NE8~xC8>$)qbghu!MW?nZmuN7;M{#RjF zM>UeA64}R5LZ%m!owjYIjTf$Clb_YN7O(2jtQYQwc0hW}v?khTtIg!{`yVP)6Y*vl zNWH0y;D2UX&Ocvj@j>im!GOd!EW;OvMlH$3nT_2HtKxMT`L6owPgjW`>g9%VD?BP~ z_@v6b#kW4x_*|a;OwAzsMMk)Uq+SiXVTPB>!0@a{=6my|rmrfTV;jX5)4De)Sf55^ zT|fy0L9bo^ltL2c&E%TaHi3`Ke72E)#hdf~u;repvAz-Ws=HTLc^cbr4N}dR(j~IS zZXq?CkCI@%T^YhH>|1pqtEMf%{j_FmhZ zPvg_Hj?rjE;*VR>c?goVVZ`io82m!n^yCGvD2xmVb(K~6+{p}S<;v+!6`DTh zn<*a{XPD_>J|z>Z)u>y{5W)yJ#G{i^h4p2bcH4b>my{RiSpB-TQaLLhJja2N3SqZ< zk0LD{TzRHD-8ozG-+usWk`K9(b#FX~CL*AtW3qactcv^+dUO{p z8a`8ypXTao2vZt#G(5A|c&F>VNCJNp~#1=#V$2MFZ*t zP`12yEhCOqF>zG=Iz4T~b9-S?t;%8Yku-|ul-rwhq!8PC<|rpbZZo+RhVd&$`2DNG z#g=+W87^7fl#l09r86#2U_o$ZuSl6v}rim6Rs7&e+|dLLQ4kub}oW>7saix@0` zmT~GUMwyhA4r!*2roEMTo=E$Aj(x$IYjyJ(oM5+`z%{&0S`?YmLlP$W(8+cvWE#m@ z>N_%mOvDCuXtf}A#}pBFF4vwu>LGXmw=4z)1)f=^5l^#{I}5MUv~nHp9Q?vL zZtZ5v$}Md=8}*7Lp|uF9a|aRh+>`U6OX0Ah@k|F*Un!g%mGil~c_b{EGp$QHzT#E) zC0)P5_?H_T=1&P-m~#)x@8jc(0XQNpQNlejdmYyduG_T3588i zTi|M&g9Npr2^VdltlDtGbgC}G>WK&>yXx)V z+G`6H4o^;FUQD}5Le1WNqD~aaxKJp;B)7a_%Qxd_J|8okF7R146ql);RqmA{CHB$| zC;>Oh^KlSYdg{Xag)kb@XAtbYgkKl$xHWR5l1Y$nYvaP>bf<;@x!l2!I_*7@D<#KB3D592JOFUHbRzRJ56BimMe7UoZXM=j_UtEfxP?bU{W-3 z{>Zv}H{1MsnwjizSle#wu_N>lN9%K@v)qJkjEVXJ^F1HwSr;Mfr@?lKH0XUl45kp( zvk12NQd56o!E-Ufk1wPq{PJDM?JGFZZ-(se!diWp$tx_>#>5&YTaq|NEYJy(c+1@WcFx#gF=mv-JL#uRVKjpo*Ds&34lgB}`(eCoK-dB>P@!BS??B*00_+qexw`ss z4&?84zkJ5#q=JlqMt{>wfc*uC?e}{k&cXqHM)~91sd4w#UMn}-f;>K!Kxir3hI6Rz z%SfzG?D>w#NZ?IK+5<6Cf1d2sH9jlIQ-SR-IqYE54qOli`_dg-;A>c{g&&^)toG7H z$Xjjv`b$r>;;$HF=qR=tn-nhAIICM=GTdb zH0N-S_b^6?8L*EuN&yz!4s2vk3%t}lD0~lMr0$}j-Dr2f^;Dw~=DG^F<(E6Xi9Y=K zgF1I5a}0z+k;KDrvJc(;W~nd}I<4g?4Tew!hNcDZZD8jbpoQ4Rc=5s!55YT%F>B7u zZZDdz8>qG5f;i7|H%iNh1iIKe3U0fB^Frk36AY3k80>yvn0t7fp;m&1b{>QUA39*D zbeg&kcFMtq&L56`3NDWR_B~7iGII9_+Jx?o<38=e@5q2NN((LZPwXncU^i--{2O~X zN()8;`-Qpdv@i?JHE^20Ep41y_ZD zgABOYhxIwN%yXOuTU^yUdgqf?3rsj%*W!V`ii zM5pwiKk42dwwOV{!^As{73G*8JkKq38 z9kqj_pPU_Xp^ry?a@LVrCV5PO@m~MVnfD22&ObO?%2$J)a8~`Dvt+>8_z%vicr?3v zpo<$nB(b=ZynS#Y33ea}z8~~JaAJJ~ki;oH*p$+VAUaY@F+IR?b&qFa&W0n?x`An4 z+~@=HR78EnHKEE}T!L-yTbyFD@csodg$R&brw0NU`Ho8Zf6qg;GIOKtgYOKO;|K)z z9cc#uk;EjU$f2#_|LI}WIz0^POcstvaPN`AUrkXt(tYn>-Rmd4#$5IhG|%TR zT{7A}Iwf=x*|shY8h`KFkH)8T%TMTn#smLoe8R}@jsL^~7VkvJpt+PZDm%x*)-hJD zyh6M85AT&WF;nRpvp!+sf8(>t@{|ivtsC65<`V*;yBzzMUmRyRhwWdDxs$VTlk=|M zzKfCo)*SG~{nKfsQ9Hys#5u5I#p8j@wYtVWVd3CkadiD#(Wj$Nc%5{le8wbwPgZ4^ zGk;kW32Xda=*RrbI;h}6)@@IfN3JULg6LYdS){`G96Sx>C2pFlrMaZhL@$#br96zR z@vL`$dVN2yn?}EOGN84iHW%VN_S>9sgER+tJOLk^bfVT)Rkdo^9sC z`#Sa}tJU$;>o7yCogAJog|@;z+~11zRY_t*L=%#)e@l|ylyF&Zqs!=>#njFAEESL$4{#!J1ja3|%Dq!9 zg_*`RHO+<5#Wg?NxBap^`PLzL03lmRYRECOJD@Al<+hOhYIcE@^B}fdhSTjpP4AJo z<&P>K*X>>;iK+$F5%)Wl`>pd@`#qK@-!y%YL$D^$B)m_X(MA?Ley{KwmW?u> z4C?r|Jg|Jk)3+FDx1lV-j=Ui91&Ly8O=2L{YP@i%w!(JEu4omC81fbYW{VesK?ARR z8#o0nK!b@UEe<_KC4dXZLpnF@x(7rw=P z8!LD^c%foS*U>plLa3r9tW`c>qF^ZXn3w~z@A{c9{Mbgz@J3Z3Rr_@5^=?Q9AM z?E3ylP7DtXvyZyeQ9NOL|1&r^EUBm?D=YiF+hi6tU7-lYStMzkEi z+za)35bwWd8F(l`9X4@Yds=_~-jsrd@tEL;y>p?Qu~Uu<4P_NBwbjlmT#2U6wqmOz zdjmYfqlFH+AcVdanTb9ypWL6ZJIMnfyboCG{>fV7K#kf|M<_LBu=n$b#0%ybrNq1e zxxkvS7ttZ^AN<}!9^hC|b)j$f3x%pQ3#h&;d)L=-J(TwW**T550vTWa={7t!+N0Le z#`!n&eRq<`TK5MgGvE@KvyU7)djmwpjNRhPUlddy4@wuJ2$AutGZ`@{@uBG=@c~gR z4z_ABE7Rq#2sj|G_oZ?NbLWk|eVf%navhaV9%q}nd$uY>EoyUPL3pH} za4$`k35~2DHiO zOuA5b(?{4MV<5BmV~t&feqjP0NlnEV7R0%Y%3&m=Q|HMQ^z9M>HF=tvFC;nFvJfU%t{IdG0L zupKSk?2f9SqaA1AVlp=7GoX{2$^aKu1&<;n1ven(N1qr(m7$V=QCnM4A#ey_}N 
zI-Ou6@r$>6ch;3q+B{wYGpe$j{ldhzR6K5qR21vGJ(|)lRbhYr@=fA98CtCMKyY6D z9A@#R<~kBrirj{+cUB2AV0ck~RaJShGGK&vMln!~p2GlLecvhSl`}A)u;KkC;`Gtw z2(;)BZz<|QQvc`(@ax4BWVmGnK8Q{G^1LPz!l(LK^aIuz`Fjad&VC{l%vs*ds~z^# zslu))sutRVJoOHX`5$Vxv_5@NQcG|!lSp7;^y*$$_j+fc=Ui5Z;$IVI&Yw;wq*(49 zwoJCH1f??iImmC!Whoo2M?5K5sTCC!+w`VDYxdW%8m>}tN67~;X-HKANbB!6| z;d*n8!BB7Xy5qIPdIgyaZ;T`Hl3W?~j6Qx9&gQF;O_^{FQ8T1ol<`x!8!TZ``q4FB z%Y44yRG#+PnRG%TtJub;)n1AXb!S5AH6~TlU3u=)=rjsfR>KD#)qadNKC2L@yD`4U zw=hyUA}WIQsU43LWqNdpl*z=LLtpa(VG5 zR>;M9gI7YH49M8#`&VKJxe%eDVYykZ?j&RuF^hU;VfWG>yuUPeBz8@F!>FqG(x--* z!WM-7ji9?p$KFh<7nO-Su8`!k4t|xYH~HxLKs4ztKTD)cH+?SCW_PE?}V_cfF5t@N&4s%OEjdE?;JNb#MHMp%MQKR~sl&u!NzC#F_r)NJ4)9o9-&T-0;dJ z_ma_sBc+RhnjG9kDj^?-d{&PdwYJK}ERzW~3u{!S6W-KZ^TPE@8($HB8Lye2*X`ou z8kL^6RXfcPcS0knxShP$mP-d zwYea8!oWmlp3z>0K8YR`GQ;bkGq7{LMmTfCs!F*35{%q(le1#@{j<4XbCV-6vqvkW zX>7&|HqGaV7r~peoT+TK=`1t#>j?w%Cx%TF6Fv+&k**KKRYYU#*`zw@qVNJAQOpak zh(nt`mg4FZBkZ0>!YYUrRN{owW2|2ID{u-b?FYQS6J)MT$kO<#Qj49B#P>ckDBiZm zA1J!)L=6{x(W_RjkIcRmz>y5GY-oo%qJ08bW$(z~NuDUi6`^C>?~3tg2GgxntNO2s zQRDBK5dBnU&h+?wjY_6#cLQ=Io~kw(&FRh|N`F6y3S{%gO7eKFO~+SATy;RJd!7or z;ZaVKu|V(+s_wjbgk{tuNs#-eNX@4mI4ebEr6f=2Y?*0^7o!Q=IbI32=CpZiZ!-=L zNRus#4+MQH^k!Ufp@x5(l_jg)VbsOdzSnT1eFIniA#F5xAv}mv;3%tcJE0HOS`L%Z{LtkxUt(^$)NmBp5rO;mn*!s4rTuQ1RWUIs09U z-J9ipca99YGxZucO6Wvoa+wdP@t;<=>-!9APdOveOudhgY*uO0m~y+pWxGA;d$ghoM!dCoCak4J>a5>N~srLVIiud4Py zuifnM+I;O$pF$$?@i=pL)XI>WeTpVox4+bJ2-EP;!NZfaBj%CE6*tNR{z&-5@OnkB(h9!0bqBm@8B`-+jBG zs(+Kj4^8;noV8e@GgHI_acawiQ+nH9orL3!*w9~25PDz#j7C@?5C zQna&SkUi`0#Z$&{ROt)b=VR2}y0vp>Uz;ve!zew?kr2Q9eBLrU!lga;`L6yCEw&S^ z7QBn0Y>JZvVZ$4bIoc@;hXZ@lqe|a-IUsaySm}p2qinJz1skMFawqF2IffKAZkuif zZhv}pwdqo8FBX=TozDoX>wD(2NCBg?F4q28Bn9Wrd#x87v5S&khum!(&sJe1bT$NIhqT96MCM{XM1t`eaXk#+TcOeuWuTo{cji0R&>gqqa=yX zla@eg$Ic{b)~n=qrpNU6en(|J3^>80_P5Z@+V5Sb0> z-I*kUT#!W^-J{3u>r(+KFw-?;VwN_RsOZ-M9(+-H^$;anjh~`6og7f^P1dh@r>}+} zAJi<#jtgC1$fX+F5=!NCj#MrfPq+DS^WB@soa(y)9Hb?tEssZ)YF37KNq>m^^2ShObi&F^lyGIU^1&4z&x)RV}hC zW5ccYZ&WVMog&?+iXh(hepcU6Qm-q6D!cc57b`aK}2!-`feAB`(_8Nl5HjA2`FP zTtt#_9rMK*%-1||3c&ittnZI-#u&XuS^i-nUs<{cejE`9=e@{|%Dq=iCA>q8yyDv} zfrn%%YwvvTB>&!CMWCS(v*D58hO!PjIE3>n)!5gJ8GGd(DLHFux zM2?z8whhKJa`Y@xbsrR84d+B3a8j>}42u+Y`%Y0&c0u-%`w72+Ya5z(k?h|_Eh1J$ z_q1JX!KhZ2`#N8{B2tKbJ{EtC*}Uo%c73-!a9H}+L23{?c-n{yB{msLBbwiEOmaAj z=-pGs?YN?x^RMXN@m}A@35yd_L+CW^)07YKBer#LDa&5g$ER}E)|b~;9NlDFQ+#4P zQhhsT&V~`ejT3=vTW7U5o+hD?w@B-$FNjVzLATo&&cU+GbKU#$8E zkInK?z1xJ`TP8^!+>OdX#LxXd%UFvt=1;bHsXKjC%?x0zM06%`F;iiBy)-*2Y0liB zj66>M#Qs@FNByX)Q$|8AaUeI-rW8W@SAM2o|p{l@`B7^YfwlZD#%mbTG-JC5SBYYYh zKCw`l9%nl-@lUTw^9|H65N9M}{IXZ|9+O%wSX{Icqjw;h{HRVEK^9!K*(z;U2}0x* z9T&DPv#Qu;T#I>qWqK7y3+A07c{BSDiFtWie*7IWjK}*gYILXVUV2lP*9;XnL5W;_ z54Q#~tW<9HJ$%U6ZD&Cc?$kY^7`0uKKQA(jHPWJPf|@*%t7sVBC61SRUqqdd!SrZD zFk8q)f%Nehn=iMCE%il-`D+TU*r=z8%*lvCm0t88!bf)4mdjbZokK-wmg-WaqU3?( z-At9$SKD|ty}mrB-SkY3E75NSuJRqb=M{PZt<_Vi&eLTK!hM^Oqq_R{HhKFi?=6He zH0#P8L>jwt*ow7aJ!KX~jl!nX$n+*B##HQ6^hd4sU9|7qfW(Q9gczUUA;o9!d~zLY zr*c8YLi>FFV z&KZdATLa7&s}W2a8RZijg(q~VuhUoGDvovN_S5@q>~S3WnfhOcSmWL#i@_)C zG<8x?!l|Q{B04>;czVify@@YQ2*xtG`*=l6NFLaHNtbqd*3-vg7Fc6Hs|%u6cg2*_cOr42&+vRLs6|NudJPyQ>R&Z@Y-~3%B3~E*9?h3ro(#3b?n9 z8(BD3tzCEKcu`P(ZMC_kj;Tnm6!qN2L&Tm7T?kKiIUgTR*Jof+BwFrv)6Cps>THLf z%&Hm$vhQ}wgao+0J|vO*N_!C7bTZ7$%R{a(1|gi}kWWQ@v8mv;Dk|w*UYMWOHHKDG z(tW(1PncGB&W3;k?XWw*jjZJBE1*xO7Nfw2sm{KjrQUzfP`>rszJrJFds{9$|A0US z=e+59WF?L~FVOAWJQU+_uDIfbhcIGsr&Tb|IKNDCBEph?G;nO39AnLbBD38G;^^1Tqt0DS-5K9bodccelWvZ&xf z0d>Z(qi5Tk%_b_KoP72>m9|IPD1x_Zlg3Ydbqlr+PD@bY3h8y;^}2mUHQZ#`#pDA9w1`V@TPUZZ1vsf~ 
zBfL3QW9I~&zeu1zyu1FuH%hsLC(E%(r1^=unB=wF3UYMO_ef+bim#m}P~i>$RUA=M z2B>-xjDTGSW-E^5{Y<#?UilLI*Be+Qo+$fQ>gW5wo%_!EW-gJT0J{4F0`?OPv+FMZ zNbVnIxZB!?nbqw9dQoy3Pzw%=oD843pgzu3BDN?%E22``xb3hA` zOHcxtZwci8WX#X}0wtZF`GtSxGyl7d_jHUGkbl?TWo7gaP72VEkXMFbPa3c!;UWu?@6b0|KNd3@_N!SVQKa-W3wW_o$>9*ru$Xq5d& z3i37()z}Z6)$2k2A)Cp&@@0E9CjH_mL|sW~%ggPz0_59&wjuo&O$v0~HgQ({$lr_D z1Caf#2sI^@ExUgd(F!ccbN?>k&K<8vQy?s`JHLhXQ!PLAbs*`@V7<#cR)JoJeL)&r zZ(DRbFW3k#r+sgqs$+?8#m0v{qxyP~cr4cq*LtzxPUj+D=>@mU0G4`D`nW5~v1V(B z4>QF@GcUDr+=qAS_g4z#<*#}*1h^W@$DDX=%X;g?=~lXW7>Dumn}tV)ADwc)k_p_T z8B3DB&tDQqW_rw93+LMRqSOyE7W$FXP-e$FVX%DzfOegaKh|mE`q;F|gFTscxKXmA zx;{~b!}m*AhfdV_e*~90sfW|_NyPX2I*5OGeRSrQ{q0ZJ4rz`lNUV{7O<8^M+q{%D zF`ueSo1ELdUg1ifSJ)<)BdfY=+QX$h4dlGt5W~wrD^3tuquJyJce-($uYpQoLWE$E z?MXr2rI3+Z_mDM0@X>W*-1Y(>AUEGSiEQ19oLL$3`MaJq*mKxX0Q+oUBA$odj=YsE z=<6DEUPggq02=eO_q1NyLq`cm9Sg&(k`$qVO@b`@@e@ZoY8oPF2fXE6h6JMV?Jvb) z9J5Q~dza>)ZHYoVqQeHpIkQtHS&wVx_mKpWe@D#i{pQMf$cV&B5L$hW6ItTM#i)$8 zg?Xe9_6zaf?OHT@Z39%!`L%TqukNTfpPrmKI9>Uo(^~+cwd#OE97#c_cOyi}krOt& zc?C!s>X*?sD~#NV_4&lgsCwUCVET5AgIT4;Cu&v%W5q52>ysB#&w*7YBqP)c(dg38 zA)FT$yI3{z>GJIDzIhX5^lT2~8n&$>7k6-~B#Ut&rv@ag1-{($l)>!)>!@3`$>du90r5woQMJ3dzL=Bpe1rzgRkSw~UOB-cb=W3Kr=r

J-j{m5wXq2)1@^Q4M> zmL`PRWFLQg(~{|dT~Y7W%1b>!AkOFG+XD;EAS4o+9I{1xM4TVM-54gd#oyF9%1BQ8 zUA?rx9Bkx!=>2iMjyQ%&I#eLG=s9-#uD&>w2CkMpY-8E~jnWx$e#j{4>U~;m6*}62 zqSJR83?VzCTU>Y$)8xfggGG`I3bKs7ZTFa z2oJ z*`8u8+4rz-l}knSesP7`W$`;lcJ3*Ez(&6|J9yyvXYyxR4GTPHHOuYXOoVJD%W?%{ zfX6tLX1K{C)4I}0x_;e+C;LF&XN85Sw2`xFbx-1G3NZ9@QRmcf(;c$+?B%aErG53T zbD=i8Eh0>3Fu^xh%Ak4SnrVJAsffXVO!F&V6xGuMAvM(B&c*n2{moEn4>I^P(%)28SBjnz$dl^gdrExqAGn4YWwegcPf!?22wvYMI}n)zwYt5qB0~R1c_Cxe|9S#DtN(@H(*H*O zfv}aps07W*i?)!p_J5o8JeBZh|F?;B1uNGK_Svx_Ak+UfY9&to%3Qx8rpkfi`}aW& z4^OA~QN=?Q<(6u^q1i(h6Q8Mz@<4zm@cINec18gQRln_iQ$D;%;1bshvZcn&wx$Pp zLN>m<+1U>-s_iImU0rH@x*d$Td*p>Idp5yuo+>9Boq5&4+;6~%JIh+%_ejti8NBHO z98GW~@9OrmO*_xKt9>!4*%jFZ+pN_mwb5#_*V>rx)*Cm+i@l+~bV#KmVwAa@ z8%VLHA*CydT26B6(Q;Dh=i(RBSEKD4DeXKDwcDrpzMhI4lAMt+QzPV$o7l=6DeP&E z9t`Y}Rac zb5r1PV!iR}GZToU$E;NoGt5WWI0POR4_}@*^0FXXvJEKBdpWfV%DTgJpJzXs36Jd^ zQqC>V`rN{1$L)5a{`5p%>XzCMh%Sqp9oQH7GR$CTyv66Bvw{`UBD_q&M$hi`?Ol1v|XYI{lOY+d0f<=p2S}6<~g4Sa6>U z?5BMF2^LG30p`Z2MVdXt*AV39-0{XlU?ch0yGgqT_K;#H;>b@S|95g~iWa1HXLgQ7 zGNTvRVji%lTL6|6ctwvGdsui)_diB*_UFRGYd2OgxgeekdM#pqoOwi=fw)20$=Q5GlHh?1>!!Ii=!bk90E&@H?W%y%pxaa!YQ27 zn&`+i;D^#y9XJ7A*4gumPK?J8GzY<@#XV$oCc)v*Dyk5*983}Q{92=yWb=ID6iY$M(}Sg zY6VDI{tpN^e&X9H6~^XMrA0&c_WlkNnZtiT#2oopqe(wSe?nIBMd&JgMw)oDu7vo&9%+nu@ zH7SAJ`=iZuOa^5Oc8mmRQh~1;LXIQjWbmV?Z++|Y_c^82wQiVCPB1qB07>)7G3V^o zzk~8v*hJYCqAb``k}U(%(*s?BTI&LZ*4qbnGS-ydej%#}Rop4<)UT`vxz zoYcLyj7CN!3G_VLW>bPMgKOlJRT##zSGj9T0ZL=gQ=}h)lOyd=7Hoj!#88)kaXWk0 zt?efV4nDkkEJKUoOw8!zkHj^tg3Rr{Ltz{? zN6so?H8wv>xTnkzlM3-t)p8DZ`VlJG`o?oVAa-F!-31k5tdWka1!yYzhO5VJ9W!A- zaTSStywpT%dv)&Obf0((#}#ZJ>+4aeTG|=`CNC8GI}MKA)Z6;Yr(PTzdwVm`8%n6d z4RW3&8I*R&%A_^`JBw6R7uKaaKAO_ly2q-|?>mI+-mOA@cz5yu;~fLLg>8w)dZjQC zjG5;6c6G4Pcxu{&(URQ8LZ{c0fJB29?i%*&_ii5&U60nOMomvXiw~~UpZNIHGJbn@ z3;Dngd++k|C-%JN7fzluxRhOzU*z-rg~vn`2PdX)g&QrM8f{^nYjHZS1^%>q-($5= z7QX;01<5xv2PcD*m}R%?hu!KbIS3xxEZL;p#Vpm(V5mJ*utOnz%~IvfGMD+(r0Y!Cmw65A zKB!NDI@zfjYnqgS<@MbhqLz*L^5N*~$BlQV&&exr%Y@!``kEccVzXB1W0+$CB7?da<_3Qe*nRXKwp z9yIlF*wL??`6DT*`h@jbH>@D>orXk$kwE6laGht5WK~<tfhCq!}`D{^x5gL$uV5{#Dvr{c_xwz2LlIxpLjHipyJ~0 z;`zw5Wo>$3bkxO(`#~4Qr+R#NpLZ26wj1Fv9t5j21 z2ymAP80+j4TMoum?3`<0?u|1C{*E1S;ueanZi`1*Z z8)bdD+!bo$1yMH&OFWG{rUs{!B$^ZaE^s0|?+okj1Uz3CwyD`wbieLlpU831IigLV zX@%}ytYm&jFR29kxLNkinK6%Ow@rd>ovH8ZHX;OsHPQ|- za4(7+6VE!gD}CaCg_Y_r#O9CP;a!-%5~-ZY534|yKiTXk4+AJA~+ zGIv#;a(B4tr``f%cIQ;-Pr&5$2wHZC3uwqUUl6MOB>P?0jAyXVL(%g7Vdbwc>H1v& zCQ?g|iCTJ5{fXmnDXh+PaQcwC8Fj5)+8gQD`gPUs4P{TIDo1mXs)8Y|jFiK1KrhD!R4;PMwz5WQoy_DxM%$$d$b?pUgjcCGTrI;l(g1BS-1 zg2}$3J5_S!N9ffZxRn{l?{f3e5gO%Acx2~Rw;t;@=+2?X$p=0Xic^YJcMX;u;Op+F zKN_}lMkf^MZ*$RONQEKv6XIlN0p_4SL3tY&8$@IbNF26~I#%aEr(wtT*(=6$iqfG} z6~g)MkpA-~p|Bu`*NJhFz$mxiWeI98{=1d%Y}tmbXm|@eS}logznO!ux$?!Yl4tg( z!SAoazlo1xDb>UkWdk(%DYc!-oqq8S-L@^GXGE8V@b^ny2Y4zUPPKLJR%B(bbDO1f zS*-R&f}mru7s2Ma>}KTd!}{u$#W?F%9#Bho#4rZgTDBy2Ni;vBoz4HHEJFeTR#(V^ z#+(v2YSDmd_d6`v>6isq&%2$Q>YQBvE*hf9b)4ZzR{XPj`4HDtN*Co=NwI1IUZb`V zYy7l(sLsy0ufsrEbxu70)=e z-yPg;->fO@#nE4Rl5_ggsXzKdKBN7b`d3>G&+U=0-(z?Gz(IfFveJ86` ziOc)Lp*0pcDviqWx1X@SN+8H28d+n57`h}FB>k=lpa*`&_d zD1K0T)W$TWb^65kS2+Zw)0C0YnvcYMPpIV^Uyx zG8tTRmE!<8eM98TEC*|OERk5fYfw;7BH4#GF{y%YMPAt7@+8#5api&U`Z24n@`)|iIHt7XKnj&>_UghytbgfCTj8e$o68Z`2Hc2!-ve#0imRC&)-z7simcnj(0kL zmlZe-rQhI?9uXJy)UL(Ez%E?p{aD=c z^s+$4GB;Y_m|}#@+e4w+m$?y(NB1ephvep9;qtWo8yT`5iAJ_e_+dqxbnH9LFUip_ z`5(Mraqr17dg3SWQTVZJOgVF^Ed|;yLCx-ttmI7FaN=6 zvvSe#D5*vsyRSS4IyYh1Fr_bjq|0yz$4|D5Pn$3SlZ6H4o-GDTH;>Pdt4Egj^+7U@mg)v@hqQ4Wd0+cR&0bZ8dTf-Y zVYPJStIfnP_wGdvk;WHu=?URh zBeqI?mm?hom|xU6+OVoU$x_|9evZDJly-3+@4GW;z2(MY 
zvEf$vXlL!TA^N)6G{gOuXOG`|rgkQm`7=4Doc@u->1Z5p@|O{7+POHMH{84wF8cYa z!is7l142(;-uHMUruatsD(OHPd_>@NE#({`G=6j;!!4zerFvM%&bJ~ctT|NB&XL=} zt>g{bw|(_l65BJk*i+;rXAJ#(lwXXahfB&xm6wyliSlgG(CqP$s~po7=Tiru?x%Ok z6dnn?vE9qvRd>J6es1?}F`j`O6aJCbL}vK=h=TbGHAbE&UiTUeX18mVms-}3f21{i zIC@z2Oze5QWQoH>j9A}99g|c>c9!OuUDr4#2x{>gVuXWF3*SnTNoGCjnpMF%Cgm&M zN;mzL&`_nD)3>D?Iy8%AhPb@{m}wsR3uBl8ga2TT#UAOZyg{VwU)mXAM7I z@N)WuwL#g~i55q7;q4K-ACd$6h73JS9X*cIuiiVQbND3Qun)sQf_h{d&DmtnUvQFb zvdz=5a63)?c$VF6#4<=Z>^^Gv@{xnj^knsDvk0&n!@g|EB&lF@F1iJ0B!>v&QF-Lr z$>TUkGxAto*=}?sG-L!0?!#rq%4l9~b8!3AR-T00Y}VwKlyp6s>1BN4_P%RpXzBGg z)_U%Bro_g>m-b;7MhhbS?GV(N?yqn3I21${bPKzvt9#%|zl4yvzP?cu|8mGH`PrZp zL;JbH(Jh~@jU9*ao&tfC5`UfIt}%2Mt{hVmlW@a>wJC)C2ju)?{d9BV-1atsud;Hh zcIA4jk;)yiW_aowpCf9N%DWe{vL{|vyeqnBW)-DWViEVy;vQGfgMfz(hs#C@*Tjbf z>YUt@t`UM04B)NlwtAu!8&ppALMyt1t-eV{uLj;rd=%t4S#k8@wRjq($gQB&WH(0D zO78Ml*7iIPx5B<6CqsU$EBT}P<5v$~Rc?F@ja!<8H;I}ibb3P~wCi9spmksY$<*9iFhQ|8WwF=xDmW+8UECF{e zE_uF$cw4={=)!SzfohW%HDzK=e)Yo@+kry#hW#5DT7ATg)#*4K`-NP)#hIvHtD@Z# z?e7e!9x`2Q8;|!buXAD6EP0^>QN(Ft``Br!SVRKucS{(}I-r{*E-aZvC`YxMuMY0q z=GJTR$Y*GrpuKuTtv39 z%9_dQy;E8Z1J+dg`(U&Vl8J3k`61lsg!841%K^V7-dH*# z#d=l7i7V;0T`gZc6#Sv^i{h*522`X6q^k>KJ#5(5M|5DA6C-$q)#Tz{-ze;5S#7m^ zzYdfYuF-tg@%vv_Al)U)?)OCHE?tJF&U)MBb?=&J$!h&m$ zYaK~Y+r8<2lNCPrK~~L+AtolPKiiEr23IA1Ds)Eupr2gPu@8rWl)kdXUd{AAN0>^p z<~ZNI!JVB@7O^bjnJ-t1&n|7UaeMOeN^oejMfBUOMh5wZU*8$XdXA&MuIxu#Ek*n9 zBC8^!vZZU ze=0=A2!%DPigL*+x=e^&?mL@Sm-*#w*Ai7hgvE=i&qOCPRh#!gW^xrYZ0APVoaJt> zHMa>pp|CDgsO<8|$dth|J?i=gl1q=`8v~o(KMbw8a!g_DtpZfsJ-6EXB3}8!R%Q%L zv!GJ*#N1Ph#35q?rL!L-&ZkA#Wjdad{62OtW?X;u(^ZC1Qv|fIvbVo>jie6llSzDE z7B1O#nr0`yKB)g_2CXmOx|Kb3%kA#l5m&d*vJl2+ax%u`cxNxBYV34*X+(yXRkD$- z8-28=XK+dupSAGAlg+18d9?#mo+B7dnyHU7Bduv@t=|v=8)~I5beC(S>0@CAbA?Y^ zhHAXFGE7HC&WZ*cyBZ@~$a+S2ldIBwMm%o+ES@CQCrg`!xYp&O!ELBvQNWz2KUU(g zvOHP5&h2nDLXzL#0} znyaXmH<`iNU!v{Ld^~i;1iBTq+KO!pgG;Bxw5B98xpithKxA6;26Kpqe!Hev>~hrq z_>8aiHHK*^*b%hAl3Q)j6NF;tqa}$lyPH^?obb^l-V!G3<`V;Iqz~VW*0Rp3@ZQU( zFLQaF`;x-mU(SjfW-`T3`y(B3N4ohv#$NFFUC4U7kM>fN_=_7>h05lZRfi&Fr=N4= zK&?+e=33pz`I(swu7xM=V_g8e1@`jAw1MN^9(DmT6NQo#S%y~$IT;LQzHWEOU|!2$P~Dxz%Hd;rqX zB596R-^NW;sI2z0y2GJnb*|$SdsA1|uRE^6z>4t@iEVZJrSnQ2=PMR*a5sLj-??-0 z+UcVhtX`(Gf!3|-SxPrwvR`A%xth6e(NLu!=147-E7m*zlPRltLHJu4f!381-~6ud z1tZE^sPl?ew8S7;>X77DX#}A9;QPWC%9x+tl)e)wx$00@7J|j$4!T7gUB2b5-k5kn z?9(Jlh_G|}N-Iqb*2+)9xw{0Dws+JWA#Zp3aIEid&^+!}y5X4J#$XoAM9X(?LgH-6 z0_S0h^eD;bOIh)*L|gclr=9eS!(f!%sBNC#g1u{wq{rapQG55c1A<6vvY3(Qg%uH( zEA%>L6+X&=U96b{n@6+tdGZAx*Vid8)u~+7$R6^Y@jgK?s=-hR+5?@;xJ0)E3~gl7 zH(1ngax`y15nje&W`zvHuoszqG~c0wj<;G}>nT5AmPiOw=o(x7-1jI_r#ynXk*^>; z0j}MVUHgPi%kI&ov*TvXp`|4XTV}h;v!TPr%_Cgv;xD^4$Tz7oZS47CCzhPrkE?#p zy79%*$M(}@RsGNBiw`Z4t`a(3@Y3CNcju7s0B1X0jf-d9#5=^7-zZ)X>s!Ki{ok&y5Z_*P%!gk#6CHK6>romn&DUa`P!V5@|xiJ*E5sGiAe%zp55KQ?oj{e(P#j#sO!AiEoXYCN%X9I1MJv zUi6Z#XSO_LL?+XwPq#Ky+E1Bn`yk7j7jKp~2Z6OtbWWbDt6UMMaeG&*mv*w$Z`dLh zr)Pa<{cQCOzw=E^ybl_gtNX95ZXu5&psO@~x_X}`;L5C#IDm(Tc2wkME)9#u1n~ zfgTR%cd2sy0m3f8cQ!|obYxCfEQN&a!nJ_y+iu)0X>>xJTmV`?{@-oEr<+ru-AFrI z&h;M&#Sh4`jb35rc z>SyFJdQBQ&M7syF{YQU5=8%P(oPbuKa|YZNN7~K*-4b{dxWDPH`3dF>T@{0ZxTBPR z7Eu38+F_~Uuj=`?Mv{P4<2G!22UA~yA>1m5{z9VO>uVNG!Y$J8Kj6}t#6a^~RVXi* zetr_RYf8DzLY#UwX=!?r#0#@H>xLJfzWB1 zKGV9KKpE^o&Yt~)gEr722~OfslV4mw1^ug5Kvnx6R3TS|q4>1sn#MU~k;~toX#Uow zl3ap@>_1x#zHj;M`()0OGwS4$3}nakKRT`R??e&dXAee#9=rb`S>Ne|`^t_=dIFLDw2$u*!OXPHI? 
z{rxIXi&ObM2VL0;w7L5y_J-}=z;BD(b<+lvfPj2IAP)Fs$}lL6+YLzZAnB*v=Tys@%JAb@-MtrZG8 zyx+kBQ?&V8I~}$tz%>3>NqZxOh33zXt$~Er04s_3k(fVZEchV1$nUT3QFVB)9Rfew z0lwxX{^M)RKfZ>5`Hh%HK9~z#+1;CD{(g26!jX?qe4e`fltyIrg9PN?sB+K1;3nUt zywLljiwLR`nnWs-UwsYOEYgAboz33B==U!+vq0uFU;X|*beyXfI?MZ~bjgc!ph)!q z`1}uE34@{}{^&6Uc8tG^=R%C-$c5Y-%YY&ap70Nv&6?NY=v^>P7 z-`?G-W2nB&#!dh15V~(RhK$_p;8# z5isR*EKzpB#BT}Qa+FT*i^gfv3=lG}VSwXRXjbk6Yu;0hoyFRs;VxVm?AMNP^&cDE zw=t~TZ-O$#pRB+c#Tco3ogSGN8_X)~3sSql6m}dQ^WAPatM=368&Q+3lK8n;{w{mx z3bWhjUUW0! zt(V>_UneD*+j=xLf#czt11o%lYc=G{#C!2pJHJdj`1tF4efENOQRCmXyJ`d)871_1 z_3`M7lS|Z50VK7F+>>zWiS~);R>O7$ynT2=c0pU5Wt?46`7ZODw$AFbr(RhvMLo46fAhPQ1x4WrvtDrU~ z_gsCYcEpuekF<_zmW4%3ywxyGUFo4yS0d}5PYhH%_6v~eKmI>fDTTPM`4WJgA=D3n#~J;RhR=Uv3S3J-*xTX-v~VG9 zP@TqQhF4d%;%S6eivlRTBuN5lwdFRxA?PgL3x#@EQW*Q)=CMIAzg9?Z&O0OY{ma$5 zSHtKc9=(()Iai6w{fH14#{?ijG$e#rPr2jRY&pBx*p(c6dtqn+S2;^gf>5U6g(Cfc z3rE%X5G@6HT6SYmMCtzr4dgJ|&DZ{=pWerSF~E#M8}AkT`GAJ#&5Q;CQ`oEc1C;V* z=mx2PNCk`Y2K%fxGqp6tb+E>Eul06N#O08UM8Q<4yE|<6K_1v%=742D`EK0GAhMr8 zc?Z@+u7n>DlOvR%H0Wp{uzJIj1yDjiAW!2mNuBHOgE33nJ9M@%r&>-`gjVY>@RP z;WnfA9k5LGH*7MPIuM9`$b}8$thP?D3u#9Z3oC zD-T9a%t6~Na*ew%llm9{+5#g=v*0hc^4}irpY<40Tiz9LMTSm;&2hsP;I=70fFw4| z$2ky4-OvT_Lx-2*s123uzlK z3Zf~BhA?tDa@IF143t~m*aMOIaPMzVlAI4L&wQf{-mdd zKOo<4(~wVmN`~SWCPavvoQ*nPz+w=A*`f#XsA!u(LwX9lgep|l82UqgK%RhILefss z^ka=(0|Zz+lC=SU9$^-6n&gbTww(A)P}lcc$l3R{7l2XdS_`F;2l)Sm1Gyz3=Hj;K zIf;B1XTM^>S&eDdMUzZ^+OWaX1DzBVA$v9afFxOF4XaSD5&Owr7>X~ujVkcLMP#*QS(4fi7%}O9L!|qsVj{m0$<_MkkK^ zgp(-$7P&*e0Wf!$z-BnnG_nRnT|Jf$hMVFKh)Vwp%yT1>!@A76+$B7KBbvYbH4Z<; zzY9v6rP3i$fiZQEbCrh7*lYHmGUgwEbJ;vN{XpN!-xam96f<=sfw+4+^aV8A&@J<< z@5o77&Q8q6t?(4!uGScnjw}JrN4%Dx9bB&{!Jtj%>A=#K4u*q;Zl1f!tgiC0^;JMiNN=UsJx50@{@#bs+W`_j`;ljr_qHMnmPLn_a#`X&%Q1S_ZK>uAb zjb_j4V<-bmWhV$61xfDhpw&Mhy9V%^4m*%m%2Ycg(6IG?E#_mdegKM8rs3lKU0 z^`y?04@kPWM9~e+O~7>+_=N8hHj&SD4dhMM&L-GV?_ZVq5kY{ z;+6U9$b>BhrV41}qzxgG0(C(Vr#OM^*v)aS3GJanSEpA4iap}>9s2-s*2 zvUrpzDsT~6i`wkvq>r8x)Fi-W@4LrQu5Uv(7oqR7JfOX5d&|SMY!p5C!gPjG;lIv+ z;wNUvzX1)Dm;kPUC=p%nm>Sxkyugztk&^+MBmm}#z-*+0Nl`a$a~X^$`g>^uk>`aIJ9t!+_V$AQ)*YtCC{SwhQ zr_0x79D`xW(C$lEAemTeFCPiGiu+Aw*lOv8Vo;r0`L! zz|BBN_JfpxIW;i@!)umdH@E48%59%dIz_sEoqGRPO*X7AtMk7{cleVZ47V43%ku@B zlfKD*fq2y~+fJFofd*?7%e0Gk?SH#;XQ z;j|ar8C_7k>SOdQMY>;Qu3b63EWpw7A3*3Ut@}h|tya_Stm;sbCIUznb~$iHtOO;- zEe%^m_o3#Y(dFZ|^R&+%=9QNONpS`Bj`||m3phDMo+tMqL{C1a2|}y%IoCL3MAvJ* zrbau_@X5W*ns2^#|MAzQD+i19i2{GZ)o1<cOrG zoas_UO2e}i!Oex&yK+#Cz?r3p2&pH=RGOEE4QkHqz#X{dzr!G@!*!pfk_fGz25h5Z zBeMyHa#GseXx0k*dp`G!Ud1)Nucu{GKG}cf{$Cp9HW7RjZrw5k= zO%AhNYLkqSalk^gHK_GVdPp2tp{aL$jtaz&{jQ8sP-{11!G7Jx&26y;f}vGsWy0)n zx4Xv8=4t+BZs!cx6+llRN2kDb(T-C!2JM$N5#T(`ueAD(ju4CU(%?gv^-L9_16lc= zn}=I^I*1mrmU(^ZNRWxXLLJtQkSC`~bjG_M=A@m)e{hMWrm}abxIH_-v#u$iJhxB( zL|M7}!6XfO_OIs(mDPPwUu0*IGq|524^*(=91!>Z56SYFDok^Nr;eJ!wXk!cgK9GN zfs^0jtdwPVEzd`*DKJMKyuf4s$ef_!OObI8Y}*)^m%KYq+{!%}Djc0UtjUmdN-bTE zv_PMyU@Cf?aohkEWz=jejH_$$Mdeg>A0d&d{*dO~Tb%3bzu25c7@UxU<3#*7*qN^9 zYiN(m=mN`WjtOmQtC?#jKH_f{JseKHFg1Mk%qu$cn)6uI`MVRoLl~MY*+;Q#N~@$% zGxa1Pim(v-ihQeE$w3PPF3$21&Y67#BwWH(yPIoeD0^_>h;)kSp^3CxGWGrEVb1eqI$p~DSbQM)Rm^COZh7u^N4 zV(Z!Ut=*!0gY@#Vq!eC;*N}r0?yIS3FNsy2E{c%81L|eM?v0~Af?lZn_6)32TehO` zEF?}lFXuA|QKuYhy=@}GOi|OZ?-%T~N_#IPa=+{SB_vIjr(%oSy zo&CoB=DnL&mb<=yqXQ)g=nnS-%Aiyhm(1UQAFiD+xfHr1nLnQMv(}p^_l*1&AVo zLJxxZs}=Ke5Qiglc5e;2-_^@6C|zy+tU!EZ=v&-d+h2%s#b=O*Xw^Bm#d5`38CR){ zU!)Y3Tjjdv>VP0jxkrrVj{ww4t@%vZqS-JP6NFI$oDxC69@z9^eTn=t9=UQ9MC|z~O;Xfsn~Xc;LbA zx|%Fw&@q{(VcKxrXjs4W)p&%BgKpOm<78i*mr{9$I9^e4=Q?@pDz&DPls>Qaw)va! 
zmJ`v2g8hurnM17(G7ZI-69;!}quYjLoA@pvzI@bX-qhTS{QXyKkAFq-fSq=a+)C`V zTGa~>M&{F3vlK=ME(fE-WQi{?6_74{%fqVM2gigc&z4W}R_gKFI9&fiX!^8(q*|8s zN^^hu-R4{uk#Z%qhpI&XYyegWFQ`wKpW5K>Z8jsQe;UGh^S?zAfw48ur<#TB1KVTrsJ+j1SsL_wjY! z$J=52L^kK_&31JbL&MlxSYiC;j=-Fp*?c(kO<l^oFpu_&VKHo`}w~%Hd*nTi)I`f`q=Y?GM1%7@a9NA*0ouR2*Hc zWvg}^oBgr#bNLaa@*Z>)Z`F5eU9Mu&p6l29(iYN@@Wi_j|L$Tj z|5a|*XYwCJlE4V7D)ceKaMnv;WpIhesmf z8a~sVzrGKflW8UL`beN&O{qE2D_zy@R?{PfP)9VtPZ;SkteQrkg;HBI@Nr^ScOu8>2Sl#IMiYeCd$t7q-uD9{7z-2$0oe^Bo8U^t zlmR!2l`<+UzymY!c&v3)TWG3nk+gfyN^;<|qAjcW$c>0`^WYwjzy}_FlKul>g=#?a z@1)SRxsYvCt)*cXuas&uY&u6pm>orA4hLF5L7M|k0uU&E*_kwSu#RJL!JX)L-(=RcLv^VoJThn^oo2`UR;TD z9Vcfzb}EK=i0vN3PmvQhkCNo%dbh#lXr2U5PhvMt!w@%L-!&#HblyGI^G5Nz;Gh)l zBFpwG1Ch*qQ1n(t&K#p2i>A-n*d=r;{7$*wx{jy0&#vT>on^3Ep>W;Ml-oHrudy^P z+xgZsCVs2RvcM*#sz<|1U1YMRoBx&@K@IYN1GV38Ay{`J7KnFjFm=$eayaG0kC;NgwF}(lIT~Pe@4Oa2I;`N!L}0hfTaOLwRS{&s^Ir zNzykdJnv$LraG*C^6BmtDpdAyESm;j_u_VBKT)GH$tu0z+nXVy!J%8n8$WxTKVzh} z=CQ=Zb67#~rSEc@ec28%lIkX1xk;R}r#LbnRz(<5F<~4M&#q|S%5M0ET-j6xcN|hx zTW_tT#Sf(*MZp~%+;Lq}t(~~Cg_<$d+!ZG&;*Zpx5V#W!>yaPj(Ulf#9J%EfjKw02;&{SyX z`>{96qczV;m0h@9m?e;xf<79(Gl?E;@7aQj477|3$H=|gBHo}?w^URZ#YP@^aGcF0 zGVa5*=`8oKF{bKV#)WOew(OKDw~XOS9$*oRkvesnCfow@IJ3hwH*OJQVS5k!`YX zA?sx9`!a?Z%NS6OxIxz&(=l1QyH{vEzwN7-mGslx^7~qXqYQpZEhL104-p$GG3n?f zS0g=a7{PJna$e8Hms;dwp>G-Fk)7e%lw5Ypu^u-oNBnkzE>(ucXBXM^c8~q(4qJM@ zTl>5s;9R_3BKZmBhEI=5bC`4I$#w|A@ns-I*X(@0t!Y{Sx7@mn@kB;ONxbtJ2j8PaT@5SYLGRKiZlE*A~^`E1dTJ&+UmxOT&GqB_ABG$E-TiEYlI^0ugZO2 z-df+3Pp2mOLOq79bbQP^!pKIH3v4;lH_#)Qf((%@D9tAiv)n;+T@gwuH9uN>>ialu zOF8N=bZvNKpi*4+tH+Nqlci4d6?8h<*JDE*-XQ14zYcEu?hntLnWtJ#qkz9f7c&HF zPj?x@PV(xrrD~y1saM&;9lD!maqwBxXLezBGf%yp{OX#cqN}+&))FY^vDda_4~jP1 zgnmdu?oDD(Tx&zmVmAErA* z-G?(0{hPI86wCWf#^d~3Az%@eza!}3&emL&NP=U;kuLZwnyL}2Kh!Wm%ReoTaK80y zyA32+ag2cMabr~VXwDk;m~iVUWy@ivG2{IxJ5EVo@hyBtRy`LS(x9nUu7?s4AJ>W`bo=K8y1D~6?px+jl+d>4zmbt3yBC<0Ic^Jd%&6X+O?UElKeq3h9Wpy4t zyuvlG0;gBrh~)%G2^*`MZP?{mUdor*2*AA+D9N zx|=40t-l1H;d}yL5r_U_;>&<8uM&f%+?~XJzBvGB2nhyOUUrNc&@C+1*UnS5s|?&Fa*7{L+Y%<%AKc9KRL zMtR;M#8^g~en{MoR#=`V`Y+!vX0(^dlTP(ylFM4!wS?B^?Y?HEfL%V?AaFB5H|zPi z58*b`ub_l7VfM?Gdt(-FV5o+SgS zil0Eliep+YLA`O}EreU_SM-#4nd#epX~P-Ofs z#5(a0%sl+5Pp}i*`y_^l8^PoG@PO zY!?3`+Fp1jw(b|xQ4G;2V0GEiX*g`2dVe|_4@7GAA<)Pzri0;*FVf*NmtBF6Y1qZ* zeD(~&R=?mrbbh9Gi(&V00=~zKb#g=@^6?S-%=DR87A?bL*2^;8UGseU+KY_cLR+(c zR_Urv2rY?0iwut@myyF+7;p(FQ3y^l_RqWyjL>0y+7SrN#9KoCCI44)68PEafBKn^ zZ4Vyl@B=eX?gJng&E0GmxKgQYIzYO9iv*B1v&f!n;QRdl{2mYevw0Txw9l+ovGZEb z#U0(9gyvWB+}_yYoBd^OW04dq#*IM?QHs7F`}y{vSieXA9$ep(+Z7-KZ`~$pGFn&= z^h*w#3;j$cHUA2gH~yzr8Du17yS?w>+eg^KB2tZY?H7N$qGJm452KG&=3z6O=|q+Z^pLaLUvOHGdS<*87Lx(Y%c5 z{U>go#fI9Cvur?zR>7^tw`QWB;gHT$Y)wx!AkfEAH*yJm>6JS1TbIYBI}Z9atl!t< zu50qQHo5QYCG|%2=tAv@!HZ=oS0@?O&QI8*>RjAos)O~9rr#_Nb;L%Di)k|!&1Qw3 zPnD>$(2fRAj0aak9i1k1-HZoi^F>s|sfKf)EGCn_Y{SvGf>^v@b3q&bo^x(VDYfA# zVF$X84g{RGw;X>v4y&g4=Cx(%xE=thGCyfKxhY@ui+_Z+Dq<@ZX~eC((`c;Q))0r+ ze;|@xHv+~kI}$TlgGP4MzPjPgH@y1QqQm;MqURorch&yA6-~agZ03?)Md*D7-}u6~IJsX7sXn*Pg*w*+xup z3n8pdc6jnN97>XWVD)25Y|i%>*dn%frPr6WxFa{U!D;`zjHVNJwfGa% zOQewKH6`X9fzXSb$_0HlDvC(_=_s{{BojGnjl&WJ1*!uMeyx+zTygfmlM|md!=$(3 z_VS%B=VTBf*u5vu+>6EX4c!^#Vt_Ob{UgGv+^q5fuk{Bt!K`OFOd}RA4tzwq>@M@+ z-dfg$O8Lm_Mbq(P?)x$we4bnC&gN~!u4~HW1vs~UROx%FyzRZ@`AqNN1*vZ}$DBk! 
z*21YPk7B1-QsMHLIoQSbZ_Kqv zOpT^ZQX>5aMwG}M7}bX+@wGps{z$ud(zAe2pkaQxQyxo(69?Y$_}8Es{2g#z?Ytnz zM6kJWdF)k>(Y>9AYlc)C`Z`Tp^kO!2!47&K!@i!y5LZxNti2ShD|P`Rc%mp1NB$)-MU1n8$+2-h^!%>D+*)hyri5qSH)I4E0{$iS2#9w z1S`}JBz>H)Rh%m8ZqR`)^X4D*I%Ene9BJ_gB{(ddkU$Szn!fwa+R0Qv^Pu{kZf@IB z+u*G#~nD%D=3^v*lIstc!&6v!Db()fcKSZ z$k$qlyNxtn*Co{6T34UxL~axqOxKrc&M59LxF#2$NjvyMw&P0veTsX2$I(cuZz*_l ztH7g|v`f^Fis&?7YAhB|h~qZi)==BWzM-FeCV%Q(mCc|`SmDzdX z%#2xe4_Mv~RTlckQp7Qeq{Ja(q78cLkkWJ9gDp;;C^4=Kr@q7Y74pi*?lGZbO3SZ^ zROahUkP~yNi`yn7JjRxE7Rye1+U5!8*LO>cX`4Pqu@6`q>=P(7Ds}ynz5Pbrs7hY< zXZ?zZ!ckD`}#^tV`#sT@IsXAy}EEN5yfE}ygCO966ydyx}hRiXE~STN?gBPO57ly ztn+2#e0tW((Tl{B(UH{i+N5=jsMf*yG3s#(QK5n45L+2cJDV)vO+fXTycNcF)T6@! z$%qRmZSLd8=f&ejsBcGx$m1aZd!esv0J1aXqUfgv0-|}8nXwc4ZwTie8%@rkh4!+c zLoaN;6tk=^(Nt>MTQhk=bM6MqlQ0LQ#Gd`<*{kkN@LEeU=2rUZcm}HOjCE(}@Avrt z%4g3ZI${!7%e)_4n#s1VYV&guMid&{vo_&Jhlwy@w8CIrz>bOP2Edr`Fx-)Vgq$3Q*4TQIMvp7mz3QZQ1(#Hb z(1TOY97nP~vz{Wh)Q5}KRkk}MXufdwb<8p}Cq(ILU-(>NVJ@5N!ZKR=^x)YCf8>6$ zP1j7LL4G`!VA1Hw3*fb>OSk9jTaq_LI-nm!c-tml;R<}_`6^CEvbp`mHzxT4mF7?; z;^Bb(Ue8#LI6)_E@Xwm@do?faw8r|HybP-^Un`sRb=MHedi#D~Z`s9KZYz5w{huHJ z=pa6}t#V>h{NjCi0!@$NH@gPw=Fajpb}ZdB_4D2z*%wcZnJv*1*MAVwM{ikb1`fdwj`&a>xaTqq~n z6nUrVvg2=x_AB5T(R*t9d#qFjTf>6=n`I3>(OVJT_OypYL<5dL`}636Wp%7l<5P(l z&*CQtgG`Z+k{x#AW$< z+be0e3FIMwCQPN2FlsP4eJJpMSFk3iI$D#*)Og|dF=hyJsHg8}+IWZ2K*ypc=apt( zO-G-fJLdw#{KP^R{gSR)m4DTh?)srp+S@qQHE4A4$k#`Q_6(JD-MMTwlF6QUyo%@~ z_t9OpM{c~-!dgEI6z{5#C->dqwvC^9G2(3yC;gpE?1g7^#O6c`<2txAxtlW>xC-oj z?0JCy`hv%tzAl^7_u3;CWGvo}Hw#VYuAs+cj>XM%`w@395Q4^!6P%5%_m=6Za)jcz zH3s8mUa_NJ`>i3X&YbhYqeX>gNF1L0d$DE5PW@4fo$0BDs%A*9E(MsfTI0oScJQev&BlX@0)P;(J>3_k1>CAE3f%P>AzVTUsD)@ zCwJTd{`Vbx_s%wSXRq1CLQ=Svn#w7F&}pu_KOd5otQZCEuK%w52uIJqV}8OWeGDWE5ZiypU>&_a*P;)q$w5JqNnnx0-QDl%H=VCQj}3Vhg8OIJXK-NV7h2 zRKMQ?ZDIVN;pfEp-}`fvY(gnpz!WQ31K6OGW@5Rb7DkJ{XDrn1ZAqun4J_0ZG9x#& z1NxJXMg1JP=o24uS$Evd=Cd7@@c|7h;|zTACDbCdzxRq8NExB$icQ6~lHqF-7`h*n z5ehbo4c-d5wFCTGUwF1uQJk8|JGl9r?CksZG*c51Nl`U){agaZp?m-6i28n=?CRpe zKVT($qV$+%b2 z3CItaE0}^6Z zQOZ(fK)>qoSdSpv{_nX0&1TE^twhD$`E>dy&|ZHA>n{Ue5co=!+Gf=Ua{>1!ZFtOq z10g{c`pZpnOVn+M7Po>h=imhz4m0QP1R-3RJYwf%Dsihd&lpjH+JAzTv{Oi1+=7;7 zz9uMm7dqINnRKt5n7Aaw5BZ60=YiAS6kza^V8s-HVH>h@2XJ^9n(7b~-W&idlYKK~Jrga6A!Z z<*$2U`cUhQ{(XQ)$iee485me1X^_jYoACxz*#s!fJ8*TtzqTmV+qXa6uWXZww&%Y1 zP*aX0w%BpSuGbV(ihV8hysZCTn31o%{v|E4JGYH@ogY~xdzu_gOuczqEqi5uHve_^ z{rhMEX=d#<9KV>N?g3G4W4o5ocvq#l~RVOKVQ)!>W5D#N14r z<0^Y$s`Gto0lfiip`-JU0tBt^cI^IRf}TC2@-r=YRzl(KJv{5(?Wn< z0szO>8eh=E;P?)+p#z+>3F5-;NJ68{;@<=e$?wS5;`ug^s)#C>u%q9m>L)nPuKtLu zfvn~GC=*jPvipquF6pF|13u)FE4hOt943&j9`beV+UNSBafTq+TWe4S7N#o(v?}q7 z>C_gXA}ri{AmF=;Qh=mwXoh0Xcv0+_zqMBg*=q@2{1BS$fI zHdk7c00qd}ReiO7oV1w*sK*>0J;)KxC}7>$hST|ud40V2ge13QlwcaB zWz}gtvc1Gr8DoE?w7`Eq)PD7lT)#XRy^_1P$BQssWNPWvB9n)j$Ha=p zB&2Grk+(q#@ISonz7`laY*@p5)$?xIqUW3gTdSKAs&+iR*n&ey_4-=NXX@s93OpA` zI-r%RKA+wd>tI)4P7vDRzTK#u6Bac#9{P2YhtX5Z;iWuzFzx0R~oumc4HtQe*F|-$Y6S^T=5Zb^{|B1nm zjsIf0roBe0zybSDGoOXtE?cUDCiuN@bQLmeSH z$pwgxod<@f3%5=931wU6I|NWg8L?mU)Xj068eeE2`Cc1WH_10N8G>`HTW>BFaJUuT z3blhV#A@Ygc9GPXbdNZI7mYxdqP7p#F$lhLYfHx2*-bSxOshLyJ4RTpO0@`vyHOu? 
zP@E&}`a9%^)V{NDcz@^Wv3B<^S?XIVQPKr?R63~HN5d*FUzvQc< z{1-6Gf}D(Q8(k%XWa*?s#P+S#lm+iqUZdnA!r2=rnw;h<=#uC!rWXwWYMl!u_KuyD zPnpro_7<}99KvGx6zuZ-9$HJoTXP{)4AmHm4(~WW`Gs4p=0R-v5Dk&rSxEdgIq+D1 z*xqq|!5>KT?!d9r=XGy3%S4mi3GV>Vp(3}FL~VGRG>CiO0viQgal!jZ z09Q9Y8lT2AgSc zjsG6m+V`IYZaV&1oyG>K&g30DM5qh=MNJ4 zIwY#ZXu;e^#1%yMz3{<)F}|?Xr2F5Z>rfh~EB_5Re0z`S22f{QwgBthkTcBx&xEG| z=l_UnsEpE*9VAsB&zEJk_;K>px8r(Bq&vqx3@*Cak;M}od2f~}a#)3*>$lLYyu$SD z?CU24)7$!EHd@Up0P?#wLkcJ!j5|HzFjyYg^-?s5kKalH>)QM+J7hmEcT3luPkWEc zSfgLNUvO7Z^5&l>^ay{Ctn&=w_q$6RKhNyl+RDWDHD70#OODG_{Pp_D2N8FrHTcdb zC980&(Zqb1K=juC)jv=)WjsiBm5&lYZSZLnnbsPV$SkZ1Il~cZ{y>w7tZ3W$Jm!W$ zyu^xxP_#$gxvrk*El_GRaB^#T3-vfair9>vmI^sV8B!LulS2j*x?=bu13T4DcRyVs z%y-VRN=V0832d#4XBz~;AsQN zpa97d78F)$0wGjEe}5{q>+~m&BIydhm}-GC#jal8v?PWg&msX6ZPo9FF~glLc7W#N zxBKe<#jn9+LgsF37+{1o8PInz^Wd0V4cZ+=g*it3He>X!f&FeBIXsBm7-!H?zguq( zV!uK4>0l%Rz!(eo-Vp9G1QJYBC*hPFNeD45-AxBK?~_YuW4WqOPx*25Hg5Y~W&11-=Dx^uwq zEeXQ7E)RfbZP1O!s6Mr6tK-SK&CuNsBmG=5b0*ZDK__L4FCa#rC9{mtX}s75{u(OumXU4%dUdq5CUW zhkO^_Ts~b#Qz)+@%mg_6kLL5zZBoq(t~E(l#?mhNAc={K+uf0Fe-Xo--o|xpqw@Lq zpgOuu`js(S=&%bzb9NcBCb|gLejR|0LRT1^@HTN02tq2~4N@^%02H+2Nu!SZx$lOE zGmG;KbFlNj$-s;qHYQS)R*UBks%dN3PMci}Knku)x1>ST^XU2v6#^7rjoI06vh#XO zu@7P8`|39>ASimzdk`c4)reB6Lr(tASc|HaCRMdG7nT?=Hw>?)p#WD$I)QU}*Qzqe zq@*&g?bw0PzQfq1d!(*p4duj7@Jb*vq-df%V0U8`4l(wEN1t)?fbOB;bi{QziI8w# zCsA>qTwqydZ(p74-sXA^91RF?LbCUE10TbF%;pw^@oHel8%T++Waw;A3w`a+CM^Tp zX8s~}vtx&;2Mk=k=P%Avk~6MKdkwigv(iA<(mkMZG#B7RSOGx7J%9(qEjci2?10<~ zF2k40cBDX~V@cT73LuotfSlVER(Re0NP%fl`mP|&iI#EcU6e|0GOAy@ zP~h@5vwV=&{k>{@n-c}cl>|N&L7~lDR`|oh5ebD`o-`n+<4Bc$^CCh%bZWXz;)RPOm%j zz*cGsh?idVwJwBq`ElE$kPb024Tzcu`~@?a)%3Ni3#-gkV)?#8j@ zD6^V;iZ~>}^nWo2=|^^F2o$E>E#1{Opp$p)k?Zv{bOZ1((KMl2nlM*3?CWjyH8s^w z>5D%|n!RYdF4YVpktpgQoV4j#LO1P=6b4_7+&PIFAd&Xnf~>6q#`&!|0d8Yqy|sG+ z|2cV+mDw_ANjCMWTL5J4*7;`!#;6i!1`wTsYc^X9H4y8EgV7HHPXT6z_Yk#gC+6?( zL1L8Oic|2ENP<4icl58hW?M;<3vbV$yBSPNKgfl~tB#O(Q{++s91o6)AA>2_+6@@_ zBp^3CR3D0B4niS`fI5FO1CTC3Z^8a{*NoGNK!$n$$eVooMqvEd#;Uth7c`676DL0g zBkH&LEJicb%|RX_aC?w}xy{-rA@qx>qpbV4hV)5Q!2aH~6V|Mhy}R_bbbnnl^xgeQ zN7*gHnriUnM)>2~#!im1tFzHuV4(n(m}y4WYR8L%UoG={##E30}A^+ zAnfm^YvGrJv9o$GR&k6kA#=h@?vhQ9aAzNyc6vQSe{&6-*Y1o`KqA%nb+CzbjYQUGZ!R43f)TD}E+${0Ts;&%WV`fXY%1 zbu99|-;NkhTt53C5Oy7Vm9+2MAmk)X+6r4=iL1!KTG5K$Fa7R9J!Lw^Md*r~euLjVpF9YUH+VX^-7@kG zZ-XIu^fk%D_R`Io?EPnX3IN z89C_^q9mW5mHxHrCb9t=J2l5tck732iC8z+{ezS+DZylQ;F1;?1DpPNSN6Yp3*Qz= z3UZqrhEPleo1mKZs`Hl3PR@{Tvt3x%Ha@;nM|Jh|RO@dfu zL^OL-ieg>B6-3*m*?SZXS%8&%q{lG6oMmj?>0v&>B&{X-br-0C;`D=&1~Qv#%rt`U z^la$(O1S73^=tcl}jo~q{0Ec zBHQ$z4l)$&$&H12N_gsrBR@a>h^2C#=MXlMe)Wq%H z=fzE@r$=fzE2^pbjr#{SF8E!|?nrgy^f&<~Y7c90Eth+pv;XUR!2|p^kBBIUxuY_m zlWZg@`k_)JjSch;k`WL?z2X>wI5P&bFi@frE^?|~>7^NDb&xp5@CKwk{c^gNTlJiF zw`yMX1jh-9eoq!!3p{_yc~3tr9dUO0*Z@FEL3oB}s!mqAq|hZBu?k{WdKNz6wE6JK zNuAa&tUNL14WG5Nf}QHzM91#j#STG;Zz+lY=n*=3nxp_mg!*el0Nm2j&}n5|MoFC3 zXLi65T0)$j5A#Vga7$~41>%p@Raywrzdue_D>sy`dNa2_`SJvewRBYH4yik6BTS!f z$#f?j6+(048y#^S((!%_ExjS+Yk4Zf+VVoJ7pT-YgG-mEzx~B@82J{Y@=J$Lq`(+j zsf?3SU=x1$YJ*CMe_&|Q0He{m!`dwM7U&DFl{KdvtsQhaIuF(Pcw};fJ7s>mS5u87 zWMAxkd0PAW_K~v@A~weFPI$!Ut6uSnZK&T_Mh7VMZ91Ox&#=Vd6XXW+SBKI*wm;Hx zvFRG@k{dqb_aQP>>dnyy@5~0F#J9UUu=Dp0KqpV_?hx>6{<%Zul77(>s}YQISi(lG zwQoz0m*+KWeT&k>=94TJl()}PWh{neyPd(YTpcmxIaVa~@hq<@Szm$JJvCP|0RUmN z%SEu$LA}G;y?YnR4TjTa8+5J;<&zylUdJ8&_A=4FLH*6hJIziqmL{-k?0#l|gXlNk zA}D|O{U%ZIf05{?Xs-r6;`}R+(e;S?9O+m@q%ggyvHiZpp{Ny`esAs6%TAYHe8Y6C zb3BP&FDocQ(Jl_kNk}?fAHU}9L{l0&M`AO5FSEGnDVZrZTNnA`RmSw;4C^+?Co70A z)CyiNI0XX<7h#n1;EalLJgrHA!aw6HyGrbG7xCHnW4tyay0&hYUAYA0Zf`vhkesU* 
zVdJoU7adjSHk*im+;JQ~QO)O;LsMt8_B=tAQTY^_$4oxi8J3z~Jfr9tG7QhRzrViU z7uI(5+Qnm@izM+z0`Zj;d*{??&bQAAA|00f&)Lgf`Vmyc(J~|3x84{l$oI4}jRrm7 zy>_l-gOw)057ddz)0iB)>T=fbt~^=Vn*Si>JG(KG4QyF$xF!+Md=QmgJ6|Jtg@fK8x{$PNBp4AB5+ z=R!Sqzpk6TlcZ@L3RR{{HJN3*yW}m}uvkCyb+nMQ5Q09gi8O0F@#daH8}0ynXWpUH#%q?!xL(zw`aoyr0b%5>)fkEf!-9Lh>gV-;>Bv8FQ6Fn=88DJgev1 zU8_Yv*5FKxdR%yivfd0Yg@wrGoBsIe)%1Pv*@A1wL*Ivv83kYXT841mrd`Idgn_6> z!ZDNPp~bWBT$1rqL90P7AXE}T(JdoDZ7BY13H@f7mh$uZrTr~K0i8~gY$7uV!b|1v zwbT9O;tL4Mjq2aD33lVaDfpT$Sg_jPbOKoG)myM&fKJ|SlJdNe6Sj9?OG*)myF&4e z#4o1anU-XXL!~CaXF`5(MAz7@A=KVQA~Ko9a=fUEf|h2_}N|@2n_qTvY-?*yrD89n_iMW;{RsQ3Moz%Qy(v4 zaq7nD$-J{WmRSflA&=v3SA_kFEjHJ`E*mu;WyFoLF$Cn(XA3nw9(_?3!@fQxICwQSv{4wd!Dig%pH>8p;b^`{|^ zw#^BJd+?0J-1QiKe@W=+xG|v4%m23g@JUV*1Z2C!&~`K4w#n{U%g7{U!itC{`Pg8F zVAB*x43rUme`l$8Y>buF`#Zx;uF-V$4CIpW+I*;4*ws&w*2q9rK}y8dBvoAH1)Q!9zi zHNH-A)jQ2G7p1>M{NW^WOFHru>yCapli!zV)(BOho>FqO4kd_8(R{r@H*_3d?(m?> zHCBE&p(D}*p{;v!wh2C)Mt*#ybUr}Afk!_i?-9RDum#1}Wh(2Uv!B(o@{}CS&iq3fd&kh9pmOl3hevaj&)C?fm@`?76jZ2h%T}96k5P z>3v7{3LSm@;5*B~XKCKpmna%!`mvlZ?7*m(+pRujN7u~9Z$T06ajWk{i*w12xw(xm z<870DA0}OH+pE|hBY&Ol3rGe1U!;-)NChSMm6ADi{$g3!J9{U3GFlhCTpY08R>M)_ zHzt!rC-o(leOl5u@>S(T+V2Lt(0-l(;+5Q)X|3Nyaa~RuD5d9At3NM7E+72ESMU~}=urU2H((!0_du8p6p1G(C?nm;^mfYs%JpR7**_f$9QioAJhSSe`8qR7V z?bWPv#Hmz@4W{F1>u4`y-El7~y(YCpnfK}aC*`fvtW{@4uJMTkQ}w*UEOp3?&fZDt zMJ>^hNVK!zPwF(ExD9rCKBVBwx@ucGN>4eS)fhNvl97afnC{R|mu{2G#xFj{`jdzI zr?YL6#FD$5yV__8m7H|}SFu9;ARru<`&dh?*5r%tePC$QvK%dj%>UT={&4%8r=HP60m7?4hZ)y=(DXp%tKecz zM`<3MQE^I=F~lV&tLEvt6J8m*&w%%>-&z7KH}mYQ`8)K!aJG56u{TsY3Q z(TwM4LLAxoW0gr_`;e&OLq6a~C|ksR^_QCJ32r>)$id+4o#=lu`?VrM-8VdEOXS>@ zBXb1ac5!}H@*K6Raazgfm9xX3*|xP*|D1Wdgl@Kfs zkjmW4$)aq>RK2$%C8ddm{nNh9$Acd~?v-Z|S(uV8aoQK)yZX{|%WRB;Qs@2!pW395 zKP6srFV`u=Kf5{Uv4G2HQ21A7h4aimTbX!TkmN)Xn{+#JsM6N^snF1n=+!ty>SiP> zsM3Dnvjw#1d0o^&sP||Q9bg;UbIau56v-BKrNW#~pEq=+617k@T*!UPrB0@u8;Pi8 z3#0+jxWo|`5jNl8nLa9b<3s;Ynv3+hZ->Bl{F$5|x@#}oV7L4Pv2xJUwDSO+n%{hX z(ES)k`^qLWVOaUS;LXd!bE2W!TW0jsO?^)L%H;6u1WT8rv1R?pK;MkkYNTlz}P+JFU9 zE)`-|@kVM{;<8_hao6H;X;=DE2`9mIirYE=3!|y;(5LpQ5>f6Js?BDpz25TybCi2| z?W`w-udL9$aP5PbMyJqNu3gclF8jI$ihA2CSmz?Nc%$uUUcho-S`ZXNNN`^zT%F>* z0(9L+_6SC}L^I%uXCo+bfNVNZGx{AeO@-+herg>~#m!UJySP_OqyOV!0!4a8q@%B_B zuU9MV(O`S6TT$o+ETQ zNnz`55+yo03GLf|f-x|5Hg@VUrf8#Q_U&=Fch`U@b(CbTSkc)R*O&lPC|r~?o51SO zs*tx0U`oRQz5Hh@N^}s*pcanioYT;`l|Trbj_Hke1=V_99r>)kJStZ`bRTi`!F0eC zD!t@(65ik1&mS|f{Ir8lL;ChJEHFkQH=Lwv?F#AH)oT7i9rWmo%-F(m*K=K)-CI?L z4&!Ce5ZM|z;Iu7%KIC~byJRac%l6X>neJG??;VCyW`J% z`gXRseDBZ-SvAr@ds|bRUbsouzaD77sPD5~*I^a+3+57X??$J)G*NHi*BaQ6LkW-9 z10&iEhch?U0nAaC@E&m$=I z5^%;#oC|$@_e;YYRCUL-BFNCr<4MFwbaC{qkPERr-yik*VeZO<4DtP<`ANzVi zDpd==<2MCe9+khFbdoGt#`%a9k z2c~rgo=w^lAT^wInStKW|`3?ILx*)x}xak^!4aE%7phTQ0ZslXdF9f!d!j-CBT z$1Hsa3dP*g$K9}b_zp?{iTe*Sw5Aa0deZ~)P*QEfQO|2kc4`A+@{GMW~ z-ARYki7i9V@lpig$l8HfEz!6lzS=p`r{xxd!I~|`30+W{0_VwKP6Osw=t{}VyB#fS zADfm3Vj3ycu#JIsV;oqu!y@3SO^pEbkpqVCHFXH_C4PK^ALO++0b3(voWOQI7npMY z_D4f_{ULxQ^VR#)vWf)d)N{xG{S^byFh|ut8)mXYTWSM7e9dCL>)}ZPc1SjKTL3}1 zRyGdY9OD$RUEB2hUVRpLs}GiO0KAm2!91<3GRbG9GbpiA;O;1%b=qSgrEg5+N%p>@ zkM3SMy8#`nQes-`emB0R>IaOyqIz(RbBm|ON6x#u*<~A|3gCD@cflJO$82hNUNED- zfT-Xqcr5@YaOcFh+yECl9)BfbzsC6(fV-ZcX1_S)QI=ChK2tsESG&RWipi^MXGpgk zKL(RR-|Aha#A{2(Ho}~LF=c|gm(@_!(O*n@>j2QeE{X$|Fb{-LCE(r(F{W<8r}zPt+tvV z|24#D!ezLhQSZZmcgN&RPvNf5L@lWOHJH@DdNzHWe^~s#^SWQ8t)u_uSWthsgURC$ zcCRx}#e0@o|JTgx|1X)@ul%DG-}et281 z7lKij5X=a1*yaVU3^S)qFvT8$+9{$ZkZh;zp(Tv6$*O#rc96T@bq3OrW^e$u?Kpx(>1o}2$h(AH@L@}55Bgv=u(8eW zJAv3#0u^t3fFw-8Rno;7%E1Hh4P*BniV;YX5uYhih*ssf9RK;PAYjSzt0C79PuW~i 
z!KW6|lU2{EIk3}I0%N8LvGi0mz@RE>D9(^CVia~}#{`>|Ftc=2imDZ_o?Fue4T)Td zfScPOxBHq|H53?QCGgAEkWlC}2uM5RzT0eg7PHsY7PhxGw{5=*$-l21QxY2 zt%>Q*HrfFv$MNF%M(S`rkz(hw@@bwxPF+36GH5e{9h!5`9>@&39pzV8Qc{zgp!$8O zLr~1?r=CL_%&fd6$$orIKp%iPfKK~kItOPr-j5lD(T|7^=0hJr-#(@2S#c8EICam2 zo+b>|=$t#VU9oUX?IHCN?N-og7{!i$KkF9Ea((>*Ahg=%>Q7Im6>H7>p~hbAK_+Lk zX9G5pN_|E%^0)Z!0r(9>0lqoQ$#{7uJdWliN(F@QC3I)+Jo!E)PNJy}cI<&lo@+MM z9d+^aet7<7fd`hXO+oRcqFgQQ)^C-50TI{;h&$KQ4KOnYF;SS!#e7_#LC zRey+eYD6KnSK)QYA2AjPFcGGc$ZaRHe9Qw(=wg<|%`vw6KS*&<9LFbHZeUN=4iIi< zb4{}G4zB9Q0NlM}Z?0w97ok2KySb+f3F<0i2Dzh>y@RFsL8u2ufGf0pll3H{19^H? z=cBR=Mr@h^C1_fN9WUNF)3>QV1fOzhY)U0|O_I4Mfk>kote4Dc?u<=^x`k%$AIpaR zpqx>^(jFre3kthi@&6y%-aD%4ZFwBUih_uM(jh1aNVR|{Eh>jDAiWb2=_S%TQ4~yod8Y+;7A z97m~N(adiLaPJTdOc(qEA2(Lb9s^gZIAZMF%&=CQI#9}A$6kN47#hvbioNEuV()yd z9o)4qt8%H^ApL~1Fc5Is{yODZJ2n1d02u+!-u94Wo+7)PfFm8GdTcB8dEB1!&yqbu zaAn)ZMY32r^=7hn^#ut=QyMV~Iv?NLQ{a%xKx~x2loN--E@=%V7)N5YBEJm2DVLf* zJ9e+Sx6^!RGv*pqsT#d5NloJH2dYiH**Il9j>2XYV-3iz8(~!Ynk@PJO=v!jP){XR zlXtck$}B>|@eRv0NI?wt0e{ zcMQDTHea$B0ldI_1@JaI2woI+o@89@!}FP}#6uX!yT(OQN;m@cYHU8m9ui}{g#B37 z$}X-eu+ccelod}P>33XNq?!Rc1|iZ3B6HSnUauccDKPO<*9hvGnuW=UE9i(qH*YLM z2^f$!*QrbNq$&XF9+X^vcQIf*q#JgY?F%Lqu@Mca=w5I|RF8YU%24HX3y8nlkFbSB z8pW$55MkC#ACbgYW|^9oaFXTk-{tTj;aiHaxoWir0|5mWw=@lx{pq|=AkG>P@>aLXtiKNx<9i zDx?d2Hr1l02I!iw-P5N>sNYbFVYmenXOSyOkZh#c9Ra7awzQ)|REpo6D5fQ&cF)ZY zC-@a=xq27o(M}L6DPu6jsYX>E5D3=63fU(>_s<%#0*-_DKzDYWDWb#0XdsP^_Fdf7 zrxO=W_%IPx+-iW{-UlP-%S~my{@`_QSc~hv zE7oSt*?PH~i9@)~u|M-7NH9DIH7fN%?n|W4^zE@8;0h0pC!8aZX%tds@9&w&d0_Rq*o#vw)E4cb4N3|a-8bVjTL8JC^V5Y>$Q*xt$`F|a0w zq<=N0f4U#AQ^xxr+o=e({-Upq&Gb`+p;u!_w<-%NxmsTs;+wX)8SpYUO{GNr z;@OchWwd8g&*kCw4-DS1Vy=!VF7{B*fpf{(PT7NQ3zhC;sPe9em1l^3n$mP2l*$)d zS0w4oOnau8Q-ym?p_RqGSHG%WaPd8dF{gIJSO!KWxtfU&U!0Ar9^1I{7%^XBpD`Tf z;=St30Ir*>H~(IvO8>$P0V-97aPZ3*UeUR{MIWM1&3j+ zR?64S_nF-k>tFo>x6O?K1GY$#wZO>)FK9o!%Rn|#{&>h&B=fX~dCPAM#n9P(OzmX= zYbbK2xxa`L?-8Z3lC7XI^j_9g82xEek_p&5n+VL}j&zgp04HV@jlaIO7-Yphm9_Y_ z58O`OiG5cwKwCKV1n)}?V+HnHG7^ZMvY|)Ag>4d>nhSw9&Ac|2uNnCUz7c$SkWRP- zr|>!(!RlOpr*TCPG6HxL4}@Qp$+Jeb@v84_h(VlA&q7N3K33x!0$d>0XJ%Vy=eMp= zGn}z@(D}0RGFsjt{ONncCVTb4bQNU>>XuJ%->W!sN`noxu7_MS0IVES8H$(AoU=2P zeRl>=%q;}ow~gnJ)ur6z1UODq`!=`xqWyO141DzIjC1`nzxyvO)qu{2`gsUgdX=9t zinW}Wv-T45&*DnbCgr>>V;!wmxXtT^1q>0@prY{@; z_G)K45>17c1O$MKpkS7g+9)LQg)#iJfg)nCBrp~_UlIiSP(&_Udl+n1y$P*(gIzr+ zcPn)4N7??OC(t}>%Nkea(9a+hWnZB5WSfn~n)iM|V@p2jH!=}b=8et$YT5Iq^t>is zv?aj^N-Fq79>n%dQ>6(qR?X1?xn(I0I>pV(ZLA4Z*aQ*X|85`@_YVB;hCsfu#Xqf# ze_0rh|95+%5ae&^xGK&ZYM33%E}XcObZ8#Qfu0LG2!yl=Yp1@F4%l|uLjF$^o95iV z8PNZ&lUWP-;jo(&@(8Tcm0#)Fv0FbM-I93wgL1Mg<#iOZxV(vF^od!TD z?5Hi}5Vm&1_hMfBY`Lj`5ojoZPnViaZfaSc5GqYI7ZCm&}Dh88cQd@v~~lUU#2 zb8*fbaV9RR!(G(uS{2m1y&8z zHb!anYWRO~YI&`v-PypKqAp|aYE@|7?b-gb%(y?dW}p#L>?2F&l$g}>y4rh!xj^^v z<&vG^bdJo$=Ib9dg){x-wTlO4*B35phA21-rKsO1b1gF7T*}*B`QC5$B9D?1X;Ft( z@Tu00$qZKZ=Dorda={6m)J zXT&h4w{j{Qz;15PtF z>I~(Q^Zs*c;D0r_dxlFHimN`pFFN;fW3+t&t}*bBaLj_k9Q6iOtE{??Tug7|&VWt1 zVxqDKSMBO+0_6g4*j!V07fp^ib1Fs5oaRt(u;@HuT$CVFPAOO?(*}KPw+YN%R%d;u z=uibZ^V)u_r zM~EK6N0eHim2uC4tcy$?YrW@Qc!m^@Fa+mWjkbiG^JQ@r>v-WFb`0&+vl$xTX}L}y zg9u;7PBiiT9*|xY`21E&%_bwxxKP0j@2ZGmb7$G`T<-$2J{I;P4@9q=is7eyaJI6L zm0!F`$kl&+HR1=bT!^ZQEY4$yXXt5`LGgO-^Fuo<@(h$ZflilimrW)!LRaH=@1D)0 zM4qsdIz(V23$P&iPCx|@GPl5G6AIs(rk%r*4TE0KtcO}%8m3Aw*RB!*+TIaj;5rQh zs3@)y33e&cje$dAk^PoQZ?J?L)lIif<7%Rn^L8vn=9v(1HG%xRW+fT&#?wtWNLju) z5OUPHDnK!t0H<4?y_Kjxum+bGdT;4?CdP<8XkCGIs*@jO@!I;q!^CU;hhX;&drR6| z*3UWkvZZG75*-MfA%)YEI?7f3;?m}DVoIlY`)m5Uyt>@(AM7rQe+^ZP%(+K9;1Ogr z$Uov%`C&!zowIa38!LfO(Piz}bOkPQ!tlJhn4j+M^Nx~rq8hkA|6HVTdm8jJlF5-* 
zV$dynBS`n|Ao0Q4tZ}&J?GI+k^OL_Ver3LPiEiSO5zYOCD<+)PpBXu^K?<&1IvatG zAwp3tvMCv5E8Pf}{8kx{W+rag?uKs7*VPjzz9PH>(r8E#ROU)(vtaC`c$3CtWUI~u#I@2S1NA&;u0g{ za7BSpVs{IHb1L?mm7{O}$&$s*d99xTJjF_kU+DJOny|;@b+X5ol)hLUO8pYTT0XRs z8r7`Vlg}t@L3xdnNMX7#E}8mq>AL9QaF2U3Q#YxcMEY6F!;pwA$*0k^Q_1;D*ZVHw zhLh(r3PXJyRnsgbN+y+9YA18mLoAnepMaM?Dzo}uRBGN>Yz>CCew-3a3K^;@?e7#d zvruKSO))VMh&|jH5qisq<+FbOP_b`F&k59-K5e^D>Bd@#)e2Ts zk)ypz33D=iTaP_N7k!8xyH=MmWawM-{HX0XhH*cuK9hljnake4RdYsUxXpi8dHD6i z-TscxGKZcscF>OOVwqjG*UvZxTx@CKEi7!8uX^e4fxKcpy1?@p$G*i<$kE5qR01ce z1NXq6WGtF3XryxjLfyeEylA73WZ$&mWY*eK{Yxp&Qn8a($^o?AyKXv8UZ zD|69z;+$C}-R{?{Kl3O(%KnJhqeC}uxE!gRRiZKj-@@V)_LmT} z!TWx;!yoG&Z=Vhn-ad!tjXx@T_X0aERi*M2?$s0e${u&`SNIeonlh))1C2-go@x@C zC^=(xd3q2gaa>B7jbbX?);0HioCL-_tn8Knug>Vu^H-h?w{Otwr?)~_#O2whCzUKm ztmqX)b)J+;xXIVRx?@yKs|v;ZRu4(r>)^43cXemXAG_u|`^&HRXYCCg_E~S+nG5Pp zmjAKg{G*R9ND$8$8*+Tfrn!-h+FFy?!kK|u$Z;z%CiyS2hw}ZF-f>E1;ZdlIb6d4+dJ4x5XnhH@JQBjpc0XePYK(y4n^9GY{UIcD2y>pD^UGqknuQ;i~8yl7q~J zpDKAjrsMkKH&nym{5Pyt!^cKcowI{0wRbSvYO;rrY%3~zEL@72A@|K|2ruZ#>U0&9IM667z?o#per#j~<)JxcT|CDPfl}%GCf&v0%?0mdYGdeL;CPntD zbp0V0+kWXuMa}1+rmWW|lt^SR=T$X5!D#!~hxcw?sKXNPuA>(-(3H)CTh%>~r0AEZ zM^t|QbF3=D@%=KZ**R2Rdj4zc1euJ|nE9Eu-aC)gs$XcXc2@V%-7aubJWFibo#~C4 z;esDV*l@vEZhJ|JBPuZ6gGV1+jk}q#cJjn62mX_1#+rPmeIEzA&%mOt_(4?@4Tk5c zD?h}lWjO}DhA;?3ah{UUkyw^>f0KE$goEtq4X5uG8Ea?0Wbb=a9|~?offy%m{}3bvh)6K-hzV4)V*$b3054dbOw36K<|O!p$W^++t|}VfOAlpHbk=Vwy zf=)s%y0R9!fXw7ztxR@D>js0I^nr}O|{Ix7n^vr z*N0ov^sXKQl-8#AwW&=wAD!nZsUL~LPG&h)*R>po9hT-2QuP@rztc#Cdd^=m*z|GP zlL2f`+E3xMCMuO-&6_lw0>githvS$20;+r}^#mF`L`NQo)`Eu`8>DF@eL&R5{$$ zwUHTiQ_}>U3kIefCj71<`R;mb0!&th8|^K;7d9~A5^EAcEO55yT;!K(g0GD4J*n$6 zD+UP*T>bp!79 zEG5p}W39MCvuZNK)!Ibq2kpz}vs~3Q!3xDeT5!fCw*9=fBUQD*(Ttw%a@q0R4!Iez zr~PQs(l9f}S}DacmoHYdTorph&TgyLaYN`ErY#`QYY z>;Su~@_ey&xJlO5Z~DJ^Cb^XU=n_7%x=z%VL@&lv|B_W8qh02Anys#J>fV7yE(v?e z%l~1cvg*1bLt{<91d3Ri*N?IrxR@fW*Z-~OT=a2z8Z*7eMfH!LEVxb`7x6PD_~4I$ zf`Y|hztbjb7e0Ke*LAdY4wlX-f=l;iC43htcAYOgbNK9Adfo|!H*_>4IF%*pfRSAP zJaqRg$hYe%s#5&3tJJdYtc|lz-|Cd-+(R}YPP)mTFYMY?WQfxpC?(6iZ4I<#8J8LJ zHj&74EEQnnV-&N_O3r;?T3=pSMRrWhEjKJKjeAu7K(?ooIN|TmDJ+s!c$+5JP1~PU)@tT_Lvj>j%RNckPL^tT?Tz6CM`0dMk?37V{@7*?)+sErO+tp*-8d-h@R6pZt!|dP0 zpUW%EW`Q@X{r0lruHN*340b`RCGb>n*Q^i+4M|r~Q0ID?V%i5rl+sb((^|z@;<3F& zFBm&|UX6>KwB=H#xta)5A%4o_s~LVfI9K&4RipkoThv@fqrr?+qP>9I-o@y-he6+a zZ=c)bMjQ$9W=P`K9XyV5W{sOBN_sT~a?uk@Pz-k*w>JhZGtRfTyIm9fJ~v)jMQ=0M zNJnHe-lz_z@{N!EEczihyi*r7$vdJWIPc0IsOdNB$*p(xbHa7Sd-sn|zQr~1@fNw0 zRDK%O#zl(33_d!Sow|Lj_v4FGrD8U(ZpP&B^97aHaYK?-7C!T3qJsQGJ~N#wqof2?fq7_ZjhKJk%zckjur>qSu09osB(mYUS7Yg2<29j@5iRx#Lz={I{ zRu>$zHQ?Tt7f1ggi_^0n%jytyfV=gtGX>c|Izj%PqE`5zywtOO@FMIt{x2BR0~oG| zKVZC8a8PhKk_)n!p8)&3j|=u|a*$8%1l?-G{5qO7X~IW*(DUa4%qq zCY*6(JPL$*a2*sWaEqi_`ui&)IM8wbF~ANCK>r2ye;;7|$GisvvQpb+cF!BWtKu@b z(Oj+DK^0k8!tX$p$OXUF8}a9Qr+?}G2j4Ygs0ZKGgs-UI;!!1wiK|lmzzY+W;ScNo z%Mi#bE~9J!J<0RzJzDUPM9-)PcEtL}ju6c)U_i`Y1G;FI5z`NrVShmd`>9POZ~VPs zv&jD#kO)$@vwr5KfIfP*Uo#>PaA?>qeUZi+wo&daxi<~oQ-gpw zJjk@5hx~Jl?kyf2vRJSt*b3s;P7a_z^bgkkClr0*BkuwI;tCI}4IR<}K43&sP9%Aw z@-OP>fyJUh8%xAi^WZNW#r+yU8v6sJv8eLFLOx3WV<7;489IMYYAmV+*aW^0g4&_=K$nJ zg}2V$wEdrGwnzm1AWma7@U7IcV4gSq<|lG`HAIj9V3~}|g&Y6>&=G?64cQyK`7p0j zeuB5s2bi97JJ?vVRO6*@5@P0jYV}M*XFM1ej`;~-k9l{@*;+=w-3YKl3(kCv;(7gG z+MMCELqX1trosB~9x~_Ag8Q@2*7Mr+HRisf4PvR9Z^ECJPWXA=V@v7ePB`j^7rywY z>-JC88-nk@TdC*6%q_p1%dL*TcxzTfJ;D=d?LwxD|VdkNe^2ogyGBZLFxvAzp~) zR~U3HGQ;b48f$mDpovmyQ3Hr7IIa46PRne0Dh`DfXb5BG)5X8^xWWzkowF@}$?-@)u1d zc>0oRlB7}9<~|dni>BA&_)}6F`$873SI;_1cM7Jtirx)fP-Ry5!gS+h)oP|@d|!w# zh71>a-a0tx$Z2M2!QC*Vdn1WE_O^tIU%SRAH~?455Z+K(=${y@%2Y-6Xk5(?aV22- 
zFqXSlVjUBdlG_lc$Cs+7vj%NKON&6w-MCsEQr|&L4mc`*epm+yN1udCxwg+i9V)b>3;W8TRzAmcbNmRm_ma(SJL1lNWKk_f%)xGZe(g37Pysz{ z<`7a_Mm-IO5^<1?5-F&qF=#mow+MW0zGgH^dE1$4gQOhgY5#s_d8p)(Y+Be(@oMh+opRIhk3&}XerAx5`$N2 z#ROP>9U!I}_%jRH!cjMxxXQ6(6+4J1t81t;{#HW6Y#x`OOJbg{zU}WPnozkx2*$-l zihCjmD=#iP;{`4hcPNOO?3#txTr#(^-^+NdCE}ScB%EI~39I>OjAW5FXVPjE7(RX% z2xSe#v=Gm7VBd49NaTZ}8HIa{z;wbBK$%{b2%cbr5W`L<9-I9M#5E&tMj{cqL&WYM zMfI@fn~98>QdPUObuvXrH6e6AR|WAPQ#qxhLtg1U?Pj+<&cl10V`s-^Z$$eYUZ(Yf zU&ze7yn5k_cy$Is{_H~g-I*c_M8{3^N<0YZIbSSpOjR~V?=n)u%KDrt92(Q=kEY~CSS&WEmyr$eF)7(+?-A4W)kXyb zf^*~e^Ytyl0bD-7+aZ!GdVWPzr85Z3O@}WkqSw6!cD)DlCgY4Hif#^vCZ%J`9>T52 zmiS`X>quF@Y<=dgUY^Ep6Vo3=wIPv;lCE^-PiHLeer*6rOUrV0^(0-OiyRaV2mzJm zMtVSfauzD&t8tA+j7+y*Nve_)O6VND4B4pBVx&4Z_Nf-JWR@%H2Coi_lS|LyLrQMA zPpN7^9pV5b6H^!b67}ghT(faHZ5%Y~)PC|xL_Y|6gaBTIL-vn3IwA3U1r!(v8;osK zJu~4ctraA@G^qN0pCjkJq0wkggIJ}ktIP~M%1z8IC-F;Ky6n|3sFQniTLfk(t!bq3 zvftRCZl-X(Zeflye*wh7<}uIXmDOX$2a|9FQuh%*3szF`5OUZWdZie-f2Gja)u13% zrDRuVFa1%akr0R8IpeFE_GPeZgI=%~Q{r_S!#xvI0z*^NbT|a+M!WcTZl7}+I+0UX zEai-n{L=NYN7HSfj_@S-5lkjyK%&uuI~dTUGn}?q=W-D1N3yNhjPqj>*YG zPU}BR?JnW0U&M?~?O55d5qp zCCChM&C(#ji*E{t?t!Q$mpLYH^_hBJ^ld1utQk?otG4z`S9k@f7rrK=02?~GH9A#8Y$&S^t;IuY9*Ljh(}a+N0KqIlz|=R^Pm?lK1$`>$9~VBnHjsZj->*b`uwzg4 zX0UDBzG=uM*t)_Uo#&attj1Rv&n%k{`0MJ7As=(x;tB3C9#OpHS9kZD_*w~Bxj<~Q z<&2Mi_hOL08y6pUW_G@dkraCKbRCG&dyU?f0>&EXn@|YIV*x548a}ec$EXc07*|v^ zl$Fm77U|B^SY(tAm-P!`CLEU0tUQ?TZo3LvW)7D+4Qk-U1nT-KD*#icz3PQyp#$!d8dlxG1fiubClaR%T} ziQI)?tmRg`Pt7rzxL}Wi@7`7W;=LnS{|+&C9J(?HQjtV! z2hpVpEAkKG&>c_?Njm4g z>b@hHzqusDpNO+iZf{-Z%$iEZE9S_5V#4e$hMFFN67l2}=NVdZ=nj0J9I=P;Z&;id z0+kb?6gt_NX_b`$w~B?30K%I4JF$x!+uUMoayj>nN=9^3n;nnw^CYE{$a5P8*EEI;^h@1Z)-~5kgIj+^_X2 ztTw`%365lV4Mp>NiOO#=fKfirsyV)-PtsLjAra6MlySvt%^*SPklk8Pnr z+b12v@qDEZgH&N+ZLr-YEuJ^0U)6)(kfije?s+@R3Fv7)e(Tim5=|=b=;Q)Gtpx}S z$l3Zf_9<;?y6bFFb9T`=QR!NpE%`Np#Fp7NmlO?cv-(}?cn7n|8hFc*7+d+q3qxt9 zOi?mpCJ$e*2kaj*r)>`rXa?QQNP6?YEN>s&XAJmJuW(Q{G#xM?_IDbU1}}zPeM&2s zN7W>6H#a6YTq1k5JE;Tcd=Tn!;2YCJDl?ny2l<#33J=;bU`aYDp z=|nY3Szc9-c!83cM-Deis5d)Z_)5tZG}CR4Hk?3=`@61-*GbyGQlT}$tR*;c=HS24d3d60Y{{j zx#e{r!!D4~JHTXfirlH-JX`p!{z4-9x0px$dSxe2*$&GQtw;`p{cFVZ#i8oGbkD`i zS0(FAK`MBy=qr`Hd_yl^heEbfE-4Ped(ic-bcYdcSm_{#7gv|M9;eFde z?O{Z96!*zw+VwuAH<%A0eQDH^**GrHzoxoqS*9wn>CvVtq0?ro zMEh6d&IWR%@i7H#U@q4Ix`Xp2&zJn{tO8-oMcH#VV#7q({WOvc%oEH6DyzmIrl5!j z>KkY;1AKEo#(;hHOaI0wQ4fm8K6OR|{afqYNRWRwqPgN5`rk+O-TCLJvxolcnA`tl z4B|iF{2z=7%O-NE_oLY1;zrv#ygqxiF<**0_~xzdKSKsaPS|Dqw9KuVnO~hayZst4Z*Z;CCS6h8t*?}=M}K+ z5O`?M|45f~IOaoB$7i}n9*^9$XUdn( z8b2K;JL5B7y!Z1Da-7zko)opJ(^4E*>$8v<7`IT%Y2}#+x%)t2&gHmF60OL|rb+Y0 zW5k_l^b4K)jSvjn4N)l;oBa^Onrc+!>qZxzz;c!EX0}j;rjVdLd`#EnYrU7*C6D>uUsb{R6Ht0dB`34be}CdYaI#a-2} z9d}*#Dq@`&&rrYBS|K+yVZ@B=DJ$YrrGZsAoJoA-VPJTbBh%n98R0E{TM1(an({pr zhfgMiF40Aja2-N8%o~NCDST0kJ|9TZ}<4jm^Yt)ew~!U6MJi@ zijos_DUji7)!0ax*9WX(S{7CV9bYP;n`?TZnx0AKRQO5XJ#)g*iytnlKPty18AmO{ zDviUudIT|g!M?gRs87{tKe>1m2}?7PY?}-I*^$>B?%WWbx^G!re46g%)et_AmJSKU zX^g=pS5jwO6(vQ6-56@=YqHhqyK`f*Vj?vJ417+txG8Q(S%?MhoSPls zV_HUrxwieNz)KCb`2MQ~@pbRogg=>VcZoJ1Xo(7RShdh~{o%C>X{M5B_uQ7i5S#Fk z1x`uIlik~i;8S^W-r-M3+Z=vl$~thep#O= zU?+S;>dD!<$8(PYbsv?lZyUdKhH`H!9A0Z1*wq`+BkTv(1)8f>)-Bm*;GVOr##|8V zx_aXN^^fE(Q~&oHA>XL1bq#|!T#%}7AmJx2H#Shyys%`}Jp2-E#?|w&%&R93tLSGA z0gNb|n@$M%wr?Bpd`&7tWmY%TRQhIR&t=mV?|}WMN@p1!Ex&AQWtDl=_GLpp|G_O< zMkFD5!@DH&oj>$NnfR(pi4fYkvqWOx7uBr%$ajOY3ysKLJSgwBk$;1RIg#A9xMsaG9S zSvkXV6;YmqD07o{m7BYrT%GLdvx+u$Y7_5q%f56p`!fG^Ql>$)1LU-man*=s>9SF3 zwan$QR@VZPDbp@1+xT}E1!L4yHy_fU=i*3pc<|+j3VclRcJ(ld{UN5=G0nT=3q~g9 z;%Dy!`ghI(4w*)yG^xULHy_M86?wm945EJi)f9vX{7hDHtBePoG-44uD=oA*#_181 
zsj=w@?~e(EGJ=z%FRnoBc@+Qg3MT*M!|GhD!p8-a-dyfkqRKcJjWH4iS9YP_vDR9@ zIni_Dj@;=hzth3y__FHkZ;Y=Kj!!Am1WFQU588mVL-)54_Ys2#vL{fhtmvSQ&{O_| zD$xDhtP@?6=Dbb+8BD0rdYjU|`p$ySX+UV%taWxFm6-Wz7 ze%C&u**U{f+n$d`R=J%8>QbC3Rk=InmB`hrhgMSpWL{w)jcubEls@Zqqkqtl36E|q zQEidTHgWi-RpeYdmE*M{XP#at_CrE^?o)#L#qV<$T3Ld+;!?&wb>8p|3%L^G`#B)) zg}3QqKZ{xdfx(Qe*3oD{Q=Lbs<+04$D;1*mnRB%d-Js#cP@cx^{fWY6#cR|~+L`sJ zha>Hg;mM5yaxi|9TyNWmc%5%rgXiEmU8tj$kQS;6#sph7V=dUfD?2~@JaqY|iD}n> zh^Bmbo8D#l{3-RZFm3ILE9q2DE{%#5g1C}l4NH6BgyRTd-^>cuCzJ4e^JeS~Uy$o6 z*2Tf*K?b(UC9Fzax?Yi9bWLI1waSgS&*7j?s`>iS3^)$zgvuPpH4nOBuU<{B@{hRD z>t2U++T@D+&XX4@+#dVrOidp_)v}A0yo-xmsh7lOo8se|8bsSURbB@od<#cgJzaCm zv5TH+O$HolG{U_LuJytKhgD8XSK}DpkD9HlmoBw+!A>s|YVFDdkwprVl~*PN?p6qd z3wOI*Q(fQ}IcM3E@|gi8JM5_5b-E|CL|T7LXx(>5_#c&c`-KCT3yA=mAx>4Y9d#CQKv zfr%i;>fJUAgzbJ&G@|UF#!i_96$2cx$Q2}sjGa}dlm!1y1FeSc?xEXqwT<=6vr`kq z^Y*Q<(suqX=yFZ@{{bBmD?}nw7g)Zo6>G6fbfd6gnxfk=1 zRxSU8l};%KIoMFlFTj!d8FDw4?7cUA$-UB#-=>PhrZ%nzns3UR^%0I=PkF#d)P8FH za*f*2J_%|#ZP(_nGLL1ay7O@kKXKEC&8`y{|PY;2NbeB>H%tcs8aKKdL=vI z6pHtbQGjbXN=#ZrT!XF$hPLcu1S3KbTrsi(7%EFcqtX>DB`#z1=F%eG_yOe^-BO%A z@HRC1A~*&Dyl{&o<8p)xa5(ni`Y2R5KAZFagg_ub^N1}(44m82VQgT$^X$(%Mzy2Z zg~|2ZOi5ePQR$L;U76TJqYIvt_~LWhngx-o5A2Fkyg*!eET&yl!5fo7(gZK@Z4>4iZ?erwrbacU|y*#)BO- zX**|34FtVN_D8IMX>@ciSY;W^GcCuoZs&^rzSm{~CR(?erIh`Ptf?e>5G?X1!IdHt zJp#LW$jR|0>fJ|lhu`i!97e)=7hgJ}KfE5?_AiD`%P{*tgIpsCu{R0j18&6;i0~gpHC?fxhBGNezXW7nGOx#NhGd6>A z`NDg5(Kc=3(EB#70>G9%`Pt^IHFSJ_p4vzO;VFrP-7uMJ?~ z)_^_&F#Eyr2mMt>KvfB7l{DkuG&f8v<279_fv0v{Ve!iq9vN28+jpw`>3@?Ft}|)` zA!wI+3Mx7w{T)fP(pV#^|mQ>@j!6f?39^M$A&;2<09~B$Pfn* zt{@ENcN!qZ5$R-yq+B^qg+$H%HP7jvE%XurB7OU7{sxc_q}%yiFyh@7J4sOl!I*Ta zrX5KgLC}eztrsd{r$Cw5mc1)tx4^lu@qzr>-!gJsHUjJeG%{HDwGVH|9<}xlOr=LY zx)&%)mAxury)>MZEA1=fyD&P>_B+joZrEpj5I!>`$Z6C^FbdfZ191(Ma>M(S(6p&Z z5aJWln)70dl1TZAr=lC3zay&t1yu^+0yaYVV;|0H)$DEbw&lahwPzc`6!-(jdz>2= zU#gO5&-J&)kp=hm?!1P64Ca>6fsUU|MSL)=Z6N88g}6f0S>Obf5Ym;EMZ)C1aVf}% zJUAN^%)!~nrH&uqZRHoWNgjtnWH~Km-5K7 zePmIOaf0x5ybJHu$rnZzfO}@ML}n?zq?=^tesr}oWdDTaU+lBP2u|9c{Bw@0h|ylb zrh2N_(>U=9Y-|M_{2c%G=jaF37+1;jfPv7Rq~AJ)p`+cmPI0lFag>t0Io5 z+_uMpNp&PIbiPrbT{)Z2>hRRB4Mt|LB}n=Y46F>?Dq@!Ui_oV^(>1cQ!CuOPTtZ0GvC18wNOF^K`2#mN%O=>7;z(PT{C6YXWAMGue z5ZrzEcQei!ygl7I@wtLOhX_gAiOc>M^H;f<>#ZS z$%3I`pIlos`fj!4XRMnFQBSU!b>jNsXeQ4m`Vyc8ZC4191RY!tzzt}`@e4P;2Ri~y zHx6)9oNMM$@LpzJBEK#y@$+ssW+uLP=SbZx622;B;+g z-PwlmR5#FVAsTVUEfawX0J~Z{STOys1*=o)3K8G#tpCE%$j%k-71b1v+HIxi`IDEF zmr<&5Ap08~IqDDD$YeyL!4j*Wiw%y;=ntL47l1GL**E2nI@kC$`fS?wLHg`A7B{K$ z*V5x~J>X~136mq;am2wnd?|{$-*6ge9Gx$%@E(TYaYqg}a%u1@% zb{PLQUVi@5rm1fsA@u)6IPgplUxxj?UVElx=y%9A_IDa?&>gIw^5j=t=a``5^So6DYX5IXTE za8Z#YBP$sGaCDHSSJv z%5@Ln8D1~-PWQ`etVt7`OiD1ag%Uox)+x_eWe=1n?@x@jNn&~g0= z+7i+D!`tAXjS_KLUPNA}*ii6^y- z!o7nLTEW+hDieBtC>N;2GGUGuxlMHS>2 z>MSpezI{HXF7T*0%Q3~D)#vt!7}LWNGR*8RRllFrySJ#E&b7+Spvp;SnDY&5$gj-qy@W95kYmKQ}Mk z`CRdpn%FMF$ilTpRR1m8OAZ%KhhF5hDOb61WW`MQgT)?OJA{_ZGb_ow9I)MZd845R z*H&9Q+>lpofBfBpr=;ujY~dzmwp$*2%(O;#N}hDkdR++;9FWoy?c7-W5_jrnb!wW_ zzF^)`?pR6X%TD=<?BRMp?O#}YG%DV2TD3bx|(Bu+E3pX zI6PNMEs1_pV*g|JDGE+U^wmmirLv(LzU^da8dbYG_0q3!gw0`?<@Ls;T73F%w1^f? zvb#Mi+$ThWhA`q7MOue|{D@*N2T<1Ci9u08cA+cq_~1M9b1D9r#-FNDp)TE>mRQ0<_S<201MmXjyUjvT291l?PuEOGbyE7{hhU5D!;bIbBw%9Z7 zmo3r+<+Gs6N$PZ51c8?YpHeg-=-JhRH(ep|{$! 
zPiOaB0-zGBlNs&v#9Il7vyL9^IatA5g&dhP;6jjhSD$}7DSYxp+D#Spg^NeG(smE{ ziCFzv@g!3F+k;_Od3cr|^o!Xu&X~M+_5~-T&!lWE%NRexZLS#_Ika{kR#JObe$1R^ zcwF*nBV?cwr_(Y5$o-tmRnQ80@;DBva;HAv67}OV&_v49JTzhRWbZvBcMB>V9Hk8U z_y`l{Q5ohlGEb#l`P-=MkC%#yPkNusid8!w?#Vbh?WZ_Y#_xLP)bnL})r9w4j*I#~ zd?Q>+y^ar31sq25wPGg}bNF6J-|3XokxBMGi^s(p`F_>Q>8MM0poxb$9B}N{Dc%iq zc%_1VQ@~T=xJGTwxBEk3gZ}Z7i0-eExN2aom`39hXKOL5>);8Qo_rb9 zOrMTHH0`E7B8Kd0^bhoDeyA9-chq-5I5qk+zUs<$n`b)SDOuY{I$+{NyA9U#5@Mbt zi{H~3l?X)X#f4P9z#IHkkIhXwW&pwxxrpqGt zRbI=FDOAN2Og2%bXRCW=gBjP0X{h-r6*v9jkmQ;)7Pa@XmDWMd8tW}_m;{Kr5-UzypRB`y-`N-;B zJ5I-}3q6*;!Fk7#>>`?>z!onfoEzlt?RfLiTIy7#0=-UAkOsQdXvu-&VNo%=$mKe> z1$L>d`i-kko~^nl*R_h8gt(@ zy>(7KVX`GvO@ z-aM3 zQ8;03-Hta+Vpq=GRE{{weED(QS*v1_>=0*eiAW>1I)B^fv6gH%l8w!c{+z+mz(onc z`a5UaUU9a;WkwbQ9VWPT(G5>=_(-);d^5(4u-A)y2PYxZHp{}({2jTrd))3!j%u*s zjn!TZa7Y(l*uYR(urIu@SFIl#;C3iHNXCpIyxN;B=dRV-)comQqh6(P$k{Dcr|(=? zCqF!n%bHC&A}J|;3B+wIN|?1#kS@fnW!~*@7I|5Bcl*MK7p0M{%oethmFFT}#h~q3 zZdD2U4!CaVuHAT8-x@_YbX0L$N3QrLTuk&sa*~1`jQ3>88jNhcw9!;4Ki0vX zZ=Le_P(!0P+YDtlUuD~l>Bze_*t7`JhjwX_fyZT9mdT{_=5cgkrMWR5+b{Q3hnoEL z6X^&G6_cV`os;KSsw7zCT{I0CWNWr>-NTVHxc4J$_x4acTcfF`wj+(%gtUefTPB+y zpH{EmicD3RUz?w2N7t_{89zNV8r`l4_rME0Iu};2c#|O9hXIUO5{UixH`-i94BHW_ zsaL@%*2-;fZ*X*hM`7$GWZd-{s2bRE^ens;WMPs*&_r&LdRIPPhlV5iAWR>2$)sak z+NE=&Z@s|Dc$56GBp_omcmcg@04PYG$_V)b<@QryVb6=8AeT99ojOqO^e0Ct(b;g? zt621sV&?-wEG3)vbY7GLo^7w%z_V0mViWc}dd?x1!#s)nm{!DB0yF_s z5Gb|w5E`_ED0;%coX!UjJ|FiH_6gy76`rm%HDPe|5`rGh#Dr16y?F%J47HyLs6KOb z2d4cHm!C8*Rcfd>**KRiDBcAh98HDFv zlv9v}`QjGfV?$w}eY8P0> zH^bIMVUpS2v9UGvBNs4>&GfNJoS%UHJj^o2l7593j9>!c%`#PCUdS)-fY0^ozJkiE zz~)^7X9tNLPQqk@RV;Un1+;MhD8b7;K|OA>`5^cYDxmdgZcO8q)l7Fg&ppOtS^g_Dl$ z|MCK$YP076W(kVWL3&X62us-%zD$KztAQ2xD==Z+9jvQ{lL&Xl;Ilk!5Md932(~5f z3P?cwSpGc+cf4lN_w5V)s| zQXK950?%3_EXC;5Ww{PZME}&;a8)FB%0A*AX&C$_G7A8b%>ITZtz3yEMv#krnSP#v6*>>1na5>mJD*?aJ@xEG0=OU)y=pC|CJf`5 zT~~7WKC)EA{+~?0{;%S#{^&PRf6@?P(NC1Jk0>0#yfvvM%vUD8Sh*-umP2l@gVE+zbYU57k;BlAOY?qI8zCq-&hDjJH!p%1yw+c z!il?<=>S8!VgDX_<_ImP0*9*7Zr}6>9z%b92C2c#7vF+gZa6V@U&4bziEqy!H#lZF zwRklMBKzwtB47L9$f(7Gi$DN4;0)O1j?x^IeC|`yNnhf#57l)u@(?ekC)BHm!_xnJ z_eYps#W}f!Ig|zMO&)&1oJ0hh%1P6hnM0HPf^iW_B`Hf{9aJMSK`S%BF6oJ1#CCh2 zKcyB&;PtY66w=2D#rlMEYLQ2;4^;2@U>Yu5B#8(+GuYQo7AhsPIXa!^TSJZq$-M*j zKzAN`s?gMuc%ZK1a4LU(4jD;kOnA5{{CUr!pHG8}@D#@f4{~)>l`lPO9h_l2#^B(! z%95vn4&(X`fipZ)=R-K;gu|m*U2Z16e5_-)vx`5K$8h)-*mnZpXhXe3&%VrTecVjY zzh1b{4zd9I#>S38&4654e|6Nuk0^{9Hv;#}U!gZr?b)ah%CB8z;2TK}mjpJx$h;I# zRDfmSBTT)g-HZjWQsCfVJ~z+=SbSs&W3l+d+RVyFc{$Dx{3jz-1Sn671f=kC!&O0Y zO?)GfZDsMMH}6B2*5=e@j;Otes&_4Ze9dgbtFp^-m8Y<>;)UEDCLb7mqx=D{$InzJ z&CR2iRGdxbMk#k2699X4kN^e2=Q;@^(bn+^uM4^gEbH8Q`=%=F<0h(Wg5_rCvZ(vY z6?rhl?CIS-#$GhO#75?Gci);eJD!9K2Jx(&Xv$W!YD^nD$evEm-j6QnTU>aBnATmc zrfvd_8I|~M(6K{rq)4W{i?H9e!kkL|XdE!>WGkXKfl3vapeBsNRIHZ)>&WIGdKn{EqkIl_>a#h-vrW9EMd$5oD zW0R(I-^>b`{5rN?FU*Gg-M1cxu3LX0L!BqoBb&7?GLYZY5diF!lmpDwyh3FX7Rm?Sg%g%B~fUB!}FN3#WlZsxmkw-p}WzTT(l z#YNvf>VWTo^z}V5J5omP#Tyrpp1&?-gleDnL0yUyAZ0gK3YiOtbwMaV-qz04Rigdb z@{B{sb)H-k+E;42NgmvW3@u)5id`POlo*Kx*XGgIOv)aDdO$AIrgE2|O%ehc9Ukv? 
zRTqN`n)!ZG7Yv?IH~!6p-IPt+72pzK+ECM-zL?QW|Zj`xO$x!mjVYs zd0uK>r^?o{5={8Fmz8)Xf3|)3e18=$a9KZ0(zMol^m|bzn*UDEh4+9r?%dNxdu;$; z(v$YJAUvn?^zolV_Ox1bKz-cTeT}FfL zj}lS&yc40bXvn$80Pl;s;iKn@qgtCvl*h7jT_t2O?{=Qt`c`>k7Yh{B`zX|<;Kfbl=xIOiYH}n{qgkcrrd&?zb}v1DzI9#!p)by= zV|?$I73f{(2RPUEOF*9*0(re1HVqk2qL|i$qfrQO<|AoN6Xw(OXdqOwKx0BrO4*>= zy@YTOCXATXJYnL;6xX@YNVzrU%IaIQX{idk=)y@C+bWXgxn6?P#y<8MdN>C}3YF|{ zqOS8UB21;s7`w9Toxbn`lanYL?d(MmwtnPzRD z{#2-znN$SC0b^&Xq!FeSh%6L;-~llY*oLz7j1v$^1l4gBchuNq$Nl*Q3TrACu4MVia%`bj#0#K@@m6G27qM2H%TJ{bAOVT(n0ejmdB1u5q;luJdrKAOWz2EqQf|)$%tzr$B(OXCT})>1<$=$jtb_QwjJOhZYge0^Uuj1N^Hcno0g@n zi|KzDw#|)`(>7$Ya)Y;?4|dQpYW^|_HHhW4?YRWh`c0HgY<+%wMKSw$3g6GXa*VgV zH63HeeCFz4O~qdBQ(I@R8UEWR+2^fEf9U(v1clC%;KTj`z5u8+lNt9LP0LUNLjyGM z5Kdao)ov8N^6eeLD$?L89h?S1G{i?(X9`FVdj|ppO|`FiaQW{w$xmodlWA>4v_CEq%of$<1=fW`rP{wF^@LsS-%7Ft1aB9$3WnJ~pv|<05A%ThVcfic1 zmNYGeQy`FX|CDqS8)Pw70C=C(Olh67Ak93kz@tMvbPz8$aYA1<3>rR;fhNiz5C(d} zAo5s7X04FD=e}zCNIT6DJenpHy z0tFZlBee$6uaR#mwzjjrh2NVR1^3{MaJ0ev1 z2U>*enasXuZ)%F+Ou6b7PtTCuz>yj#~?fS|t;57TAH3K58A@KJdfve*W?GGjaNsjw#;MA+!M7q_pKH>GX?~`fvQpGxfc(uy3q|=1CziasT%>-CGHK9Z}KWH-qG$pI1;Yvqv~Lepj1!SbdEW zr!ZaAK#{lF!-RyYgoGwJlEY9KQi6edOMttoxhCZ0VSQm@v%!iQ<^QR) z1uvU;9by$Zo1LXEDTs`k7CyP+kda#_=CyF{R?+&DChlGi&JU_??#Cx+h%EHPePA!( zx~!;1hz*q&CDP4j5hOzVINmKKUC(1Y-Z(G8VT8@aNZRJBF%s3ugMQ@_J^DMREkw~H zV(HvLUAa2?D>dCO8t-5EcU9vLA-wZ1B(8siO(W%XfT*?LuL# zF}IrOt7*)OkTZHKgn0^7^wSO#L+$ZIU#3Bo)3jzTbOmY>C=+%k>E~^_B?}fb;$|++ zEVzc1O!ORG%#;&CxwcewTr;4jzoaVsu7FpSRX^z7*YW>`4T^2$xndxan6?1y2cJsw z9lpG0g%RK{poBkpvkLA!a8uEP-V3AyrSqJ$ff`Uo>TpTqRd$Faoece{8@c;%;(NQj zK%@zuZ}HGPP-XO_v_dW2yF=$2y~cIAJhv+CEk%=^+hPzl8^RKg#7Y!-c=gYR zTF`L=W1Hri@i|sE$I?f`U@1z3NwHMk^%-xVd~{sG<8?I=#v~3;3X9K5{D$O8i$r?x|w7(W}|?A z=RT{Wde(O3UOWKIg>7L|rq-G5WH8f|#9og*OJ{<-7L`pHX25 zIiaSkAgV$>PNoHxxY8D%0AkQQW;?sW(c_zAheHDhm6}42IbaY|cbRBMQoiq|q9E{f zMo?Zp@9S0j3^aLYYKd#Ei97;Kf)TrDZKZ&)P=K&X&mk6BF8>4l>=4f;Kl`i{=xC?eMYTxL?%_b01bvSUTf~jN@1@`K0$8{9V+oWJ>9%0&*UBL{J&yN2xQw zkWzCy@@cl=qde>WNF$n74M1qo7ta}>3;ku>#6^s+xXWB|3yrx8$8$yfdC_7f%Hq@Pss>n{F{Ck19LHQg8RL^Hq>eY&9Oo$BIq!>?6H=_dTA z_u|Sk4#20!mH6U*$2-Ie{rzr(R_;V4zKEaQZvR9{*#A0}NGB$4Yun!O!Tt243*@>+ zzqM`z(ZJ&Q{cpnmfE%WJ@s*KdH&uOIP78TrrY{l<) zNz3VJoE{dqrMywTaxs7POLAyJYT>ov8Xd~X95w|tF(9NQV`ZrfJ)<-YU^Ng2c00CN zvg5ug`0DrXCvmyC1w_l{a_7}+ZVH++y&KXybsh$T(Ze=^m<#D7#?z!Lz*(WBUAq4Q zZdA3{flNOm&R(04JyIaye~!-Y9nPxU(r2+sNkGeD+0Q1&&%>M&qQvXCBFq`X1gsBZ z))>Sg37Tq7dAS9brFn5AR6_3dn1b$1_bPEJ5MZ;bFH75kItl;E_u!9y6TM5Ux}GS| zia8$n;a#OrgsZ~sM3c#5g zuBXsg!_ujYX1hV50AN`2xk$bV8oC{57gX&OQLXu({>a| z;u6@3H3xXPO8kkddqPH9Rp0n#8OL-9N!mcMMQ{5~`M29@!A*M??t-WfF}rH}brxIXId7u4^y6nUB?eceRT3N{*9&Z`P@ zK@(CP5o7j5B#l8G)Iw3G?B+%;aTT8gS3t}lEv`Wd@KOi5)<(59S|ExQL^)JIL{_LQ z0yx><&#Xj{B;gcQZw1y>qwqf&$oT;&M3X4UzSWK^Au*G&q#`EjsMYx>v6EH3mN$Z< zg7i78lAYU9EigI9c%(?ogd^bJ)mGzS*fkk9x;Dec; z>7M3b&f>$LYVQ&Lw(m4{qVoxI6Xf2K%8=d_3`6}HegwBxyE3?{?5JZ1<`q<9$V#=` zqJF||_Ro=)(qW3wd)AQ>{oZ+du|}Gwpo5pbQsnM({_TWU1m)oq^uXEZ4?m6jz)m76 zsF)+h8OM0(Saam`FOKJWqK7?jUPz5LFYY_@It_fB#PpzI2gbfqK1^9Jg&i2eZ_hNqE+GEo`~G8u@^hfSKo|eiuKbDYpUD1`Bjhig?my}1CmsDK z>BuZ>$N~92jHRbVDDfAkhs^uK3Jo@D^i{uKdRa)|VBvNOOd{)kf8mh8aHU1nsaThXGc?wQHRbqG}UX znHmKbA^SG7V~*m25^aZ~Lmgj@6q8>Wu~(Qg`0OC5JakwxsA$+)w#Cyym#i9eMf}5= zj$b;p@%vYg3#`@)c_WxJxYdQ5gerlCL_oMpvIj5SJd&V-N(@OCS){ zmTqRM(wsq2XrP}IqEAc=!ZjFIaV@;QA+|H>nAY8pqtdFz(GGU{JQY0EZ=Zy7A8V`+ zaJ4S*0ZH_N{k9R1q4KiH1>x=q2S;C*a+MSqmUdidvA57sEe!7SCqLjfDV7`*8IE3E zItqNX9{1Axfx59ClvHbg6QYfw=0m_<`kHMDWK&OOity2SKsgmsNs}S+2HwY`6{~Uj z(VHXE%cXly*QUzl8$UFLUhXXA(r&ntp>w_G;@0vpAg%hC3Rl6`cH5f?5g!p1FclMb 
z*DZe^xoIu6)(XCki16uMH3kl!oSZH$m1kRA!DP-af|^f-|6xebJYgn^qEDqD#hnjU zGs>MVAV1gmzU#}Bn1}_5vNdON2~y3GHS?@;kI+!ra-SxB#7T7NEXLwOMacqv+TxM< z{$2La-{!h>HVu~fABY5U{w10PxfzmAoKCH9^2W5H-yt?D_L?KOnf%~$mMqos%Bt0x zN*|{}UKeESOxr9jK(UVazQnJo%@ zoFd-BjngEvKTi{%wufV50B=T!b4M{oq7~Du1?0qzHYp08WIQfp(P?6dRsfe-n!B;6 zmt2ck69E1mX2)E6d(rz!S~`OwS6qjh#lZ{8Q8~?Zdb_+*&42|0A5jpSc$oc=RIc2^ zodcHBjL@Fb9pCzNHbz<|;IP0Y>5z`2&=>VJV1o((!@rw&fb|S*tHu?!)I@v7r?&h# zIaE6wEpBX&{2pqU^LJVBduTSOi=A~w=U6KxwxvcOm80#_ot*&(i|A1hJTgo~PlP_5 za^r_gJ)uN^-3cF|)3S&ujpM` zO-hwg2LT-NbP=+qXJ|8xctv&e`;}!K8n#;-0JzB_X`1iFS*|$0%$CVkMzsaP^-{im z0j#-lBgw{}h|al}=}K0RkB-mHI~QWH3r*_ZU@BtD;x~`}>PJFZY+IEMzL4)74hM5xYibpM^U3n!u5QqHbAohl1@RgIbx7w?1elN4_-6^l?-yRgbi~F*@Pr z_Hby{_0af~I!F(XIan=Q84kx^gk(IVc#-BiHNqsa>`i*O(HWt3zeTI6YMO!lnE8#a zelx)Jcf2M$e3Ckl;Rt5er(|)WBs~pb`#dH4eil)Id)=*e0?e#4Bo$@$wK-#BtZwFk znSjLj-IMCC)oey#5jwLLG^^9PWV+Hb^126n&kelz4+G=klWz&KEdOyl(p_Z9^Z9E? zQ_$zH9mQj}x!UNrzV%i51n^Zv>}j4fy$N0$zIMiW!(fkWaB^t!2ewLtQMbzf|MN+f z7$r&b?T}@ypbYD82@L-_|Nj(0DVW*~Vn~hS#+;X;Zit&tZ|o->>3iiYn4yNKT>?%1 z%fc}Y2ArqEWO|nLgK1UPXXq~UML+)K`*i?$GoZ(DxYgZS;Em9<{+jAc8$K5g5mOW} zn|u|j{d-ea4&mypJ-Z#Ry6&L3Q6bDNKTb+{9OeFu!Bh8UzgSCtJDyuNppb6rv8p5r zJqNM)!n;-`R-1U@UVzkWC(bWz+R@ePIHR;B?=L1ENm0(>!XLhzOiyQJpIUl5@9ov+ zA$5u})4EV; z!^!M(SEg(Io)2~L_vXyY>9HLXYMs}Eo6gxG-vLC^iL!{ytUF7F*8>Lu$|_A6#I|Cx zn>hvV1^ViX)?WIS$C)y|Z9OiYh3Oh@Zftm`)KfMrYRHo4|Ep)_El^sM2yb3YSpRl( z$1Dus2waS)&DC4gPp)5E_F69kSseLC_!vRsNg4C71gH|p?Qu+aA-bYPgLDewF#56n z$TBDh{3=^x?2T^B{ibIa1LFnS?5&li=b1&7n>K{uV7x_D^%OY&oU8Q@00vtg=u-Xn z&-qRg)Bne@{l9-7n%xI` zcPd7y`m)+&oJfo?xcxI_!@9(A4So;)uC0VKH4A5(vNZcP{O7N)q`3=E9{<$J=hDAQ zpQw0sV%eG5`5+x(9mraX>V@*sxrH3>%QuU2ns2^1|IXvQPJV`u1Sjwp?I~3iN6G2Y zm&n8R1Rv-Nk`*h9*PReaaFg6fMTPTcE+Cgbf4e4z(dU=FoW5wjHIIh!eX$_8UGj#j z9>2p?>gRj1mc!rChE)&cMS61wp zD`wV|=m~{%0+g4AdVTP-hX>i2o}(YHe^A1R5B29cD>-lP0(4wQ>qiC_kl(QY zbt*m1XCJ>}%^-v0r zNcJ0aarGs-&N#;3B>r^zmD>g?n`+>dd!PgC*FGwyJ0R=mB)g$LooZdQ4H=X`6CsD2 z=R5!jz&g!f^Sec6*Lt^4IgV6ilM?;K7*DP=^({8;{KF6_Bv?NLpQKpQIk?7T>x33r zHWbf0yy-Ms{*p=2AD6C|Q9C0$5YqO`b!uI;=%N)_NkAPs#_H39s1#`%^9lPtzHhgZ zn3n;M$URY#J2?Oi>uil$(VZjw>+Sg*$W&hZT9{xx=3V}uMY?g4@!+1l#Gr%*4{&*C zd9-&-df@f1Tcg6Q@@Bf$f(b%G=GA#`g}=49&AIhda#GOU{4r_sTJo>lu9&=_pCN(g zw4W}h-I+l} zqAKe7n}2N5|Dyzynk|I&d`LVGV9~EF&$*7{CW$1z*wB3{gi3*$h3FRt5Ix2}bg}-8ODpK}-nqFmpQ1VS z@cXm7JKY(s+D%J{r&L*}T*m#Dmc&Qow!)%;GFKn8!Sg1##WFA_Gv!6})&7NU5z#LJ zQQyz%NgudQ@8`_&^g5~kJpPya9r9*F!N#tQ3jWKXhd_4JXD!t5!KEA04Q|$OUhzzI znvz<4q-VRmbTV`rfUTpWqpSwm3#P)t4`_u2+lBk}USuW0uY69LPN8>pSnQJwsCul1Lg!P{$IA(_z25|+SSEkN~Cmv=*fCUo*|0*n)^;NFo=SvSunUPz4@ef?!hS3oLJZ6nRFL^OBl zbOJJyHdu=FtmOOV$$7lbKv$ivu$Y$cK~>#w@d_W ztQJG&yWqjO#V#{GA?jGW@S#mrSAiDiS|<6|x}L>JSH=b%HlDCe0Bk=9-;5=`RX=?O z>tyu#Qp)G`=a45X(K2a4s9nM=tN%<=7at~oz(}-D)E9y5>zW~2_i@>!819|oe_6@B zqWCgdge}T_{L}t&=gmlt>@M>jEVY$Hc{Idmxr1MUS;}K?+~GSfOsr3*8+;GZ)5E}{ zitH&ta=?d2&d$~r#gt7P#=9p=4X%+qsjPWpFg)5-jnsS)+Du}P)71(3LHQ1BGI_hV zo-EyE$6S1pXJHWv*FE{>rbavB5=&9@bZCDNSeOF*1M&g_wU+KF>;CsEd_-=+V5X}# zQX#)LpQRFMu^G6gB)Gy8mh}Z}>9(0w|0vE+@%+J0(9`E>x;xk--yggeB(BDk6Ut|s zl*a97`EV2vSx(m%4xKWV>}@4~qvTtS?^CvQE&NK?H0-lgRzp-)--!LBZ#5=wll#Oz zy^X+zsidq=w_w}-N=TzaplxNJ;+=q)O3oo=poh^?|wkJr5BVHije$>U=Tmo&bt4ghfN63)o zHJdTnS5~KRGyImdW$F`3I8}Wm?=nfhtibw!Y2VU($$5z$0dzy|(uS8e_bmV5nFi-e z#CwmsZq224XE@%990@swbqoXBANbM(#*X**3_w)L3v3> zt6~Tl^q2O4k|sijdq214h15-}{hZR(Nq#gazJ%y$9j?y~VuYvn2#7b#WHOXVL<5_C zNJ0PghEBjKMX@O)GaBxYqOmT9Lwd_SUF{Z2&3nH1 zzm{S43AKJeo4WD&-{2|Iy0cq{-bBPZMWf+TbsTQu3kA? 
zdE=6PF=19>h5zEGB7x?dgX+4Jzq2^=@zGmG%xxymEN=ChNlRq7$v`0|-&G-z+7Roz zu|(q>0?>e!Qq6The2<*r+-+k4|%dKv%AwY{Tew+Xl2 zSx44Jn@lrA_Ch5HV8AXuHh+>uYbFQLGc(i!BD0$#J5FW;kbuTr7-lDE6D88+EG$G} z4Jo4e!s0ZJD`J<+?H`nS|BHdy)M947$#WyBw}F1$3+-F0I^`6$KVWXbmLuA_;h z=2iZV30nUimieRq6<0@`wXo2Z5G;85o?r=1a{l=!+^5+naL*6Ah{B&Zs?J&yGa?>k z?AfCi{qu9jkK0;YVKb+GHUy1Cec@T(04JXicsC{`E#z$I@L9caT&d+%hH~z}H5&=l zCui?Sv;03{bia>z8&QD4S1yB_WC25I{`sr)CbwkKm5*XbM6nso=ej`ZjdOb2WPpC? zGQyGgghCcC4cYUtc~>Zbdq^NDqo(?Tbl3LVHZ>AYd$`*fXUeX%CFp)_LYDL z3dHi1>webw-@)oOu4?QDXxHUiqiM60oNj)ljbu_MjC}ws)jE$bxqte*xu%J`rUmmh zCOlml)5|$BEb{S;huvV*;J5m{S;O0{6q^bsNb^H7?43Xd{8y9TNztcyg}XaTYpe4q zrtUEmWq?a84@YP~ZgG>suN75x!OnJJ?pleO{ZMs;^k6tnzQF%-w|1F0AaC6hQA& z?vzpLdyeV#p&n6yRlvDse5=;yVpYMvWo7ui9IMHUvt%+;;Uv^^iM}7~;+@}uJ>TYD zwIm&&yIgh0Z461S*{r!KpXhwRp%OX3w+$+gx6-fUz4YLh7^W<_s6aiXx#ZLYRs7pi z-a2w&cebl^%5db2#8#z@M0%OH606u(+vkERa>DxrH>4|)Xib+aDo#r@kP#L6#gGTC z5GNS3+Mnvw$H^7K&)arjA+<8}Nlj|y-;Lc{yU&X(9|o`Fz(Q0`0NEjbUs`>e2u`OS z*&hK8tczkxX|5DzKeD0an1_yee2{ARnB{R$-uO?0h01}`{SR#a-Q)&-l6scmF)KW)ii&jN&)7XC^} zczd-+fv4|OoB|*_+abwZQ;Vm$r7rzxfHfYCZ@*=lwDN}_&KqTdL3h}mxz%W4hLvlc z)jkZ!m&5J2+MDBp>+CQV1IT>kOnvZ7*i7>&{2zwklhI;emtWf%O63ExTX)&$70QHZ z+wYd3V)`R!*1;!I1U|BcQHwEscI&U&Mp=UdqTyrPvomKHxwdQvpa8N0U4nTHGU_&Q zO~0VfyFQyvU2NcUe>1DOJ(wbwSC;ft)(qfy(r2V5t=X$MvNC_l1?8A(;UG?u*;v(Wk z+Kqt{p!Qiy&l{>eSz0f$Z0&86$~{1Q*k;ks_U}(eFvW4TZV6?CA?j?{A>Gh=1Ky?H zS}tT(l&i>nEA%Q4NKEJ5hkOYx+nolZW&NXhJi9>xRIW!t3TKWvT4dwUuoGGQXn5Pnc$nbcXVBJa{b@Ra;yg6vm}l_ybTGEl zRb;}Gbc!nWK2`pS7ICFBynfs1-SPn1c>7IvYT5WT)MMV9|N7oA6XqqPj3?a3 z@r3`W*k2u<+=Zkpb?}=hD*Ceyk36_?Pg#e{+c2p&cvA6U~yb8NGbP){NAZtP-C|MBOTkj?M%{ zfRRnYH6rcUKf2O$Am+LPuYiJ8e$wHw zlicEkl}~Z>Wi?FD@zT>NM2mAV=DXdfoF5O5+ta2uEq{pbucQw&3ai~9?q$@Jvws`@!5U2Z>wg> ze?;H!Pv6jOp;@lwtQueSY9rlqPJZeOYtfPyQQ9Z? zujO*1n|cCW)viwRRe-)Sz}x!32lu7&?xl+E3w%%R*b@YQVLMIOEinz${oF(8o_>Gz90$jOcU+0?cB$C*zU z>1WVx_3;Z^h9!D$*faOliShW+d(WQQ_3IC}Yd_l_@8k$6b=!pr!+Qh)hTr7vP+qkj zkifCfv4O0C_ctp3at*Myi`ovrcSbt7n_H)fCtf$bBy|OyFA;U1{p|QuC}RK(T;>zk zefC=K2EBk5pgtpRVSv>9va}bwvAcFc#t39*2XU|wvQNkFvH>TTExa1!15h|X}+rAU& z^@RCe;H>RWLKEk$yq*7P9x>m)M}Pi&wSB~FjgcDArq`l47O1tFvN>&De8&v%g~x=D}(y|Y^o=rrkMK-p3-3gh$a72Q@^ zD)ic0e2%OAW9?DpH%pzpwrmvKN@+~LKd{LhQx66y(2Zp4NPNj&YjOMN`9Bm9TT+tt zz34krin!A)p0J_`rrLISfHEH6K%=b1~;ymM%9%iLj_G677qX$K zlyAk{7FHhG$@8^vi^E-QCBHi5swW#K8zhKYEBkRO+?8rg4#*v5@SAgQ7L$)Fl1Z5^?*#>)pwnsRDKR+iR*EC( zluuO_`uMo0geP$?yxGLhcYN7%a`Xp?lQpe>EpbWpg@hv-xW{$t5V1?0&JSG2tWTCu zD5(w4?VBo}3ys8~}@>Fegtj42>K6)LQ&f7PMzwY4c(Gz$M~ zC*(%2y3Tb^%`;Iy&JEO7#^b~+*yG_Lu`MRz-oIBcl2m|?{;g)Rp@WGUEzNCIXtCRz z++R2N%l+;U-|fLW5|0v>eD$2Hx0}%mNLW!pc5}#>dfQS#@$d;QOvKM8Qf?fH2yzi=agbO%i!Y@Ghv+R%D$`bW#jywNQqB5n@?SowkwJibYwCQ%4#-`US6E)`(Zx>L%RM{RZ7sm+N>{1 zr!~F#NC|R43QLgv8XG1SIFUcqP4hgSSWykjK+?W9cNWjT`MkWb?}}2*ann;>Ym(Z? 
z?bSUtutu32(K$)JZSSUO0$(A`wxp#RnBT!rD;uG*c3_-nm7V@C(b@6;^_AK{fg}dwO5)u z=mTH@eq=S`xEOu!S>don9stD(W5y3IxBgkhGl30~RV>-#R5$I33ZFt@Rf}Z#>_>m9 zQr!o*qOAtby^!EB{Xj^umezqQwWH2a!XlLY4DL~N;h>qwK|C_lqCzPRV^ zT*8YB&g%z}R2<1H!na7Q8>ctP#@|3z325%)tVS>xB#qhIG}}_Smd>RW?#+8v|8BnG z>~J&hOox891+C33JA76yFFl{)90{K_Xg;wuEL;o;Tqmr_=`?ULF7sxj#$0w@0p0=!BHP+HDr7N&`i$)W_cGZ4k|%rmZ=oanj%NyE;1;w)s*8W0t$=^GD)o z&T>`3jqPg11XQdDo31S>YPlB8Z?V2)TFiW?v{wNk%@nWXGy+Smtg{FfQ+j z^m)Il+$XDg^7N%9=CCcbV0_b+;JIb=Vq(uMkcKE-%R*P}jV9JAouxgVlpwX0g#v&0 z!X0)OC(6P0^)2;gSHA9^=ai4qFu7sdX3uz@!={5Yd!16Ar+hm0!4UMdBHru!y~68n z`C4WqHDBEFtCA5g{+&E9+c%oeg+LlaM~c;uTM_-gH`J~gWc!4`IFT*s*gl2!6i2@q z_l6I2T!7L4zy-AapW_0|oJLFW(<7&Co2P)JZE(dX$fB~@8M=qAI0-fB+`a^r9Bc8b zG@A==Row0l*4YG=Z>klw`YCv1{DM51EdTNU#(or!CWr%Z0fMV^H_=>NF1 zDFp|Pj*2azq;I{WkLyvBEjol8sx^nt!Spk}J;%#V5?Sa9-s~*r{^d_0?>`DCxj)5w+Em;644?3FsNZDAP)$1hP8C??#_vQ`00;wqQ~^nYP;dJoEXk zhT(*0@8DHF0CFjknG#OY9woTNJKsYikf{u=ztH@J`=dz}NF5n^`Bo{@0AITUl1Xdn zedXatb@=@H?&)t~sVYh?RR?Mr-I-y5sDtnNgk`pVR}RhjlF7Q*@aRk9&SB-?r5AHv zYP;KU;7j0EaiUk8DgtOhHi(s))Rm9!u~Uyy5M%BKxf(a6tI{EUb??f@A2ys7>r#bC zdJQ!t9H}FAFjys3q%1;_ifRSz;;1O8(H}H!`l6DiGAHN_KaB+wV4MJ5%OA7r-%h~c z@jOGGz>>n?hWDj!JAySIXtUVR34){Tu2)F+wH5a2o1RBUARNqehQNAQoD0)8XGk#| zbc-e0c(v&}4P*xC$#0rw zhy9GvbhLX(oey??ZN@bX`M(GbNJ#|P9VAA=RohOu&_gYb-g(0V(0%(8w`D9|_rDHb zeY)PJI1UxzPH9>y;7Wq$E@*px%`iYUy%$QWV6?-)Zu~8AlFCBUUZ5-@&xFskZc+gM zA=v!cdK=rgu4RdL$Z>8lX6uzjO~ex?duEm;>BL`uF4dYFQ= zjv1b>yyQeFa~pfn;`sCx8qW1@>1Q2UK{c&KGF{}>{TX{OLm5ln#oJ=|q)OU_a!6TM zo&Z_6>U%1uVTf?$cOM@9^seCDd%W^hor1crJt-Zoje_<`k&|@M=c$nEx*MrvAH_k> zh&rx}Q}~+iFonco!;NbvvTW)HeH`y+G|S09@fte%uGty83rei|tJIB((-t{st<{&E zhT43^d55WB7+WXj?i7des89IQc{tumNjZ<-v?g-(|Ft-d$sKfMJ<$^UDI zWjhzlfiuOsM{>R>MEUj)3$pYL6eczF{@msS1m^LMoB`F)*N{$bFlAN}p)T9Al4J{f|g zEz(zzrviAW$YgCi$O!#n7AH;Oz9vthdgDOAG-COv+R7D^WvYI%M56da)`&Q(Wi2~WexjVFaIr-F>r1P4yYLEY!!V*Kp@HZI4HXVtt^n{>jk8E!eg=(Wla zFL(Q)<&f1#KYcTuBfZSguRMhOw=}akzpKXYXV;r96!z2=yX#HYfJ4Oxu6=_@YFNuz z$go^X9%)ZHmq~22PF5oM(z%g(#oeP^tNeCfRuI5~wNqI?eSw8teK+ONQPVAZNIR!^ z64|5^0iv%&FP@%*D9S(qPs=GUTloEo8jD5rr+3zf5eZ^DA+M8FG9NHqy2;eJ)WD7j zp+gY5DtUZ`<;EhVj0pJnhmZ)-38*N^*(Eo`V|;)`jL}f#C|yPBiKpj?8L`53(+KCcp1+U!aQB}najiYJxJ1Cn5vfFEY4vk*TN~9a7^*5T~bUM3Y=S1%z#^Z3TEA0yR z%h}#)G1}iy55c;ijJ!%068?3h!g`ThS&k*hmb8~=IgE_?v35Y6ap?dHDzof$Buvt6CsJ|8*SSZzqa`mA!fqrcqCmB+pFSRgR>~dI?fnAyW>z!c&*hg z5cfPJ^G_Y4fXR!z>Lr|9bLf*iRXEdw?`i#CGbfX-+*!M{=@{I0bF30-{{!O7O!1nHq@=XN`MH__cX1u9LbY z&Xw#KI1z{(w?3v0m-NBEC2wHeidWrpDUt-(J105cU8ovr0&Jf;G_mbjJ&k`1v`q}j z_T^iA-@)%6@v~ye<+BWe)r8>onx;l-b7PxjQWatR(~`Zdyl$`V=i6)dX_4ymtGZXC zMjhJDpg9-dDnwNT%&gkbO`k!d6QS}b-}PrT>9KYh zQ2kW0o?JNf6HY5L_7{0%Q5gCi+Zdu-4(apPyKE`a9<0wbq01D3P?$=o5@2TbbL+l3 z>xyT^JzuGi`c(YBo40m#`46ItBS+n8g;5ohY(o@jt1RGUlXaYwL-_{M zd3yu$G%Nh}FPp=9*_(i*nI{y5$$kA>QA0g$Of7O`xlaD2+!CLr=NV6w6gQO$8fqJW zj_Ib4ZvVjA@eMWi*GRP|w@RM#R8VWf%}1su(rnXL3u_Hw&#`U5up*RB{aRjg%?w!S zJVdT5yNzM2IFBCu>gN>RDm7x}@kt5vOQnBUx0=yM;!M#8o|ldtppOZArAN)(xEuL> z_)BekvwgbR@I`#G>dK~n%gPeZMch$x_wS)=0TS<#wHhoHRYfcP5-jjzZuXAWj{CnVtC$(>lr?KR7e~pG+_=`uuw! 
zWx3G5k64;KlmDNwBL|>2DQH4vLf!?^VZsZ~Y?FGlRQmXIm(vU=7JeWm3&4hTW(ur<`GOL>XsdR0`5isn#_I`1)u0m+xZPj(3ZF~1o@1H|KD*uhU_Y7+CZQFfO zlp;;KfPgd+X@V5#q5>j91f`dVbV7(o3lNBa(mM)Dl`b{XJ4grVH4w6HjHNpulm=MhT*0Jv~^WT@BfOVsb z{`|Y%8{sCuUnwpKu@7K??x4|52B8jsX|EGrI~rkvZVFtR{*=YjRN1h=P?K2RJ)y5Q zba&T2!(PBq!ZJ9)M7%bRf`yzB?L8AJQlU?NYLPaOGz^bu<<+QVVP-A0s(cfhHvEkW|;!9Bcd zod-j2Qx>~I`>AtK*=?c-)@iav03aZAv|Wns=Ho?^nVCL#{myBs3zKAD3CMC}?Ju*n z_25Z~czTwzrrS|sangb0>GH!N8MLtLXwGi`k&h)>D=&TBD}Re*__*Bq%%`dGrEo?k zeo)8AX)SrZ)Z|mM_(eIQthRX^zy1Wp4m~_){kw&!LhBt>?%HsY!b`UPp6(P2_)xOuilmcST~kXK`VoX}Kd0W=MBAsiG1z}zo5DzUBTjq_ z(xc%F95yWABa6QlSC$X6S=~)$*2TsDRFu6|wn9~Bp0H{YS4;S1ukR?ly&n~+0I0|0 z`{#O5`db0=WuqQY`YKJ4>Lw~@hr2ei+Ry4HOoehfgvQeL_gI3){W-%B4#=G0-0f5a z3kQ5kdI{ustkX^IEWKTNhp5|RD9&+^GpNH;@;AC6&Z8L1MobN95h2VyY@ZyBmcImH zh>!5@KqN++#7*rs-u;I{Zf@=27jXJXdOQUhD03t!tps}FE*eWM-~onHFn|^&c=7}- zFpUk@uBxfue;yh5W>T!-v$;Fh&5rvF#2t7Pv0B%}@J#XC6gaRhZ#qT6W*7D#KyJeq zl~S*U8O1FWjP?v%@DG36Hc=cb8Z7Q8vFf=tYu&@soka8HnZ2;wA9d#`?~Vth7XOD# ztBq3v(e7d@jSofLwcxYe#o^Dp7%kPxyO<_P9*o30utZ>X4D2-N!p>)QR$PCqx1h03 zKbdZjz*YpUg^h5#nv0Q(&NKUHPxohmSzRx~R(2ajA@iLrPAdDdl*}2{o4ugG?2D@+ z$fn`bR7nxJCUM{zrV@CfGs$-o0Q^bc*lLRV`VF3^g_f6;uLGNJHhoZJwW#xmG`#Ui z=?dcKJMrBAU<=Y6`xC-rRJm}vhjrset(4fcl(g5ClL&o}!ue6C$A-N~ex2LM3|7$z zF9BAOS!`WAo`#iGNPhWjp65Kk`%s9@VC&}i^c)er%U!P$jR6S5#cTeGy@8ZZN&3{W zlU@7fE|{78!Gg`E=gud}WdOxWa@APf;&1AeNOi}b0LF@z7uki3nBiwRv}~T!6UX1( z>fN_i2DWum<1FjH)>Yl39>)DK;4+g{5i>EOrD}P6!5$uQls%h{-3@8FdBS}Xytk9r zFu@0^QqSW}mal1hQt^UGL2^R-?6a=efwbWSS%{Dt33v-05ErW*mN(aDj`X$lL3#wl zX&KnrEUzwPW^vIncVAoBzRtvPahdaMIA*%k#&;IFo+{aPHn?l)ZE%6F5BRI$zrE&A zx`!^xM?50pmx3j4HPjJ;T5;WP_wJ2d^fOf5y3&&E-0;DU`hCHdWug@VT=OxkzQma$ zh?RhhM7X8(MniElyN986^e@aub}DPKAx**6pWhf8NAzJ|l6`)t-yzhtMHZ6tSZW8bto!=3UiGp)o9H-FK zIJxaXalJrBye5tDP~}K8o$bLD{pLa{4?|v_b+qxp1h~Yy)!W2hSKP(>6g2^$7V^}C z$AhF@xHsZas$vWUWK)H8EX(Pu7)vW%>dzawwu2#A2y>~ZA`LM&?muq`afzJK?y3fL zkn?7>PyU{ZoNtz#bo@B((PHvED<4o~-OntIrzOo2?whqV)DfA&%k0rDI`2xUa$kpo z9xL?Ie0Y}pAr7v(7Mtsrc@gGM|)%@%5b^17lM0YUy=_{eLtqQKrCB~9`@d-O+x znf%zL0OR66@|Ca1MgAiCyX%W5DVX6JgJR*C9%098biE#TdwZFs*SuAvfVP?3Xx#m_~{E2Q{ zq4_!imuef|sJ46Lo5ZE?CC&J*rjEuGUI8goUY5#X$g8A9M|Q|UAZ$e@$&;P)e{zjeZ6;M{7duIcjcCR z1Ww+u8MLR)1I?Bxo73!*>p;)XX6;g*nvNCe1cp_k>H`ozp=6yLt9g;(p)~7($`stp z%)D6I`eTN{Uc<-5n7E0Xgb(ZW>XPtM5CImtLgXmO*DTvi^TwB*P(d53V}6ymPswJd z-CA_A`}p7%Ivy^4jA)y>;3P%sPs_0p)=DA1+*x>_a&p10CVGCJxeMudwS~Fs zBX8g>!{4xR12-9duJ~v(GPt3+R#5z?_tL6dg*t^+t|vAE)3|Y59Q8F@@LgT|%<5@o zK}U^`K3+{KP~><}%0JO$M<`mY=plz-GR05i0pS0ro zm0<*zmr>^oFpgNj#T1NNdY1@U1L@*t8) zkW9mrQ~^BwQLqH}?!2^V;stv3s!*Yb@CJpGV2eKxfT60#?algPPd@IpXvkj_FPCd~ zstjLqm&#%k0D3pDdSzO(w}2L>264S0MN~0oJwf@>wO3DPkjK zPJ2SyYqS;)e&%dPFQDsg^!wSqa9Q!qn8dI3h&i8>3yihGfepUxtoc~aVQQP>bZ_;1 z`+UZx{75~Tgj^{X$2o<1wX-^x-*;(>(SF*~?Wpu3?Czhfplxw+F}@Oiv^;Jp<{jJ@ zN3YKGFFq=WV+(4_SD@OY@>_?YhDp4;V0d~;IJ1BLmOC|87mJ=kFcYe~II=iMeu0ox zUFaKx67oBy$HUIefmUMTS<>Lmn8+q04sEB+%x-kn1z?h3*i{#hU#LhV6oRh9tDyIH zFdA`2rz5|et5e>^*}J5ysn!XC=6lbo<1OA*Wu;rAx-NJu=ez@=lzT^Y9QM%{{G(0~ zUQ|@Ob1YM*jpUS^-yzg}x9?d)P0jStnVGEuw-s6i@##xVsaSuK>~oK%Z{8BGxaSBu zJ?BDwSprjs(c*e%E)w1G>;Sr0uq94uaZy(gv-4A;vgwVM_JjT_H__qbw%NVUPS7Te0a(j3qqp~~4hho_Oa6d?=X*28kgdFnvwlcmTsi$1j&MJCiu;#)v^5(#vhl_M_Awf5%u72RI zt*vXkV%WMC)APZX`R}`@3P}xj&+Lt~SFNJ{0HFUfeUb~-ce}JMQUe?c@JzB2bBjs zRkcKt?YW}vMLpyH+V1mZ0|>of|DoVSn2X)d!~;Mm!OC*kivp zSel0uogY6XU9SHA=H63Gu3lI&#l(U7S6TH9v-c=wklQB|7~3u+`~N9 zuOxZkq&fI$;aM&%N}iI1)mYNEI^~a{eb>JZea(1e`s)rh$;62Ft?q;JSkOvJc)?Z^ z!>1IBPkF{6BWo2xaT-zsQft@e{ALqmq{U{>Y6J6G=1kLwbNpd8VuU_GY>rNJX}dff zSFj>^Y+?E_Z)#_~xw6h$LHq{yx?jbI=Sz)?YmSLOx~}%iPr%c$VWmW#Wgtv~+8~VN 
z<-pOJlQWB>dd*2)c#o$F!tuOApj#$)$_$@u^Kcm6O-x{^oePlCIVl=S&8rW9dbB24 zw!u0|!D~V2#RdLC@6@spg~M|0l~rrH-Emih!(M*n z{66e6eK5Tjs;c{)e4pfj}3uj$Qhbx-kWe;{ThLBMtDFM2mi*diHnWB#V~BUnXLU91Az2 z1rhxn;X_d#8O$C1!X_Ve?l;|$>07V!ckiLl)iOTQ8YQ11S2cAoQpgS9K_YmKqW1qM zzty2@E|%3RLNF$BRyh!VU*DaqP_irifT}3!Vko2^sj=*g2 zfIh=*i+KzAxbo#Ms?GZQ(wCnEe63@5c>6ms;%|`Es%aj)s>^(;h9?@yv^D1J-^_l` zW`6d(LRcVJ0Jr!tm?AX%<%lL#ONhp7yGhEV+0-uC+Wtae3h~$hr-2u@zTQ>TCto7r zQS|#_Y~+t~?2G&Sfaj&H=I;mB1Qz&wzS_UA9|z!CI7xj5#P$!jS|<)m zGpQD5{0`9*D4{4Nx7GU3Iq<0Ny&KoY6BJ~u?5JA84vwF+X*!6%+$gPEWdAOmS6(w@k{R0d$raVrm59h^!>ll2J@eN-6;C=Ty-f@ zI<_c+EbUU@%IzC|C_blhR<36~MV9;yskD@nbQjXL)Wz)^f2gWVdR#+uX4;5mAWC;G zDiBq5p;!>-X4Z|GG+N0$V0r&YIVO(hJ@H;cg!WITJ&CAyvMru0>*IZ=;}-?J=##i= zt@ij=<7Lu{5_^6%JHgwp5SHx$Nen$%7(*EAB0gzT6bG=;JgW6#q;0YMrB#RJUQ z9A&2l4??FH;WkkQH?4wn{i`QKW^`nQ?{4a>2F=f)KlDV#Z=94u6y!3Nce}7f{C_B-{c1MIfdYhze6 ze#J1#+O1Uqt@Ae%-I*PI3=BFqgQ7yXr^VzUSKfh}ckJVxHmy^4@h15w-$Z36w_+Z< zdPD2!;Z6%nJ=G%S?vKAuDP$r27aU}&CZNX#YS*iHniVd2oa1z?^qR03e4fZYAREch zgZaDv_kdWVyZ`z=y=Navj|IfqCm60{R92Kj{XN%r^+a7fNO|rM5SiN0WTtHJN_K=7 zY<8p5=6;r?nD^d|#kq8jDWLE4$z)EmPs;tfeYKgl>Q-YrGKv;?s#HL;XSmv>9uUJe zi>vG=+CMD5SRq^FB9W)ZC6MPadp9Su_RdZ)wjtt^M2~^?%1o7{`sFPqf?e0*wN&%4 zE+axYSy7n^MX!y0V`&~)egBCM?7ZJ>^3U|VE?*;7P92-6ph!Wj6$|t|LIxvo4PU*sePhR-Tv%rg`c>Sdzcq{I+gm{~ z!M9E8D?zlRzUPFUNY);o)07bJOES0KJfZVoO6j84)r0; z@+f`<&|1^R+P?*joqkwiZm&Md)Y!4OdX%@YS*cZM)jFSu56(^cH}HtoVd!n)9|S7n zz-E{6p?X{GDwwU^$y&V6pCK+gUm^Y5fcsBf;Zw6F^C`<3=H@#|vW}5Yo%1CSbuWC< zwj{m*;6gTm`UBRB>ni$JUks}EH8f9IxH;Y0y(IX&?jgP7jgiS%usEXf;&Xa4xivsY zWGK%Maqj*(bqT3eKKpxqUZkLEJA{0hht*XbZm;9{y;Hjp|WFM{bq zL2~g^!7<`oWHp=y^B-Ssz}?Nw7a8E`AX%`0t@}D0WiZ+Nb8NxE2%J7>B$FimyG4gk z@ef5Gk_lml1+l}^^o@Ak*Reahc(=T|Wc}bT@-5FU32J}*bR9ovmuQCJ@6_P>6N?M< zLwFR-!vVIof`>XvG(3HW+0v4Vxl{7$%t5xSiQ_?sq<|2`)nYU-kHUbSUdeQEJ;c({ z!RAKOgYuWhABa+!gh3FIFSYnjkTmnWbx*OYPY`cgRS0eF+}f-@M5Jrban~#?%V(Bv z%=}(w^IjC75pGPhF+%tr5T?5aI6e$~9}%s#w;VLHw!z6Ayl^*-hMT_(dHa|-4Ma<( z4%a7~L<$iZi#b|xP~?kj{z)1gk=EvF_zUD3>0f1TH~v?==~5415+VY@-A#EABPqG- zf;$uxWm*1cqnld-^C{@rZQLs@UsyE-i>_qiZ`1dr)Z;J2y$`Bamn9!}r^SaLc#9!H ze2p_3ZD@%NJ?s_6x5JPae8|~@3Lhqdex3wzLZ5D?D1Z1N zc~+)w2Xd||1-Yv`%&q6vI5htB?5KE&3-90s_3}x^cL(tj8MAX-nQ}qc?e9lW&NBn* zn$KOyEVMZ?{WM#8;`CIHgU#EZ(DMKk0ope)Tr(4|HRz4pHa-RKnYe7V@^o6XF1BN> zd0w?ou1>&EZaIzFpigV~d@uTO*7;%)4on8ny3vaBNL?!La60BIao=iAC+9V1`LaaO;bGOA6OGQ`+)c{``F zY}FCxZX|L)(u@q<7Wo=n9Vr#iaOJ*2Q@VWr74Lc>IvxKBk6(q6r~ga zzvI{(?x&xUSKWIb+RikI4C0gHyM{?%avM_RewOO5&jmzF;zgUqUSp>w(DN93p~6pH zv&ap!mD1Av!DAmJJ+9DsBRQ03?4pX#(!GUfdwS!wDiAV}^y0g8UQSk90Ss}@r_a?n zh}uc=FKO!AroPo$B9}Df2rPCDKIb^g(HOCgc!v6SyKx$`S*IGD!Ye&KU7f|adScY( zcHWBN8HB@*tM%ZA?R?+bzVFUoMSS6ZXWBp7%7C?{o~7RZs{$Mj%q2yMqp1)T|3wq9 zJmBDF@;O6&)ZoNlNd3+PJtwT!7X7)ev;KRVJA4)qeK>FG1!3D0^ctBpQs{HfZ7v0% zxOFLh211el6N;NimLxHFFM}d5eKD{jHAYMPqtTsr9oSaNGePkr2Ns^_+Jm)&jTPtQ z)t`4+G%t4Qs44>=`>>^Ca-t~Xv-F*X5Bz{$!i!tTC4?yyCj~;&T zA_=o%Mf6^1KYnN3*}FInvzNXs`>@SE_YMt@sqE8%nwt$mxrDtqRhN~~@atPPJOI8s z`(;g2L%7LQtp?)~<}q|F=C8i3_*J zlJsm#g4Q48)pBeG@bq0SlW}-H0(N11RxRefc;39Zxr@2TItcWK1Tw9)kD?b_+xK5J z`SaxvO#4iMhnzY)9MZ#`m6c|FT;wd8w0PSwX8&Q5>`&e}3hz-BE60|(71LfIHcyGm zOo>ZdjK%WPA{`(5d6aLfvroyJJs*8btbqbZY8i{~#xI-PWv4gm`FM$+(V>Uw>_kT( z=p3@RwP-o4r$4S>Y4tEEEa?jHuF>1r+YK(P%NY4GcK!&GAWLG%}lysmz?Yh_=%-OZR8C!jRg zp*fz}^+G6KpQzvV8H-}&eXuq2(gmnA1Xq^1@XxxF*RHR<0kyIfSb9oLKypc6&(zJ~ z+g20ays}$;U3jQm^8C)^6_hYa*udS^+iSlfJh{f;aYEGtjOP=#3|Q^LIdA!iKRKyM zWTEHK2kgJJHwVicwfYXz>Zs1Jkkj?DgNYOx_4D(Ky`~X%;-ZLJlM`B@ZMzLGI1;$R zM;C<|;pMw^eNc|%)A%Xj9p`%Fgh~*G}sKw&$-m{jF&K)zbE?qt+^!lgA0!Hk= zSSVjFOzmd60k;Il%^5~N4qG%Vno2W@H#Av!#$>o2+_|2|5x|=mn&|l?}FXj(Z 
z^@c9rIQ&iabn?J;XRcl>wv2uLmULfSws)p-D1+u#p^28L`)+=eNEv5PJ94m~UA(^3 za6-zx^U4>}U59LL8$ z9J)P(*RNl1oV%&1=|guWO5Pls3Tm&xwLe!Spaoy;9GTaSSOys@PG zg2Ki8DCE0^)lXhK*)HX#Fky;DYA)gT)U6aYxlwi??xBRQY|pmL?IaYh%Op{M>nHb6 zsQ=s6W$)Q1-XAHb@?_51IQY(6My8w&Y9jm9xKf7_T87NE*59#pjUmQ?MPthZ=gZUo zGP1F=pFVReHRzY+W!?VmzT zRo{F0Kl-b>mt~vo-TirZE3QUM)f2YAYOWxojroapUq0og2>f3jx&N(j)zc0c2ECYQ zW7wOtpW(m-`fArY*Dz?7!Q2H?4eR5czR7fm=$#OfbsEwQwjTLFlJTr@W#FO@6 z4Hfe&H+5Bb1^5zXFhCv^hIo29bn3>kL=rb^fLhS3u+ZYY3cQ*r!YhU;+$M`?{QnMNpWPlhNdWWJ^$%vwV`BfxArNa=RQb&+eev-1t zU7LwnYJOm`Tte7({Q?t8Z3{#Eov^Sv-+nQY}`cqI`I!#Gu zd$4BxqXSiy73XPD;QJM7Qb&YEc!|eTleoJqCOG=c8p3|@WMZP52VMF5zi7}) z<6iiM7~mp03X}rcIh*xVpz6S#VQ-PHUvf5$z3Ic~+5*(&=I&Rkr0yh7Q?eF8u`^4I zSe_j%{SQTbd%Z-5r3+M5uh^pNT0-#WTT$3I#xP06JI~eCU%NbB7{?=U}7?Ny%!7gR02ICOz>>$|i>ux-ZFgJo)e%KRP)*O4f|-O69wX#*@LGE-qL|G}pK3+igu}5_E2VjP&<+ ztwSNq&2M}CqegPwrIyf#=U!to$%aW#B$%+676UZ;(rK{6sk<<(VFgr`E`GN3#evWV znWDewefr%*|dFwhP`Dl zg*i|E`*`UqBOn_H+MRqA+kRaitN+MS(`%788E>IFKHty5;mpmuB>v{1oe#e{cEo>q z%D*-HgBSlzmqnpzDK|h>WpTY1OVt@LwijqZlv!BFjvd zJ@9A~SkWF0YgjAHaNbu$`Yh!kMM?rM4h9g0%h7g#)AZ5kBebeH%qIlsEZouQNz16y zUhrg70GP`Gp_lWM2O92vAF2-0YW&6<~CP_MuBlZV$tV>ZfJL?lm4j6JBSX>3xTwB|e z)Gi2fLS;5qoLf)Nq(dgAG@R2JcrKV99I4q^5w$Z#+Rg%1K!mzV4_nXgHiNSH@22^vp1mNYqu_KntHs+{8!x8FGNqmS7U{)% zCTRPFE;-F_`_^yFgyMW$@JUvG!$)NB!+}~?20ZBB^=|#y;xcD^6(UQc*7Np4jBxV4 zP1FXspP8DZHpM06I!(%*8{#YQc3xuPX2RwiqS_eaHf8YOhnFS;dVDBP&~{fQyZS$lOCcQ_&Lo|O`C;yX#SH!tvQ2V0gmi(6mN<;>WO z%*=QWMcBhvoL=#o1!tJ{Bdfz>$d($h>Re#gRLt6xjnbIZk>tazwDvhk_k!yfD_Z>P z#m-?tzqR`P=@<8|Jr!jD+oC@qqzK`i);ztK!z}9NiCwaeUE9p*$fN~iW@1KHweO=O z#?(^Yg=*y{uyJJiPzIKn| za*}C#Zu?eCOZM_hAHC_h!=nSRr@AEp6eABT5y%`Z4TRC|!FHbcKS3gB$)YK!e6fvw zTR?63eDdQa88(k%uMnTOMJ?~E@9DC^q#UB42LHuS);%J$3}RaX_vJ45q%&+89gq%_ zT4$0MiBiz}3FC;j5rs&(k$ zjudxZ{ffJr+?q;)UPya`kF0I7p5Vp*4z%ml!^RIS{$>@?eYK69^+|d0?9>BPp#ki! 
zm_YH~{4~!`%T5CeL(BTg^kD8Zim^A1Bs}g@@w`weO$=>*ePcuZx6$I>C}Hw~X<=Qs z+@UQyt{tE(-21OW;5xE~H$EIo@+=`mcbod3*A@mtg?Idv#(O94^eqVolBkyO;Gmfc z|MK(B=wv&G13^xmBMKDZpOsnw_ZQ!WBq~&Xj~2J{U@Mgn$jp3fJ8o&m!UMY%wbmv* zN9HBoBb@iek#+K2P=0hh1=jp53ZF%GSg$Uzz0*>Ksv6ne=QHP_OMri`K2!ULg3ly% zWcT=DtJ+#>+rWLP1V+$D&W&vN5l(s(Y#tCef!3qK$lmNymzk3p+@E#Zn~9|r7og#g z`vV*D0REunb|MZ$2OsUSks0?EAC`Wm+?&o=UNR3aBw1Q?p2&2LRU|Q&ud{^F*o&B3 z=r47p9jlCnEpoSKcP6W+L?9$ZGxJ1R;^}>2S8Hwx-ZL@esrvZ(;lCIY4IoyM5SFTd z=-wlbH<VK(X6PtOgu@Zzu;dy}C3$?KEFvT4TAkp~O|5KUtPvx8DBiq<#TN z_a{4w$?NtUlaZz77u-Uzo!U>`P@zz)f~?$&i$65)eZTjKkb*v zOxVrCftqvHoqd;fhcofO3ZMOpVYYG2lZ*{lt0AQk-__Ok+HWU5nHDYB7o6-JwBhSL zXY!Z4@T+^`foQOIovzgXSH}KE47Bs9LAB zO-^$?m05jwqU^T>nj5$u_UJwNYzDZ~^FWQhQ&5-}Z-C(h!rCbZl2%wJlt2g_)9LQwf*`=E0*b6dSV}^qHLpf6@ydc zK4-~wB|N-FM{nYdAzzwQzwYv$G^aI=p!7rv13u&ISo^BD)1AEoUvS?$l!s8z0o>(A z9plbQaLr%LY|Xhk4+Jx)RZ%X-5wsOhiUI?R4VC8l!qn$~kPb+WtqTPb&o@{9m4(6Y zW#~Fb>&H~}&C&G5B*=ID$?hMM_1DG+JeqIp?@f}r6%OIwE9Lg246iV;uAiB`)I; z#vr-`p7K^IjrGf#C1>_v))U)AaqSTrY9B2(NEQ!)si)Pe$J-O=U25Y?9%!ulG0khU z6d?XZDlbg4@kV#dn=kAlmoOgBIILQekv`FTd+U3xKuuqB0Zs3+P4&0))FkmVR5`CK~lX z^2n(bEhTULXi<=BIZCG?&HT+HI@v;F1KrC7Qvh9wsS8ZEM!=6HYZ3yZ2Z9cyYCRoi z${bv>z_n$A5_SRiorWkVj6k+_%1KM z0fF~?x7_c$V$2bbcqWq$QL78w2V+b@r(oXB+}77u|-WKXR7O8&LsWIeXu$w#v7cG_CA8Gfn)&!KZvBs zJ_)gqM+Fy7A!~(*GQ=klqlkBftFGD32hV@gw>f1tQ&f2L_<(X4;D>)kgOzW&t=4JaJxz#V<;4SLMVht^8%XF|1WfyuI2+qP@2+hdpp~ zq`+VD98nX06GdJ5^D!*2=rLb*fQzq7;oPXeT)y1s{*Q(0CR!$U^BZo_(@Viib_)nxPD2Ng3R9y76Bb|G~sr@#ADDNP|a(q=2J{uwjT-*1C0xu{_`^ei3h>Bp6v(8}C)Qb}=dQ;$`g8?T(tz{lva!TPRfs z_FVnYd;yZ?T|4{Wx}I0m>M2xIzLKq2&Xw5UWt;Hp9nDd>2yKt32xO}r)!!>GHsyXV z3F>?V+zDTPuANMq(7Wgky5zwN;C_2vWT-i41r6urSJL(h zLsUM{O1Rt&qi3IDPSB*zT7IuHk*zC9J+qNMmuauXrsv_$vbC>AgRzw`5OmNvc=fbr zKgWS038YpTEEaAJ$uzt`BdUwP$T80ivGuGo6e`huIiWdqdojB`>^tg0Ip6notQXB( z{OS`B?meP7qF4?xmekljy=V&V**vwc=jM-EuF~s1d)ZJ2S(3L$%=I=7Wh*FNmJL)Z zNojpISv2Q{N7ZZx1UQ!$*wUx$v!I#Z$lbVtf3YH-c(BZxcF1i%rt>ykB-Y`17-oy6 zUx}Jz}ueg8Trc7V<@2bR`t12mXzTD*O|4Y-MnskYM$?zc`3<@ccaNK=*Bv1@0 zG8gk$kEt4Z@;)Y^yr#I%ibL)9RDG8j1cG(KvHH8XH3b$LeA-@#XlRXYD+Iy_Xub{xr{-GV8(WdP>y4mG?_&|J6lVN^S5DMFMcZMYm+1vX~rE zzTnrtRb_1k8H%ZTV-K*a2u9UIU*}$Om96!6dFa~jl5Dh{5+vJr~`ax_N z!afNovJQJFYqFp+g*{?gqLW6x(PvCET_!t^kzM%m*rzycSf>y1`f>ro{M`&!Ol7QF z^dvkeZ&v9Dz!%IB^=_eCG^|&NA^;;S8KgeiF58}-Jli%LNJa-)b0Zhd+TKvJ{by8!JAj=h>F-NVPzIF>L>G?1n!;~b@Q z^-=!#!Obz2XSg0#t0Un@ku+HU4o@w_-tU^tCI_Bf`I*I5N!SY^>#b^{rL3`(oEBcR!g@ ziuM3#+iaT6lj9ixe8bqTq=_B9zRi)~D#j8nLF{8ome{$58sN#|5eYWBi~onB7aV?S z(kc}mu#(MSzHRtCWglQHv^H7gS3GBhDeD);cL9orKC+ddEMv(tLb zAEC~{C&kC6E;2zKMIZEbTz!di?m2t}@Ob*1H*7rU6%Qw%@ch_44tT}r({Q~RH#d1P zl;mM%(D99A(6mUc zSG?o);z#Y|3X9de0_`jd$BW_j!vhpo0oq#HuUrG=W~4=sa;bM^`R%Fc&p-@d)1OUM zgvj%3qD73bHEN|7&$P8D6r!T9@x0YI$?2=)P&t>g#nTSvUVJ8Lg=>`kUaxp(*dtd_?W8nie4<$ir$Qrq=fs<;W-FgA0||^S#N3?G)g~Y z&*Bb~l`>K^K6n`*1u22HJWIDIwpo%dLh0cvhSmy`y7sj;2O;FN_QH|5%|@)cU@P&> z#SEC~2*r$0MPpmCySd}Ci@=hLUydUS3t~I9-}Ee{-EZ8rU*fJ~{CP81Y(t$2qAn2H ztA439yO;0Il>X+p1#Za*EmcvzdS;VZtk(M`=~0n1l#)WH|K-|TV|}2l6VZKQ0|!{R znbya=OL^8N*QS=IrQUpWtH8zLbgw)Yk!9ib;+Jz(Ud}Ksr(BVKuWDYZS@0)IiLh8* zJYxN(YVs(+Z#^M5?CtkBT}(b}@XuRmz2Xb8Ep4k9WI!u<7b0o3x8BNQ5jarl$0u+m z4rtJ1&oX!rhy@jongl5VT=ogFxp&Fz)5V~%Wb?tZR^{al!5dZ0#!b-6ZC~4U^D7%H zeQ4i~|A>uu?4v#?1M2$zTZyb^E(U4Q`1Yle9XY(B$P%pCbLCu%9$7isF}u+7IV|?O z^C*tCLg2uz+F5wkL$cGO@lDf@1UT$!38xI*oY^$%Z9A54rUL7go zXot8O&1|vP*6F1;*u^7vR~z-hdFUt`5)9r2So5r?2j?ShhxTq_|4P*0Aw~`phYPpm z0y0hXqLWhg?`BIP21A}?-+xU#sBtB(9oyawVT$yZT)-E6KK|@&=O>kkEZB06^mOp# zQ`EYp<0VR8#>M-oRP0Kq#qB*KJwG5fp?5wL51l@}rfWgF)6U~*3E)VrGn=wJ&h!w@ 
z1mCVqR-KxuJ%l0zC(Wv*78mP@^UJBP4!Lc7e^f3h;mWfD+8Evm-Y6;q)8RI8cqlf! z6BedH;Z?a3&p6;zw5@y#cv}|?W-azW@3FhaC$GEJjm!-?5=Wn%Qm-)eG=oCbxvT?T zjCq&CK<4aHJlav53MFRJUq#a;UQ3yI37=A$Yp^1nCHb*&^}dCpIgqB6O`xuOy<9z! zUYhT#Z6kU#eJDl2_Dg|^Ad9+?U45aWbc!x&9rO>yrIYf%b>szdW=5V9xg5D{0p8d3 zt*uA*2g#Igbb>gwR))n*>+8mA&E0Kg<8o-84`1;#An1!U-__TcwR?*5Q118m(Uk<* zq=Vgbo`gsBb)nz_=-tDW5}m@K0>?Ausyw9|Kf9ERxwasbT}W;@V9PXvWHh&2ayS0c z@x4DW+OqJ&BS$mB`oouaX8Wc)Ds4u;%BvZ@oeFB{ zH~B}}?wqHX=%M%!cPgu=CHIb`8A*;8lie19wO_&lIUc1%iqvk0iL9mUE3I2&b%7Ho zq7OguC`9+?%_PX+1@FaJ6H_yB(&8E6grx>G;%-!Pt#4J)*4P`CSm9Nz03JD=* zJgclUrDsFQVrkj#&`b(vHwZBrk&V9iXJD$iI>d3Mn(V&qf*4KpmiEU_M3&y4Z z=0cz4_($ZipF7)kDMah`ehAM4u303WIKXN&pnqm&S_O%#!#Fzi$!_O zKBgaUE4=@-XRtF!$a;O@?c`FLqH7>AfgTrAe2rB3(p!2_Z@kAw;R6 zt4Qx4pdcX9qy(gf9_byVOF|931QKef?su*4`{wMKea<@Hntk@n`Gd(H%sdmA_j%s? zzMt#*T})H8G0v{jm@wr#U_YI+_@+BLZXR5@MK$LG<+WvX5zFJCNs~)X$>p64-jq^9 zdDn##cndsnHHTOS0(>5bo=Tn(s2)~)>};2*uMq+=%g(qi46*C)&DIuYThe^_Jm+DX zcNb@8dnqIIoSNttaCfvaOgdfiMPpUQTt8ao?H6NRY>46RGxomCH~E*@M!*bpKT5kH z%k``2TZebd6-r|CNG`L2+1q((>=xURm6_MtvrB4!wA%>ed*AxvXgj#Qm2=G&_I|87 zvj;82Zb^)Z#luyfHH{W0@2sfR%VK@&tM7bmBYCGiY2{3K#QwfxGm3+4vS2#^klm2>3v#PIT)Ag;3RLOfjF-F7 zm!+j&hQ7Gbu{jZBMh3))O&89-0H!{hP0vf=IFi)s5tS!Hi*^Xbb(SaMoiQ{9&h1Y!Gp3 zX8KftWQF+-(qQi`S{;Ul+#Z-`66ZK9dp=nE>9viVFh#!(9uwm^LrGGqPA^94w2jv( zP9)cPlf{W``xcpwUo6GS!Zb7W|q(AWF z2GFUnWr9w$%Fp{_P0_H-%8S3+mqMp8fb>LEatF=9)|hvyNldQ5r_ECR0e~esft zx*-49$w+M2h99&(D!_PI70*?izCn+pYIMEX{riTgiz^U%338l9RFL?)jb!-XYwYXqkYNp<6#I8ZI!Bec}0epGP*_v@Y9E z7*r;ik74I69?m)jw|YlBk?UIinbOIIh-$c*wJ{@0#?S-}1mWRN6vY?pKlF@AcQH+8 z(S6L2dB0Q9^+oN%X=B$_*sN46>tGI&-gqK#-V7P~I71;NsX;M3Uz&iczql(lxs}2F zB|Y3B3AAMG3+SKaN&T^HgBQqw?WwlRGT4AB8^*H+*@z+uz6e(1mWq3?XdYQB^@9$- z&Uyv%O;?>{BXo!4`5CbrV?t}>*$MkMnN;TzQjp7vym(BQ*Uen3^$OKjf-(jzqPeMa z%`l#i7l)4KI!^R%F1tQvT*$0u&}j83jpnZctYTh;$l2x%An%Iayf)rLiPYE}U3uaj z-QTNVNnz9HYfjrc_jVhQR|H5{_$iFL_p^!7z%m|;ml9mWM)ZDXzw72KzH)g)K~GAP z@p)NvR%Ds;gVbfeCnrW^kVaVrW@Ingx2?`OgFCX_)GJlD5+rpds*4k~r`KWU_!zet zZku?0R;*ZijPn?LBle?dw^BI07PS7$$DnC zBejPdc*6ZWh;%_d zQ#H(+_G=5m#I(8!DpDp~-t9i@54^~KoVYODvpCeF{zn&WykLGRXNT~wHLj&xp4_*>M<-W!G0A4wyca7{lt#I@7%(#H~xN_YI_CD|rA z*rc~3_Gze}1Tv%&f-7ZHOmxyoszUM|D^ahbT&}6KK>c3-aKf4fOGox!(>ceD6A1jt zow~aTVpT70e_ohbe6ZzAl|V6QAvJg%%BB+Lk?uL%Z9TRh%Lf5(*+V~SmGdvg6@L4A z(1U$TQ5bQ7W-;h^0-tiY)82VhI3zZAEb9=2Aozo%6ua(((I)ba?x;j6(1-C`RAfd zbhhvj3yz)Z>x4J4bs;8&q+po#BKFz%soam7*QH@Fxa;+7-(J?p>s!1#i*a!NH^v8D zIniCUjH}FtrRu(GsIP2_!2_CAd#yWK7qj7J z4x-z4`_eAT?0uq6T0dt#%Yys)689BB^N=g#qPB<1xo`8LWv;_{8mOsaVtGZ{+Z=yr zzpZWcYkhF*aiscLzTf@Q&Tl$4&>~$#p|_4y$MsR@AV1Fh`wOb~k?fz479&8vyFDtnRqwnwJ1}H&k`VY1HeDZ3aWg2YN)@kpCa?QwaNK(gq=COz0Te>* znCO{%)s1CdygkU%W_c;d^`G_Rk4g z-GYt2kj+VwHSn-l2~`mHm)a6nu|Y@ToxHZ7o`5yWy~{+6CSNza$J8kUacm-7na{Dv zU)-lr(KswrPTw_*Xv9B$#+DJna}#&H4i!?KGI-r z=Pi@VfsM(h1*b6W=(*kV76^4QUVg(S$+pd}>nqmD&E*tGt<1Wfac5YV?)+eKF3NjI zOW0(MubX{JJN}fwp-m9FvRKHap+ATeS`gM~u@KEKBxkqI$j4fYmu!wjh z9q&~C&my6awkVC>ot3*0PQ!chEjY5`EB(Aqo=juO=;nANLVMgplaB;M zG!j%bE6~-l%)_9`zJ*?>tUlFR~oT|3kCtdzJ~=6s_SM+OPKS7 z4_5o7!?U5Xe<}C|lcZGMXWSb(7qog()Qs;Xa6#C#@bz#F-P$4g=j^$=yt`5Hk;K)| zZri6lJvCGe1S+CK3xplo3tUFCm4<@!h61?2>nSU3>O0f(V|1FLS=DAX8WKf}b<={W zVGJElQ@W)0J*0M3I=H%D0-yG9s6FBBu+~if%5z&1o%+M8iNeLAsiLsXD#b2q zt-8)L&EaZhKK9v95Z0otOpVIv;g7IyS6)(3TzGfIq9h_=#mg!%60t84QGQ-;25hUo zfB5tMUuUAF7taa;sE`Zzkm!PGq#k(Kc%HbNpATA<&x<=$_HS&+L+R{|cj zu=V7N3AqBm?^)vhNk3H1O%%KbT#J>GttUd4Nuj}&G!$#h7Sc@^w+AA5=mK-?el1uhN_3B;vI;fD6~&+b?pCM7O>C_kK@2@Pn6E*`^QDM5Db6phPRZ{O7I*rF>5P^l#)0 zG*{(K(@u>^E3xOC$ZU1Y2R&*hE~e3{dQL8CDvPN=n5Fk-0bhYi+S0OO_x;#zo=$qY 
z4P2pLFRizqF0R+4a?fRU#U?p5w#>5r-nrJs1|lV?$KwAaR&iYQL>(D1+mY`N2Zs!-P8+fpRgd5=Q$oy8O0b@53_kj|p^62-3dG#{eYC?Qi~pHr+wg%X)LZ`vBm zxgle9D+7)&8nqqW!(1CIY(EH&P;eB16PD`4_^j1+?CjxN7Xlmp^uG*@3Ai(aamb`Z z4tP7~PBhr~x;TUMBhTnxgb=W1eMMa3?jLXovR7R*kuF^X6^HGJi9kk0*ZQvQ2RCR{8vn`l) z$;jAm0-ZBghd>Cr`Q*%Y?#orW*IKP;M*Vb)*Ebj-_CM)(Qt>tX6v$8XM?_)yB1EG)@ z`O+5M8`Fwnc6J3Do4=TvD(gM9A+wjvMAFbY);W$h7<$mD7NVk@l{>Fd7|vW*KuWD@?h(@e9@|*88Z7tntny)@b@pGm7ZpE zH4!swGwSB4I9%fo*J+6D&>=GIZ4D(hl4mPSy=6*qrnR?J3v(1dV37*<3c zD{U144rOH&YK_(cx&f!}<{NI1sQ6OC7Yh@yhQe9BsK< z$&-%B7k8UeZGG%INEr4!ZxBJAc%x=^sa&O37l8 zxhny&b~b+UA>v-nKQcux^!VvdDC*7+URXZ_#<=q&NO*SCP0o9kY7|M6AGXAVzQ2AV z`ovC{nc~BfQr6!AbjYIiOls67e7=b&7IrO+cS5>8mIHc_nt#eiUl=mWXTlakCw{lJ zN^uM;G9;6=6cwVCUGvC^^&O+N0UWwlN;{h&qoWd(9oU+sHyi{ka5&wzo^ zr$QAnOyS&V8`I6qQWyOs(A4D2o#3|O8yE_q_me$gA(u9@g@$70g-8MA(rH2<>ML;P z_Byc6o1tDIORG=!bhZ^BuEi1cEZO3RLNxT`@7!0A*2}nGUTZ(5z@W+5HTjS%2Zm$+ zT6n@#MZ0jBCapR#Z0YmuM{<`2uXPMEBZHbVuU<{SUX8`8aIJkm_SZ0~O!Z|q0rA%$ z7v4KWPk*jvU{z-{OTfMHo_0G(A{lr765+4x$)6IKyh zNJR_brZvLs!6eP5@Kly` z6}a7fR$E;tfd@!>gG*P zTeK^VznI5PPhOI;3%y*xXX|5{fh_G*79p;5A}%5QL28LDT)aA6lhlTo zp^&L)iQA&3VN^Qd*d9FfeVEAUjXMM=0!^$)jSenGa=qKFF93H^@Eu>KK|^EpM2R~t z&7?L1A#OJ^a}}1m%bDW^in>P~Oc7Cw7mKtK2Szrfx5(N6BB3(cb!~wuIfDc9L5KY< zRf6|+fnqf5{#EI#jK+aqX-7W&dqs=8q0>Vx=UU7T9M~<26DE~~sRJ&cdX-wGfQ{MS z?u{*vd9Itpq>f0UYP&M^xF7x7h07!R(NDQI%`fYhpkUGqmK~&q&G<()N>O=LwHfW%Wi_>twoU>X zQ!`Jg+ey8gjZ7%?HGa`<6(J}Y0{%;JN6pO@SLC@1ls$xnEWSr{YNl@$uSRa4Nh1B_Y(sQ%pF2stHwtpeQ>YCo?SU8`k}OM>~ZtxkgXo6U=LA ze-|dEJRNO{Er{?J0g)CfdO8EnD&1?LVEo(r^9xLH08lkqD9PjqI_U(d=&SXI1;K zfMX$Vt1}u{Y3j9)eyY|oP?EONa1eSL*DMEXb!$+~g&_ge1X z3$Q=K+GLD$D6^7cJ7PZ)*A8j1z?9vg$y`k8O#4o8vq+_3!kxG$itGt?&+KpVw3MlS zOVJxCoHc4Pln97 zDJH4<(D}8lP!+e>89zT6G;p%v_N#$*PLBhrFdrJ>F2+T^*o)82L*t`gZ0WLVR994) zpp%!DwNe#&$D@aEutL^L9XEm+Kc&JmHxE&RM0Qnl-4LpBmdWuhp%TunBlE-k4_~c) z#Pt%-`@7lIcNN2#?sEj}`Xs=<5d{f1OJISN`?^kmia27UP1Pdp@$Vt`lXOuf`?v2o zK8(**9;~wW&6JoGHuEk-QjXW{%&7n|~$6UlxloPg%? 
z?hT70{qsG_$S-b7`TYh-1je(mz!NXM^eV40NS0~U?AJ8N!2w2SEiVR3aI-pW&fpsj zU_E_AjGhVSPZ}brRV~gKS6&x(cHYA7$kd!X+@Ukoe63D<8lv@LMiTOL;~w!iaCBKbk%Ctn(W!S!#bKO(I0waBWvXrEo(l$o)c;Zh zV{Vu0`iRv5XrZ8;pFm*~@(=#>|NLXEuO_`{)&c$yYw-5*tzrZN*`%`_Yc$SGEdJf~ z(6~d*$-j{Mh`@ceckjeVWf!Y*vTCT?KJD`Sd(Kzek}tc_e4tk|sEo4lr^kI$4f)A7Td<1%T0sBcN8n0j;NkC54cad@Z{HYgjD7lE zCo?EWnP~UtMoBf($Jc^me3_>-@gj`O*sQ-48AE?5W}n7ZsRG9GY+#Zb?4AEpM@{X& zmdPf({7W$ibk*BgkLiZ=|CcZP@^YmS#3}ULR|817yrDcsb9xYae#~&;|7mrK|1Z{N z7>&1ORi{#V(l7td)bM|q60Q*`ES;|Nqy|kqM_mmqDl@!7=+DIBpR&+#iL9F4zW&HV zaq%{+i}x&T9}1U_ezDy?uD1Z!Xp^zxJs++bqLTM>v-sZ0<{wL-+C;Jaw04-%ad`8K zvEamV9ephJOYAlnn!=V|wStu*Cso2aDjV#4S80duC;iTrs1WrkomrQk$oR97dHbBo zPlB9@NA9Itjp#<;yBoy6k+&`6s}|Lw-oCJ_(Fy;fzBTv#%hwXhU)SLGwifF^#J7Rz z8R;PhI*k)Esi$cc=~9piukd1(vVC?MZVNTMu0k6&B|KnK`GalMm#z5(f%o5-AGAo* zanK4ZRJzhguXEox=gw~xyJE2lLHHXi>b>>$3Lu5`xA(VgD-K`2{PC)mWpMYeU)>#g zl=xT{U{JbTI?sJQ+OP%9B}h_Q8j3WQm+mn)w`g|Z^MD0brKB3P-MeenL8+iV#U(Mj z&>5r52eq(|%(5T{YK_mTkBe1>@3Yv~RK*vU^e3u0GimmO+fucti!GH%#OOX>*_j@SviiE@(bSo)A)ta-29eDaikfDfhUK zqSF`IZ*Zx&KIzx~r%$HZbXk$jq#+xS&c|(MTWMrXJ7(OkOKC}nrN>#YQ&Oc)$<)21 zb^6xQTmEoI0d((EaS+wY&C0|CSn$|69ddbh!QoU;W=85a-^iU91rR5%F>&-zV(s^B zjN|R+DWtpI;@2c9pFh6Q@Pp@s(T{NQ6w1X%L`R)UFK{v&Gz{-$7ejxpV^N(Eh~TPX zF*gp88*6viW?tWW74pBCm>v zL{&G*jtLNn=<>RlgX;jIjk*(2FenVI=C2#2DjsD=sT-_hC(U^HY4C!cuFT%#_ADN@63>|7{+DqqQV4Iz(qd?597|>F`lb^s@P7^OO zzuk%<6cX98oddh3x63=?Ec&|UiMNBdBGRkI;|ynh+ch7V5}l@OM-6T7gq1dRmu3{G zNIU#o$yv^H2qd0#pH{-XrN&qC{rlo@ov(RC=>`zwP93`Y|ki z$J<(~loRY`^7HId6JvLt6);yo&Kiu^jscFS%nGnB|87-(Pp8b2@I_Z^nz)ybnhh>+ zC7y`EeEPQ{7ev_2{MG(a{8W5=z*JBK@vqyuo-vM{!}YqFNA=TBX>orx55F$aiE>JG zE#N9V+zkMtE*}waAgG6+-L;|hMx~PggQ3IWW1QClrj>@)4@8EkeB6trht6=z*3%sW zY*P*2-MjZq&anJ_cUa1U63lFPIXeYOdItt00pjFxdsQTfn#nva%B(ueoq}AhiiYV;NTX<84)vMJ?5tuxjjR3XXTz(~ z-}h(+QsLzm*|JacR)=-$;4(o#^rzXW>L@Q07xVC*zda1*S$`RwDP8w4#w<=|&BD&( z`nL-;?`#S&re~njkmO{H-uhpP^nKZY2j_$GUa@;RLLxH0N&?GFXJ_MC*4QBM1()A^ z2^HE2T8giOG$iX8J*ZUK);9k;gv?lG5_9yaF)0;P!)@0|U}S_co#hTTO1}pJf1( zy;Q}s?-1$JBgVTi(spLQUb^PS!2Tc{U@*DmXA+8UD++HFKOPWTRGDn;2oTBa$fPQ* z(jmC6i*c+n`y_w!(1j3^(FS2~(aQ%v5wGpz6?bq)PCr<#@X?3lQri=T0us=Rr(-AJ zLO>kp9r$Zs>7*_2#`dWf@vuXgfn*w?DUJd8 zlX*~+0}J?m-7GRfr~gHFp6{o41G47Mm3X+#C+Z_Txp%nV&rm(vRTwccdsCx}F}AV+ zaFhnri=^mqMb9HXdNIx3vL<4}wiH#o_@fY46TRm9DJ#cDZnrNLJx!RhEbpncS-!@p z!Kt-m{qJ!wm&au@q=9bQt~A5~aG_9Jub(T)+t=2>)z=)Lo{5}O6TEkyT-Oj#NYS*N z4t@JRV*(%A7IqqVB7p>GV`Zq2g83<@WPNUF7m+P^L;YvP(d2uRoE?2Xu9)+&gU8-0 zsKn^?Lx!Z!yb1n<8}d(;t9Jq?rg4~JGqXUAr}&OLtrNBN@!kCk)C(owiH zUn+3ot^8Ai@XxEfVj3zHMfcoZz~u?!cdf_$hc z_^5fd#1tR%p^1;kRAmp`ZUb2a*$v-UG@B9z=Cv;Z!3}Zilq?FGj~1kZy%@D$Xs=_B z+4T)P?tb6g&RVVphQwi@Ila&(E~mQLAVoESh@T);FBDR|WOK1PU(TGvTtRMiJ{=nGfq{;sxoJc;cY7u4``^-P;Wg7!HSTE3-+EZX zQapX>$@n>cKo92D_T4n&@dkBwYWZC!a`}m>?_}zX53Q6Tj zrw6Yg7<@`#!kr{ybkkrSA>1>;a;mzF4JBh3=8Zb7&2#jK;*|oVgEmnh8n~0D?ap5f z-~h^pURc_*+7zSem_O|t@cTS%&DEOVCxZ4<6tJ)4aAF0pg~5(!ctSE+WcOrySy1Ak$76d_j<-CERr_F{sjY1FW^X<7qP3g23wFWh4BI=QTaZ4Wh^DKi&UQNGQp) zEVF-PPWDRd4D}6#OjpHUd*Bt0oUB;xU>k`9$ujRkrFX{yTg@8MELkKu+UhRkWynMcjov<7^eJn4@$ z+sUK}7(Dg|{^naq_V(Df-mv+iuWP10$|$e=f%vDkIvSFo1_R;9n^#`3lsr)+#n`J% z0)F9EK=#~VNE~V9l7t!J6;N8+(%^ub{g%0nQ;Cuq46arMT8wu3%ZNLZ=94!ybuGy{ z^WlCDCl8P=&r)~b5X%umaE-5s{eo7)`tgFS#MOHA*{1fercu)6;Du`}KEi7@3Ge!BUX*{*) zn3uM}@TCkx)`UAtDZ4BM{O?w?W64V=!eoa9Tehk`^zlG&snbI&PUi>ocsz?rjDg-e ztlXm2jl;mk{T-8Kf&Z_?8@RL7eUzaU`OZ8j-OZG3xiU@*?vJDt9E>oH(Coc^vs{9u zcMfq6L@v5?5K&nVJn<|Wp7H?x^|>M_Z*ADZY~An)>@8I}x43;Ui*PLk%~#r(F;~;V z@B0l2m^3HZy}glu%`j05I7);$KX)24e_GGn5JCmW`n1nr zuwqfxB7r_`Q8-2h@GC}mrdXJE&VFP|Dyc<1;mZFpy>{qh>+`AiwUkyqxJMS?Qdc01 
z6+Y9+MA2HhL@oel(y1sJRb)YqSO>-Z`os z<2%VvBEjuG&!SjA-4$n!Y})3_8|0Ce@$*MS&-(e^Zu<`-rezr|P&m#xETN;TFTaAK zE!GEHoXIpf>!i#uj*{xOpw{-i|Gg^7R{T>^_Nyos*Ba|v8rlQ7jcK|NjeIxlz&2e4 zr>afw&X&>-^5_qW78!RY8gy(>VC?EkvS$ycp!#`Vu!xQSe6FEXlWi@<#TyTzaW7q@ zhc)J7IYG#)N-`=OlT*ktL2ngwsT98>UsO+DSyi!{;6;goq`^{lEx1f=$wuKA*O>sD=a zMT}7{=>eR~D3sl*8&+epHqVLHY8HtDk10OE`}SMGq5I$)=ZBBY1QKS1tmF7mT^>=Y zH$=HK1ef`~4wE4VpkYNZqM$C>pX>VsHahryMPwV>M+307M9;u`yQK72{#h1R;*GK% zh<}ZhBU+(z$f5It?cS&QKag9v7OpGfd1=p6H>}3o8;!93A8y7zfYLc`*&gNk7roJW z*TrIMtTEi+OH66zzU85$VL@WbJMertvtFiG2(i?H@md~D&?E3yFuHq6|0V!bdJaRUXm{2)ml<;YsQh+o z5eSLatI|Yem|#Ukn_4|Cd6%{G+~H4jRdh05ShR!96jnJw@PYo>rPGL-bkRl?^8oZo zOT@i`|3xL(me&B|Y5nnfRUJC|@-oCO^c)GWi~J&#dudKdvFAsOA`BOpCtm~eIB{Se zcQ>{iI9C;IfO(vnjv9s9fmz^6RN!G(_U2Du8pi@mMMV(r}Ct67?TU$AGwK61) z#jf(~%J(UbrfM#l9Bv&jB{4r5(rmNW!$0d-NR*$n+ucrKtorwgmO*ym_kMpMAFivu z^OtNc1|TWVeoJqEJkG_0`#o-A8vdR!yt+a6y}@L--HqdCW#L@5#`4{()WHv4Xs#=J zl4LtF8Ihs$<5>dfMaV$?-zY_}!|OuwcB`!0YMo1VepP{2B0>LV3L1AWgic!8;{@c3ZqU8We< zReD#`h}l$_LOHoGz4~ksXyFy0OBFj7N8HcJHmyEhL4I<% ztMW?dKYQt-<3d0->gKtT_>X7cJ{^vGm-rIbotnosuNK#tH=^0n9=oW;C;2VhewFw5 z%fRn8>OUQtj63ZB>&{&4oZI{qL=F`QpP%&O0sz ztyKBE@*4NP0^fM8MP7_xGY_@_lb8x}@=){Ry2>yNgVkiSH!{T&$MbA=SXMZP-h~;K zprtM+o%^+rU$8#mq!z=pkncX>&#T#7S*>+;iuqtLQK471*UzB;@*moSe>q9~pMP(f z0(7z4tq>-t{EH-#7#MuF1y3;PnLgEf`yDmiv2-u9HbYD<=<y&D&*Rt^bGqen9O;kR@{%V9|CkCpRMyC|EKx zmpm*CfhSH+MA)|uvIQ~BPKGS>IJ_)uZb(2Y*Oo5latO6DP}R zLe4&l$Zjo0Ni6iLziNB;wz->e4pWH*ntw5585?R~K)d*|KM_bBK> z>&y2|2A3GAf7AqX+TdtSIRhoib>N!{65At z2VV0Nnd7uxFd2rIE{^5}ZDO3pq<#C<@Iinvj@L~%!^roir$&|5rI;(8Rlr+%vvh&K zo-|N4f2640q>Ry3oB<;K5Pc3mKq#C>OUi#0nL=T8C!pQlR2Y?Kt{lAkO&bHN{BiB4;d#NSMRArde?nu{70f;p$izUk!pOEKN; znsQ5q>DpVKuZzRl`*&yJPvN~syB_COb$?uRCadZLJ&rJpF6qb6z{lj3`O*-d?VwG- z&SEj;BN&?V<<5uur@C*mHQWR**f>NWqo%{uMY^uBQcS2UaDRkchXcBz~^^$$u#Cc>j`{GtnT(kr4A$RiQ0#r-cqHrU(TLBQ)h8^k{<6dNz5zo zMHD#sJPpqoA$WM-Z9+%?&f4`WFagIz83(18(DYH82hG>1lFWhHB>P^-tSXppP%~Ux z^}3{Yx%Qhs);Q~zGSOM=iS@CU$Q|fRN<}bptQ5*>2B$t|SL`|k z2p6$U>_Zw%=Xz9_#JPxfl!d%bpk!QUE)trw6sBTwW553A!1+y}JURQo++4K5oA|wB z&^-$0jzCdh)R?uj<5>==Oe?n7#Mw^cC(ksu>&3!e9PF!JO|(#9HJ9 zn>WNlxbD^yx5Ejb7Sr;;wu!dn0I`~6-7ZE7|L2xp46Q%DyqxWLq<*2@A>cQcKp8px zM8Quts3lPcRh_yE^+{^Xk?>CYmPEZ&WALqZ=diTry7Qcabf+H!i93`gB0!j2|B_@H zT9rxRAE_O)&{o7CYj@^4p5HFp-6rMPDp^A;Z z^OFsP@|oTX;;mp}=6q(W0;jC%gfnLT>V#hWD-S)o-XB@- zm-t2+w1|VhtZoyZ%Zu!x>+FO4QD#~Sy&g)q8B5CVAY^_jLj1JaaXyyn*OePz^xKi# ztxDWlvva@;6!5;yBE zm{HBMV3j;MDY!7nHs=A)-$C^Re}}>YJ9_ufbIgrtGqb_78*cy$!9U6UCSp;Fy!qsf zRK zu=8tXA5l1Hy0MYob8oiSZ43q2Uzh_~XU1nCzw^mKT;Go069LzhgKZOS!Xfu$Kwz>U zWwL~~LZ?W^KGyNSN#oQA$C9f%o4Gl^5BpJ)xWpK($f50~)OeLk*HL-?;%rR2C0^dWoX6a>nwkQy9k*FM%G8sO8nzisBy_tcSXR7p5#e3A zr1T*6!?_?*b?$HzF$cg28{x^bIwJ((nVTbQ6;4Gb`B9Z0A84(FU-0A+>+5@bc}$EF z*{egmwmE{2rAL}V0El9w;*(OR3Hql^Yz;Bnda)KwNxUJ$0#Bb9dEdQE1W_$g|HzW^q4X zzF6aTPC)k3X>M4-Pr&5uW~-a^Wb5<0q^m($>ng+)Xvza@`l8IPf3lob@{`8*JEQTn zO{-n~Mks=HVx61zIXpKIWPVr`4y0guRxG*9Xm8D=)a@%k!XcFB92?3!#F8>R!PIbM zUq}o8bBIq_eAInzLb#-`EXPZ;{Kfh}&Ro~JCVwbnMF|q|OND8FgmJ7t4_!$1(Gyt4 zwi5N6(D00Od8qhx>^m zgyuyeNwQs8Jk2;`2W*cW=wYY3(e6NXVSr*vRkER^{pTaBg6$r*1?^-A@}S75)C<@&=33rgf0;gz#%n?BC5yFPicZO9mKy4MD#hj`%cx^?>^YAU_w4{`f+coQ7rJl05-#kjF^e_1mCKn zj<2<8zRrIsc0gzA1TW7?B?f~vl`HOI9}OohH{_`?@0bmDMYl`#Pn)IpOfRaRsNwqS z%%7F{;Wey18DFWZd2xv}5d*_E;eN6u>#4B(Fy6lusujNbA+X;M=TSXBT}_u>4Sg+W zzRbaPB-A1g1YP@ypRoDaR8Q<_o?gnCTyNqpAK@V}_DArfFV@mNp^foo?l; zXU$Qp%>s*XgE29!1w+?xrv!>kPRoPJT-@0jK30g-390(R7tD7fy6W}U`i4je=j*uP zmoi4H+y0FmXZ%dF({=!XbK(2UuFklG4yit%Xndx(0Lj~I#nDK-8m)L}B93z!nO>7| 
z)#2;*4S1cQ4qb{<(Bh$%`d0s8;HF=jX|8Gi%)f2qmf9o;?Il7!0>Wcbs)LtP*?Nt=5S+z%`MxzvoObj{Jp2ba>r`i#B$}*724bLJ@cuo!ELeK zr0-4jbSqYRtW12PC5I`#0t4U7+HBl)ujyHHFI-BBxHn(ZTp!%Vx-BXGUbk9Ja8;U4 z?PKCce%n+vir@fo!si3P1MKFeYO@_qy)ZO$=#MbDcWts)J&OeX=_@zHV0CME3-mhX z_UG)6Rdho7Ofwtl_x8K)2I&M4qEv%2X&@zU>b~#tiK0F)=$Ygh@}&Rr^-`A=^SOZF z6CeYA!K-ZzNGQuEdir4HBD!G@irGLSeTUT4Ooxw`qxVj24(yULO@B;Z zgo#qF6w{L>uX41iI)@MY=s<(^dG(i;p_M!|KCeAm3$eohiqQItDcHjrz~*n6^uFeL z{m&y-4<4>M^H#L=isgRfIWHgciE`GiGHXTLazQkuxUueDq?3a_zZCQ~F5SkJp{=hA zSVO8XzwwY{XWM<9Ma*dZQUCi*#?)b&#ap3zHaMNckRr%uoz2ZUS=_Fuv)B9JeST#Wq?Ole*rmfVr;V?kBr`WaG1Rcvbjq>bpv-m^v5a zL}!#`Ylm^Dd-AnNQoc*KeB?Q8R~x=fR-d7J<>?wXVkkh=_$l!RZmG#s?bhoLAGCwZ zF3(-2z1b$jlL52kveX!>a_0EfChKEqpY*VOb1GI~DyLrMm7cIY&$CNJ8Dp-(uS2)e zri9~DT!zQkMD#VjB$;n$>WqB-NwmFE;OEmj+LV0h(x*L@XJxA17j_4oOsQb)o&R*s zHK=x`P0cvj?!bQ=0LR0M;k|C~jak4corHg|{E1fOB*ji^43({GCj`JE3L3wE{7ZAu zt@Mp%@a3iq!hRuXC6CnDHmiX~Wpq58*z+7MKAZV0`FeC{&BQ!6-AuMtl78#LX`VGz zHjjL-Zb)XDm(HJ;Vd!CQny8fevr%3ik6BoRmfA(>HxDr;`^YGfNc z`iOQvH5(UJL|O0W=UdB>tvl<}ubuL<{}nXB`pgETZ#=4x(`tDlZ}j283`*N7V9Hj4 zTHj?tu2piHx`hg6)=9w=+TxaHWxbMRJSr+O4iV6M-jPRTmzS8D#|5I1vsxJ6hvI~ytUBc1P^Z! z;$vyVRjP|D0C~MPx_yE~MyXl4tIk}OV+sJd3p8(zpX9#&@O_sP>lw^BqHu-~y+0 zSo3nEru2|j|3Nd^fBIYf*U6;?9ocbV2HDOl4;X{~VT4$It8a+G0pmq)gCu_>_pqpU z8<^7Dn(<4oZ1k|rOe6%2EAfd;oZVE4o`=$|2T6^|$W*M{DW0BuP`7qzTE5D|!;NFs z@)u)ff?L)#u$mFQ>_q7xX#nZWh1R;N1*8Fb z%4o83+q{44_N?%SNA0Ep0>xUpy7`rKUDiJ}z zf7_8hYAL%+ONHk=9a3{5GM0b8;OCyN2HytS_GZn`tv^;?zqkI3Kv!%4klP=1Cti7Q zU9nN#LOXJsMJldf7&K^nd)kjyL#4OX)p(2defBw~gn^mvrknd*S<3ugeQLHVY>W2@ zNgr-T$2P0l8z3A3J>ukMFisHOX@q;$M_6T3krc2sD?el{B8x zfaQQ&0p&0Hx)qIlM?6p%pez1XUq;!y7 zv2()_7PeywybvH3r(+l~@-=+0ERA8sU}fMYLmDrgMljB?laPp)X(!r(G&b~csI+P< zrcTkSHcBx0zIaD~7~YW}$Wm``w~CG@YWjWa;On39^$R64!wbl(+Jzl1Ugc9j zjS%?F=6Hvu>(d`I_@z(#EaOgk4(M)Gx~~8wsF_A74U3JPhBdckXW|re&~Ek3*~aAN zX-7vc=O0a0Up$;~5;(i1liNF%ZGS9k^NXzt+y)p={R?Gbn8WO7SRTFN(S!xjlF$ao2}iR0c%*Pt|MF8UU~J+ zR9xGo`keSvmV5KN$x3jQKEcF2AOP^HiV`k=0*Guy7S^JTo$gfFIh${347(00r}C)j z57`v_C>|BEMsXK%lr`*)G$N^XTB#?d=~7Fz-q3;P$#AtGe@|jGJ)Ia;e~6T!_#8j{ zt$F=K8(fG7`wxBQD+Vi`#g;ffdqnMzsC`;0jO!A9L(kCdCf?m;bJwJ4qHq?-FknFZ zm1<@DKS=)pIE}_7-}Lq%Ig*-l3s<^UWLv}i?rfj3fzrYs)-!DpcpRt8S1X$i{-n2@ zVFh{t=TPsH;_h{I=}hilc|;i=tQ zuvsM)mqPuD?f(iptw>62JK7Fa1n~P#oH0u|J^^k}v#e_EE@jrU&hl4m7pz{(-yeAo zP1`ZbE1tLK_YrucbO;0?PyG&E8@@LBPaW;d5FAIcZ=Du3DFyNSI(97@;NlY+?UhRE zTAuSe)zrSfppIoyb`_egx19X7x@^A6tM|`38p+!cxmUb$*@6Z&_(|!pO#(ht&$F+q z=&@7|Lh3zv+NMl4=26D1L5}d%j@9^uk8CO`8@BNCzFn9%a1M&r6gClbpBk|}lmUxlCakSd3Y^Xn!~Io6RJDPdLUV! 
z(JC(Hau9qq+8JV*=r)36(yWxYL09q7LO1)omF3$*g5m%uzvUjH*?j3A5Vy(CTAAy< zrCyBUAXi|ZAdX+Pd+45ey~rm+NqdP~4wIZdui{abS0@a~l{ZQ$FTfm{SBofx`F6?a z8P83A1vkFy{gLk88K-oqw;DsLkRDT)pb)W!Zx`y}8gA}aW z3WeT$n*GwxUFdGx9l2-&4bOY8IrwTQ%|Rox6(T>S56r^ znx-m&+lFpmbMQ9!x~cPy@)C*BkH0%N#D60GB+#db0)I+Y%KZvysLL#eSd^DqibhLJ zsf!dkB|xtpOO2erW%;R0{ze#x#n`O=)#=m}*)CFQZ`(EfZfr`SAIu!Hw;Gh)qK9zS zqcg_Vfm5tt^$`EpTE1%@} zm(f1}9tzZIY*o6-3VhVuw5xBdt@2z?m}o-RI>&6}XBXSVi}3APOb_4NV0uq-@^6VP zyIW6Q$&=2#{T+MJJuiYcS+Na#q4Gj^h}_d{Td{e^R_bQb&W5Bjvl?k%dp2~|e*8Mt z!}W1sPKRm4;#R#FWV*e{QEf-=!u5}!MV}cN4050{7FF9gYW>*!H*(hkyXrv2iMeHp z)bEk`+ihjXy?Esxkac4#ovk5nTWYXgS6oF=Zx>m<%HDRrau~d1@ksxffy#wsBVP{c zfRE{>uY709noItMKW)c=ltP<*5hY{Ms6Ge3IMi!s@OCmA(e65ow`}pYZw_cCmJzIM zfU@E>x+BS|!MeAz$L{5qqZg6W)KYN{6jhdEItUXZ%gw^WM<^ZIB%RZl%JKk(eQUeO z#qdfQ`((#S&slpNF3txm$y>5b4$);F&PE&92oVSwgaQ-u%*KRuEuPEQM8Zk8Nam%< z?xVIsW@@-#MO!AlwvuP*!Vu-n_5xD}VbW1+R9$}V7~$A`BvNb3$NuDHu*?U7BczLi zr9HW)N_`x(fpL%T5Ka)YqVP&L&J!XtQYYGp9zgU@MWcr<9_`dr;pj33m$7wY#a3gg z2=(!Eoo3oj3C+7Nf)}3l_31rFKs!ivXD+};T_ehk%~Xiy%p0BTGoHT=VlYj&8rj?~ z##a7`E{;?CMK}zCUEI^#nZdrJF55o&->{+W=8wmo@KiO8ZME=I@;g--Q<3;euRt!4 zd|zy`(%bJ%G3G&8l8$P@7jrIKv?GMN;q+&86zCHTZWoA^k@kR#fS3Nnv0OW($HDI2 zPIX|~%$SRbk00=P4&&PsJ5oZ`BGJATFRYY#BAwCh+(7J0rP(3sWkN5v`zRfUFz}o| z-`Tn-BxI46X-il=5F1$^Phs(XdbD=1eK=J)(WnpLf`Z?&$WFa$uYe2yjt+RMjU> z^kZzjB~jTG9md1kQGy_=%_US7pFFH4Gll(0B;G@D7d2wu#I(KOkdV~9+z>r}p)<-= z(?8}wtL`N^Lugwu2yu!B9^vuOG7-;gv;n;B_|#|T=K)B*ol7@yf!7gKTWyI7wD04X z2>6`wlI3l*pRjm|*tvIdKirZH#xnq-Vkr=faVgYg%f)YesNUap*DquKF?eK&e3Jd@ zr&Psq=5uNFQqe3mKN_#;l9E-tS!MIDukx3qAM~1a1JH^xkdNr9&Jzg=5<<4df|41T zEJ=H~*ixl*AA3X({nP5Guj#jRBx7p}dUf^hYh7S#ULXb?bXW_c@$g7RO*Wf+@pxtZ zRaqJ?4(6KEd0}K2#kQ|oU3B_*Q+e(yLv+8T#K|}?g z75jvTT;C^9J*d%b*D+>W3TC)4g^55PHO*dg(d?>f^Tf^asK(vke68zt{&23s{s(=0 zAGgok4kL?qF{jtE(NuHl1i|yVMP}kbhHbhx+{ZNKr=}CQ$#8y9+XQ>YNx=n7@xB7F zxbSj34G2-t=hE9{C$%k?s?o`XHL-Q;sOg^;ok&zxhKXdSA%#^xN54rTj%sdB3O7O& z66UQsT$OK=uis3$WT0XryQ-+kM6Fw&XKIzXO0jSAXFP~*GIDQIL>L<747y6a{?s&) zUWFL@*&`e;(b6RoY$JP>wG@dwVUFzNr}Y%dqnJJ?;p)Et^)qY}O9o%z-%F1&DcPa) zgu{lst}Sw$R|eB&U6H(`>D9QNVzG+_`YG3)Hr!}1A3Z>)HP{W!W)rK{HJR0oizxsb zlEYAbftDZRR++;0Jw1sPaE%dn#oKxlH=z(ZfsZRX>+&EJY%gGLn8jK1>KTu(?)IbP z{@;|Z_#LP?{vx}Ql8=iYw@u(DKRStmyZKknw1tbIoX9+XRi3ivuId3UP&f!w`|At^s!+{ks_*l5*! zOO5}rDt*vg7tTErx8B={Q&fMW5l@e~no}H-yC}YVXe-2~y%-0yEjo8vOHlWHV1L$b zcUqQm#%=VM=JzwZ6HfSk8QCrz@70pGAW@VvdjBC1q`I|*A99#YG2)TzrKdvg7hR_a zuBsLm#X7?K4VWk~65>7AU`s6HI4ZAJH}u`aL(zs~C8anqZg1k5x`!QNqPN;v{@qdY z{;|6?+F`$L>xe(}<@fxkD%Mmx1pb7uv&E!e-kgtdm}nTK26|7FGXK*2{DPs#{iPv3 zhj)N6r}PX|U|QyE+%lIGMe*M1>BRYDcvXH5ER~M=*2{bg3w1_B zO)&}U*q4*RBQB1JefAMF^bigJ`aNT2G5f|{^q{0kXKo2yH>v&-@G46bb&m>m z-7oJ$DVMy9(^tDf!>2|+<8Y5VC2@S;n^HIf4>%q;LLC<7x+%OU-fNt(n~MZ%E)!

!P>B#q zyj#)Q?6se7rC9I`rGLur6vo`ApR`PS@Pmp$&<;-PbKYDPe)BcLEajEBZq_@b#R+z6 z0xknk%XsB6E7(4oYPD!uefBtDQlr#ok1*prc&r@!mj=QOVE4`G(Qo-_XL$TN^k%Uk zQiaw&{Nh`i^Y7$`tqo-8=eE+1~%%WKu;lK|_7)j0+|| zuC85RDRN4zX}h`mHQ5p|YLKk_Jyqxsuab&4XXtybo{<0J1Vm=&Y(Q>yEZW`toqIz; z%FuLFk{VCvXZB?u<*y!CYPHdpw2<=tvOP3M-2g8?)d@)arx@}d)GFkCU?LsN2zvrR zc9gCYm|67q)CR?#9$#v@g^zXEc>#8~70xNSbI*Qkjz;WxAti__D6mnjoJ*@vMH#jC z=d3wM_n>G#j=bF^ zVRwG^cY}?p5@G?)4B>-VIpO6x_M?TO?xdMu2Uzu^o6kva43uz+u?77qix9VmqvtDduQ74?scRFdGA78S1WVYtz?(JlS-+F!I z5`NlJ`qCu;EIthJ;2OMIIx-S0-tjAx(HGhYnDkTkTWAEZP8-ehns&;8pMkAEw%j4T z_eV9gi+WqREXsG|wm-k_on6;@_B{1+bB|F~;u9?5?SlSd%&c%U%gs)wyazPm39N3d zJoJ!xA1aX>PFRE9-(ChRnBxgwPoWgp=E#5Ga`^XNhyUn(7?F(J4<##ioWgtL)*L%< z4g4XVKqIJ09ZoQ{67~G^$`9{R$&zn#51*?EH)e+tZHv?hcy|aPo+F66cKnYELkHe$ zFCo`UG(X?Y*3d`shwEPzgC9eM(X1||g~0IxynksU-xMG}6BzdZ~HiiN7p9z*M;o5?-q z06bHB5Zy>cMWwo;hnq+7`q9VvA79Cmvugsu5%fyp0w2CX_xkfMDyh2AE~bSou#>({ z;hm{z_I863XH~D_Odk7zxtBw%r`h{S$?o+RvfR@#)sErtc?Vznt|Rk&l4 zxd;fcB^Kv6GW$fl(nx(qK6N^3E zk(FZ5qnu{=4&uZvJwMu)1r zH#XqrNDKJ>OzGtIw7%;lzrt;z%R)S)-6BYIL<)94Z_>$@5l!|kx383}9)N+iSPX$+ z6U$DZ_4ve~LyDBJ{RG-~lXcPagzFTKPLi)rm;ZglcyLQ2R$~f!zqvJW$o#$G zb;g#P*_IltNP!T+?*NXVK6YCHlJff#P6+&K&zIi~KKqc_^%ma5XXE?dI;-(BwJ}n! zKd=v6AzWOdc~C)=28tmL*7hq-IU2pj5op(x*l~o^?hMGk*(C4=;ni07v9>b{OkdTz zrua1WL33>YLg{wxKzd{H{cj2;y6aZ1O>L6(39K_rsk=FlnUG%E30zak0(x_|kop&@ zQ&T;fkojeR;cVaFPnBm%NyryH2NvBYRT7tD9`+14(1I&7`k8KOlOJ-!Wi{t7)c!QP zTC*7t+pJU6?S5^JMR?#fP4~+cX6rAFjBRh9hln49fLj9}5Jn*0-pa~! zV5sB{j~*9?{coB+WR}L|Gi*jrC!CYw-(wa?4`E^jW1W?}GoG7@;1ULSIIaU{)OrWx z4O~2UGDqy`wLrzUCPwADYjnvC5qg)b0Pi5(EgV6^%0cJIw2`h}{L~6veNs-gTLHzn z&GRo!O&=I4ua{eExPr+QEHLh_D8EU6?RnMUBMs$26h`j|PspzIk%5hy!)XX>&| z0M+nq;_X{k(++TCV0k0X0GyDbo9Ovw8fHq^KAf}h!v zQh8|e>$hq-fK#7E-)Q&_7&x-kBCFH>XF#a7St2&a}peM6H|4;qG?}IN5Fic4W~uXy3!f_5HKp z&(DgSo%?TN{iyT*Y26%W#(pxCLHym*E3f3ApWTGfHj{GOB{Zp78yl>;p7dwd4`A!= z+^?B*n_)t?lVE+To&BJwq{pNOQ*C16GLzKgE`V|m;Y-hGVU$d;ZHXlQrFmTqVX%~p z3YRvSrA`X`grfUINTW}-yWOlC?-`CO9w7LWH%19+{ng8d1nkMN*9nv7zu6tfyD9%9 zAa2@B$fZ`)&UIE+pa z0!v<0>c?L7)+LAMm0Cy;c`I>|*O~!9(LMnbQCwG)pz7-l^3KLj3a-91U$|^?Oqnz1fT9r668mA5XR z9>jqq^~1}odGBO*BfrQv%{Q5Z8@|PzDO0|cvppR-R9)&R#H=bse;3)+^(FXLUDNKz zhwp)U9XbLgHl?8H>!Srw)ZX#L8hmH6;EdLVcnY@-*ow`$MOiNEI(|SgZL?NPP3?)O zl$Neg41D}z#n?9u;Q6gp`_d&g=3YzXqh;ezZnmZi$3RLz&za!JpHDEUg?M4-qU+GW zQZ45?DxVe#o)hkQM?CIC=iW38E^8&z-G%Pzi7EJscq@{q)+# zJfQ0s9$P17idDad03Tf)IjmFd&fB*WJXL0!;p$TB&6clw9$z(8Qn|h|FC|r&L#?EI zxEV;pskj7Z2)Y2GM)8%Z7P(IW+G)qnYL0on`9dF@^~NlHeiX4lgdM^TDR|+-yPOgC z7AFd$#iAFJwDU{NO@D@s8LRCCfM9Ap5?^2tt}EMv;->8JV#- z0>$XuZU}K&5!~W71mhemk`?1t^yszSJ1#dCQ6v_quNjV0(1;_wSRzc>>ovTFxaU=q zy!!(y^dV}&!Rm!)(ib{zIeq$*E}-yrWU(*Pf^OtowwTT+HOqP`xYhc}457{|_bc{U zOjUPUkFjLjCr#yy=Sz%P?#qyYbQh@uYL}o|<7TByP2e78@n~b&!DXfq5P2pL+m%B% zqDU<|(d@hv;G6XjZdUy48t2#fG$&Aj+PY**(wMEl2yLVn zCG+XAsm)}0Z~qIpqIz_2ju*?_Bsa0?{;0%o;Bcn zikkU1-ytDJ;s4Qh2<*zO;h^sugh4`WcnUV6F|RInb--P6U-A`qq9W`Axr^1^ zHQ`AatdLH$PD32bJ%KOF%PgHwgzGQySyFzyB$!dT;Ay z{zU;h9(GQe61_bXmPvk=#2K zk`?NH4zI%Y>z{TQe$NeqGc?(gb~|nFj;U)e5Scp7XN54T6=_F~kww@Xh7{Jjr0oC| z+0Bq3llp0cnZz`-J*g)F(R6_?lRx3@mbzt1Q|{D621_lAuEQHkJnmjf2kHPb9jq*(EJQLh9R;GO)@)S!;nefMEA>c`=g3i0OaI2U^$ z2|=pEpu)A}3qPiR061&2n+Q|oRpx+)S6Y9PCe;@(-c&j7rbp$2Q$q0LcNP zYJrF#tkIa{wqypKXv7wvMt90VOZaVy0mFWOMDIPewY|lW6UOGS_Mpq8MrS1-E{!B4 zBlOwM7szYDd#}Hv(Wo$Kp=hzY!Qn%{7U&rq6JO>($}LA1=0m&Z5flHIcK)NhL7O`7 z{1++TzuwA9biti-uM#LJt!ac8A(g_ljljtGd$01Wp3=gbbK9HCJ}X?G)-`!_t++(6 z@vd*_TorRC*22waY`2DoQGI~Ul-3NzS+3?9{^fY=>?6OqlqWqsW>%^YC62qO$`oc+@&Jc ziU*^M&Z9nQd@{wL!~y&HIzIZ5E+a@_+UY4^_+CmU#~sh=%0ls($2B$*(N{8bW>5r8 
zzlfDK^^=}4>^7k@qvWG)UAeEwN5HM`avF!{iPt>FtGWRy3z&!S`J1Dd)o%Xdl|+iJ z0`ia?d3Kn8>Wv6Ewbe}A8=*bXeye8gTY5Thw0^d`yFRqtrRp_<8n3`NqsD7(pWJvj zn%~7Q_UhZRMM~>p3ZQljX7w__%Map#0NR4iOG^tRJ0!pGk9;{0q5&*jmi-mTc6R zKYc0W>#tue6si2^=I3VkL)fB9dDeLR<0Ru{a|3>tj_Lc_ktI*#IqN;t>WKIm>I5B` zt5NZ?f(`or`F`!V4^bTagpG8kn^qN8GGVMokapH9wGbzU zQ_tc-3Fh7bU}$dh0e%C}{PtbgVo%I6iMIk>V@+}FOsp6#YLlX-(owMy<6jbFZ{K9l~P@a&1g}i%9vLx89nBUKRj|TYz_( z;kM>TDja4t310pwly8L4oFnXDL9n6Bd3YRffxiPvPx)&2(oqyuIOQzmiunG>A=;+o zeydd61i&hXlQf$>w|x!le=P~RZzS=gE@5bt5TKdP>deOB!O}x>F4kO(qf|D-!3e%qNgyH99;eR;}}=A3m9BV`%s8v%`^^8gnMkVR3IYZI?-FI^IVKXEUET ziFLb2D-~RSb9VGh7{*gWL6%QV`mVM|F0hx8)_gMFkuS>GskPeZxk~*gO#0ZAD_C;x zmA+{0A7t?Cl@S=oWdU+u)OQa8%q$;xg-^w{xhVF&d^ZBDExLU&xM?X)V=xo9Yhafo zQip*X!YVM_&McJ7jpAj*na?5k=d#A76Kk?eJWQ(u%qO4RqeH&v-MxKw9lpsuz@4eU zvMCHQlIRM&*&ilJU!E*?`2q4ZCY$)yRWw0fqK8}c%X;V45F0AAbd3URRqg6V6i}Og zN{whIgT~984>DrAE)!>;>#kU-mmIvg0eBgXP4({GXu)0e8{MV z(SNsn5rl1$B@Hdk;YtfZ&ew-YXG2HC*;V6ur4PTzJ3}txZ-`5!?+s9bz52d?1=KjXta zP4RFOHl2J}Or-xkC1_asMmFN`1a(nadw0Kv4Cu~LAc=l1k2me|uPc2?I+A*9n9Jxa z_40Cx@0;u!&s%hoE{(ZMs-2`7^fLEFJtaXl?BdRND2{IPL~&?TMtoXG(XWA}U+Wgu zOBveTTj5s?+><*gp8eI(j*w7BH%->s+{poh7OcWBmv2h@*^Te9`Z3*%8UQvbvmRLd zO=t+-GoHyd_L-iaEe>=mi2YdIp{t+2DEGPlqf0d<33OxomR-;s#2RmML* zNhmmfrTiyMSwBcZEDlraKxIxLU#-@z-_d`08LDLPl=~|1?c>WMwLJ>rH_=-&j(m4f2+~Y@pCyrJ->QBnXbd)m)9~sLz4h0U6)+fEEcl$ zrxA1g*EXu9rWTj6SBpvp$j%#jr{V^!wZ8Xob#q4O3qRL3s5=fy+-<->d{C8kOv6Xi zXZ2~+@ncH)bCN|Xfqnf*Y6^UmW!}fCTy_p(FI;;4)9dRWuUWpfm$3CBg#mDink=x# z6u)v?3i8P)VzYK?!>fhNnx2uJ^5o!-U?C}Ki9rzGbDMi7$KxFO%l>UDoT{xuRm>}( zu>xFt7&jkyg;s$4z}0P)zcl)9c5f2RFKor#-n`@%bbUmF$qIIt%tnZW0i0)9np4h1 zTZC)Kft}T^4@xeqzgp_gWEBh+;nwTFdrh3H$zkmjmNL1w8MB!ls98z$TpP(Tu=iU8 z+3j0X91!5yTR3UtBDM&U88POX=A90fof&kO6J~zzZeQ;iVf4%ACMPk7y4p{q0?svJ zaq2(_)%%je+g<8#eb;xp=AKKx0^LY@C0q1=3{(Cg511D!;Viv}CA{OkZ8P7ldSq;h zD>v?*C{P#q2Y#XE)dWC~9R>dB-%s{-`0@Si3-HO^9@X%inhGZ$yl#?Dsr8IgMjTF_ z?rZ47%p6g3aqf$i@ANqbJ#uS{!l41AqABx4Q?e8~wsNj+q*iC-#JCh(Dzb-_eAko& zw9&10ah&aHQ!}n_MOso)NLVcjY95k!%{NW$@DC&_nz*TGSbjk+Rop}movh?35Ouq= zbn(`l#huDuFbN8nBh-^{gAy}f7$`{G^1MU_L>Kzu=aUwvkzV)wen}3M@wCizxZah$ zBbL8Dal-BG;uPajuJXs^R0=MPYbnm7_!y{O4k-dhY^|JDAo9`5KFPH;Ka3x4C;dwP zM~iU3~F}zZ&jcVLI=7s3Y3z-IOl2`{%Wb#V9EHU2YTtD zj$%l~P3pf&sHYw41|R=8NzUJXs2}+{QJ0ys}l-{e`_RxbB8x_8x=U5)KN=+BP9_(`!Qlsg`Ukc|1r`I(3+ z^#LEh3ZsG!u)0M;yULcmlcgSOTk73TSZwH&RtXAu4aQbWaVxN0pHy~6Vj9iT|nMC{h|(B@H@K; z**D+!?>Y!S;hzV>uiN0)G;U{V$+SRn<%~#oAkpW6G#?s;?cg4-5do>lcKG>qI8&46 z`~a`?Qkn_CmDa5BG`_h!~EfwM`T6lsn76GFaWI{K&C z&;Cj zdF3R2w`fumw%ajy6f&w3Df5SDvhpZq`&oT6dx*}+k27F9!NGDdE|IFI5m@P znsVHlOKZ`S@hIo+*k%Q4Mt6bs+lZnJ%P=)9p`c^VlHr#9;`3P9x#OaH7n(hfRwGU> zkf9)1kPRNKbjjd>ca~{9^?JcCaEOpzbhKp% zNG*jrolVZaADze&SCTQ^}gO)bd_+_10y}IiII$JNYq*4=Mhh zxzRay$Z`0JgX9e*ocFU|AIv3GUR-;3d4V%~*;^{4dt^$iq8=TyHESsrFBJQj=8A&LMqO%3pDVcHu+D1=u#sam=N!S2AaP`*%(mCYjIs%-4008}|IJG@ z!}Ggk*U}SEQ`8o_3G9QJMgKfU7IkM3-y~n`ThoD7;zG^l+rR7_1r$0@sJ`yySQ*ZB zp?!-NE?L|l8B*I?=xfc#S11s8(K?bpy?A!O0nt)|D`(%99+Mvs2vwLqehDh8 zS24Ceu)RnU3vRiwp2O9|{5$BUuJWvoHEtre-PgjS@6AutP=`v z2UqC%T+U*RwQTu`e(1&BD|EijYIm)MGUDK^Tk<7Qe?JMsX@5WE2mxbV_6`#a!@V6%8yv_d0#r`8NZdk{an)`B~6 zICJ_BCx&-Y{2KLAYrNr!DqR{eG>Eg$e`%g*lGin{nro3~_syNY&=b~j|4?UoeEFyH zXP@AU<8S`|{b{c?T(K*gGr4m^vVecag~%s+{GzZkm&OZKlYF}pe{9Iyr~XEt`sz~a zQkV9u-||9SaEH+M4IBQ%6EP1`*dsDWw7Ny5UfD*F3hvJ_q_LTOAAV&m?bU6Ny*x-y z68qr&9VY30$2ajb7tS{yFDNJ0{-ue~U}`3dmNE`@#pkyt@@AAjev({Z82lrDTSYAA z^TsHO>IA(n)^2@&2vO;r=C73M+_>5@Ce3n1kh2>!9d4XVE7ojOk&8S8Q(R3Z0CxR7 z-9hw}=)eX3s}yr=*O_edNE0nb3~Eqg7g$49>@T%6^@?LEOS(Gx$k3opSKMJ|@nYS( z+0|q2$0=t+d^eKSJ|pdYS{&-@Z>BwiI(B?-JxnxhU7E<7rE+m^GUpAcds0f{LlkLn 
zCwr1Yp0`clp)d0zMCY-YeY`xwGmqXoUcb6#&#e#Mgbf*#qVWCpn-wutn}}~?3HNu% zpo|^jvj#V}n3nr)75Jg``>?smbJk9OYeb}Ju~dhE~K_i?W7 zT)d}Mc( zVM;BWRZNN&o#jq1VX8PT7`n@?ZW^Ha87%eC1I1PYFf`jw_thMhnk`4|pCs#0f}{zL zG}or}tr1B%gu?C!z!zbq?K+xolEi+=nx5bg7#w*~=K75dqq9GtVYgF++7{cHJR{gVtm(jYB(N^*h;A#)0Ho=Nn!aeRHzO3<&f0g zapF*fbjgrxnL0CkX=c4Wo0yVk{zT?y&$*PkpT8TpOt|_!tJ{#nZWIwY2y7L#d`F2k zrH?kBxJcjKt83{n<+y3loZ2DZ*)iq@bYOYt3YT*FIA@Kd10%7GtH1}aGSFph{`~gB zV`+1Q(P=Q>?1^Fv^HGD^4qPM|9+!0BvtEv}7xSq_L!{ewwjX&!caKqQ^1#-2rfS6b zo{DDypYOtJByXv1HBU&hZ_Di66M9$oZz_m${V-j!^LL5`v9>!xO^=UqB0u!(@Q}M; zGC2K3ikRSBFOSO24%T-@q|figGf4t}*B6oOWNj_t5P}wa;af<^*Ab~rpt1~_3oi(n zGv}wfJxgZwHqjJROsjF7i{8hInFnEfetB|$7;ET{o5kekYX?`l!c9H>))uZZf zi?u-H04qWferKde1v9=`<>KaR_?(bwv>-sb%_2Uc1dQf8eE?=uwNf-?2RZ0|j~Ix3 zInczuYJc1W7=chtmZ+Z+i!cX zH^w@@`()3Zuack`;%Q0N$pv~eaX1dtRRwG<(G&?TqPMvcaW2evSSFh73)GAG*!PD1 zy6?SX_ZJzfpH$`kq1!bXqofra>3EJA8uYK`C;Oh_mKwJC8OV?DP&zONcU=lJ=p^<( zGEu+3Vi#aaGf|hgQf0_jUXOVr-b{D%^E8gjX~vlx@>$H|f2xsGBRZ^@aDoLM8l03j zIXBfAVDCm@w|r@!HAE1A9xFhh;-4ppk2|kQ=zUg{F@F{EX}A5`bQsyxk|qUcW?quM zx9l4% z4;r*TS&lPx#A&k~ZEhgy4v%&sETr2pJQRr*+R`BuW|l2X47C~-z{2DS=NU`v})k_jP}`@9Tfv|1bCN{vFV((=o|6Zk!dQmx!Ak5x$6Os6+`e zH)9GLvyju@1Y92Z#IB+~Cr%mq_Rl=c3ej~7T4zQyJ>tTE2Jld)t-#Tf&(m&KKRd z+AI(!@=d%1LUwdP5fypYpr>)GdT`w9=vPz6UjR@`w{>ctu-Ky4aJO-k;K_C1WwRh= z+|fs+CGAq&!Wg*RTchO+TebHLBVri=_J(9{3{%IozQ@FN+V(5;6>Vq53-~LB9m$qT zDD8G`e-nP|Cmqi@=#G_FjK!rQe=I&8siy4h$~U#IRZW<|>Eha*!e|w&Vm6U=26uDY zjEG>EJexjr!oXW<(rcB3s=Z{Jh6pNk%Te`9O$pOcVTdVR72;W;Y}m>V<1A3}I4H+7 zG2Fu27?Hc|#^*~!)5m`-_7p{qy?y{ntpQKfyqc}GG~|}Qu~g-bviW@i2JqskWjrg- z<@VI>6Hqa?YqL6&4%V7q_G`p0T}Uc$*5};lvhd&aeEkPahjd|)Ti@Vnu{Vzo}ib2Z9K5$!i^=pM&jq@sL`~!YTt4 z5vB6&z^foR=HLl<#ck8 zG8ap$z|JqbboPI$t4ewAZe;)7OieV+0H(IB%nof~Nh@z(X0HuDB3lXUnq=E@?pIwT zE^M@hJ4KwFYtH*7Nnf4N^XN0w!mElr_Pcq|XaQDdml{w*9wsZt$2G}Bg(*018rn4u z+1yqS4ZSWlYo%N2SXr$@?2bIvZRt7CHuR(eojJ@|3DMn}Kcts~_i2iWizm#JDkf11 z?CO=o&>uF9LpMRZHiuXr|JJg5?d5$H18YTsAT~CfaTwJ{1 ztU)Us^Y{#MG^{cddJQB?tm8YH*mGUD#I4yD5}Ys39`f|5K>Q>T8OH_f4$;Vqc2vkd zPYpRVW)JT7NRYI@oWCL+F)z?~o5PlCw_62{QxgJtKR?eR;4@Pn)BcX>Ud8_i8W-PO zL)cpm4VF^-s-CtJz@}iGfWUME6ka7+UA&H^oMI`=#w9Opn?C1GQV9Amsc^H=sjW@gM?2zhvgs{Xq3;(7_xQ zWWsuZn9_KTKzE>-?H{8H#PJ2y;|zvO0h3A}-h{2T@pRTK(N9^*=nyhY8m-AO!O3F- zaQ%<1W4P$WGNTsX}S!a}qVQVX{b z(<_hno3;jaK|cV4JO%P*??4!5HkE~}(-^{$Myf&(vzSKr9vR&e=CRL8oMlsD#C#DY z^%pNp>3989v*U%1o4cFb&7o^Y@9}CNc%OELLh+5taCiQIdTpU=4#+%ZH9^GYOspNb zev_fk4k4MtdMbG2@$9F)-{Ova=w1JAnLmJXDT~6axHY_J-7_|C3cKExRIGO1q%&Px z5@-dMeJJ_hfG>eYQfJEvVBdOJ7c>vH2`t4thMU1!k=c4aR(v)F>c!hff)*F?JBZ8V zheX)$+ThUdnIWA5o2}nNrT(9}5Fv6qQJ&v6^9%4Xv+18j3IEmm{f)JT*$3e$MGA8n zZIWF^6?_#mZ4i=U%G_>iP~YR_0Ip{lhuy>t(gE+ZvY^ZjN**Nfsky} z#1JbFq;|c3DQDb+Jvrd&mn!S(a#s9QCirNPd56s=Y1K-vgB8!aP5%pM0$dg4>|lam za{AA_A)T!=ia-?}Eb+MK=uHy2<#g9~k7sa15 zO1X90!%~j@Bx?$`dwvUu4rqVEnZR3;6ST zj#W2i`};>nM&{ zbRKCewqa!4NtClL$)m>E4t~)*gM44BQ z7F9H9-E{!#94W$TOa56k(XZ44o*G8Kd&|>!sGD_XrvnCV59r}48_5*PMNi#xtvp)z Kdm+-lM*jucPD8E$ diff --git a/tensorflow/core/profiler/internal/tfprof_op.cc b/tensorflow/core/profiler/internal/tfprof_op.cc index c04b0ea0c62..5a8429d4893 100644 --- a/tensorflow/core/profiler/internal/tfprof_op.cc +++ b/tensorflow/core/profiler/internal/tfprof_op.cc @@ -109,7 +109,6 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts, fprintf(stderr, "Only 'code' view supports pprof output now.\n"); return root_.get(); } - if (opts.output_type == kOutput[1] || opts.output_type == kOutput[2]) { root_->formatted_str = FormatNode(root_.get(), root_.get(), opts); } @@ -130,7 +129,6 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts, 
     nodes.push_back(n.second.get());
   }
   nodes = SortNodes(nodes, opts);
-
   // pre keeps track of previous visited node.
   OpNode* pre = nullptr;
   std::vector<OpNode*> account_nodes;
@@ -166,10 +164,6 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts,
         (*it)->AddSelfToTotalStats();
         if (pre) (*it)->AggregateTotalStats(pre);
       }
-      if (pre) {
-        (*it)->mutable_proto()->add_children()->MergeFrom(pre->proto());
-        pre->mutable_proto()->clear_children();
-      }
       pre = *it;
     }
     if (opts.account_displayed_op_only) {
@@ -178,11 +172,6 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts,
         root_->AggregateTotalStats(pre);
       }
     }
-    if (pre) {
-      root_->mutable_proto()->add_children()->MergeFrom(pre->proto());
-      pre->mutable_proto()->clear_children();
-    }
-
     if (opts.output_type == kOutput[1] || opts.output_type == kOutput[2]) {
       string display_str = FormatLegend(opts);
       for (OpNode* node : show_nodes) {
@@ -192,6 +181,13 @@ const ShowMultiNode* TFOp::ShowInternal(const Options& opts,
     // TODO(xpan): Is it the right choice?
     root_->formatted_str = display_str;
   }
+  // Populate the children field.
+  auto* pre_pb = root_->mutable_proto();
+  for (auto& show_node : show_nodes) {
+    pre_pb->clear_children();
+    pre_pb->add_children()->Swap(show_node->mutable_proto());
+    pre_pb = pre_pb->mutable_children(0);
+  }
   return root_.get();
 }
 
diff --git a/tensorflow/core/profiler/profiler.cc b/tensorflow/core/profiler/profiler.cc
index a5e513aa21c..b280242df18 100644
--- a/tensorflow/core/profiler/profiler.cc
+++ b/tensorflow/core/profiler/profiler.cc
@@ -266,7 +266,18 @@ int Run(int argc, char** argv) {
   linenoiseSetCompletionCallback(completion);
   linenoiseHistoryLoad(".tfprof_history.txt");
 
-  for (char* line = nullptr; (line = linenoise("tfprof> ")) != nullptr;) {
+  bool looped = false;
+  while (true) {
+    char* line = linenoise("tfprof> ");
+    if (line == nullptr) {
+      if (!looped) {
+        fprintf(stderr,
+                "Cannot start interactive shell, "
+                "use 'bazel-bin' instead of 'bazel run'.\n");
+      }
+      break;
+    }
+    looped = true;
     string line_s = line;
     free(line);
 
diff --git a/tensorflow/python/profiler/model_analyzer.py b/tensorflow/python/profiler/model_analyzer.py
index 040a4891637..46a921c0a13 100644
--- a/tensorflow/python/profiler/model_analyzer.py
+++ b/tensorflow/python/profiler/model_analyzer.py
@@ -20,6 +20,8 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+import sys
+
 import six
 
 from google.protobuf import message
@@ -206,8 +208,8 @@ class Profiler(object):
     try:
       tfprof_node.ParseFromString(
           print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
-    except message.DecodeError as _:
-      pass
+    except message.DecodeError as e:
+      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
     return tfprof_node
 
   def profile_operations(self, options):
@@ -223,8 +225,8 @@ class Profiler(object):
     try:
       tfprof_node.ParseFromString(
           print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
-    except message.DecodeError as _:
-      pass
+    except message.DecodeError as e:
+      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
     return tfprof_node
 
   def profile_name_scope(self, options):
@@ -240,8 +242,8 @@ class Profiler(object):
     try:
       tfprof_node.ParseFromString(
           print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
-    except message.DecodeError as _:
-      pass
+    except message.DecodeError as e:
+      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
     return tfprof_node
 
   def profile_graph(self, options):
@@ -257,8 +259,8 @@ class Profiler(object):
     try:
       tfprof_node.ParseFromString(
           print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
-    except message.DecodeError as _:
-      pass
+    except message.DecodeError as e:
+      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
     return tfprof_node
 
   def advise(self, options):
@@ -331,9 +333,8 @@ def profile(graph,
                             opts.SerializeToString())
     try:
       tfprof_node.ParseFromString(ret)
-    except message.DecodeError as _:
-      pass
-      # sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
+    except message.DecodeError as e:
+      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
 
   elif cmd == 'graph' or cmd == 'scope':
     tfprof_node = tfprof_output_pb2.GraphNodeProto()
@@ -345,9 +346,8 @@ def profile(graph,
                             opts.SerializeToString())
     try:
       tfprof_node.ParseFromString(ret)
-    except message.DecodeError as _:
-      pass
-      # sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
+    except message.DecodeError as e:
+      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
   else:
     raise errors.InvalidArgumentError(
         None, None, 'unknown cmd: %s\n' % cmd)

From 3a63bce95f67854b6745cb43e0e9feb1e93587f1 Mon Sep 17 00:00:00 2001
From: Russell Power
Date: Tue, 14 Nov 2017 15:57:27 -0800
Subject: [PATCH 034/104] Make the definition of summary operations a warning
 instead of raising an exception.

PiperOrigin-RevId: 175748972
---
 tensorflow/contrib/tpu/python/tpu/tpu.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/tensorflow/contrib/tpu/python/tpu/tpu.py b/tensorflow/contrib/tpu/python/tpu/tpu.py
index 9aa5a9c78db..f3ddc097544 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu.py
@@ -29,6 +29,7 @@ from tensorflow.python.framework import ops
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import control_flow_ops
 from tensorflow.python.ops import variable_scope
+from tensorflow.python.platform import tf_logging as logging
 
 _SUMMARY_OPS = ("ScalarSummary",)
 
@@ -111,7 +112,8 @@ class TPUReplicateContext(control_flow_ops.ControlFlowContext):
       raise ValueError("Placeholder %s is not supported." % op.name)
 
     if op.type in _SUMMARY_OPS:
-      raise ValueError("Summary operations are not currently supported.")
+      logging.warning(
+          "Summary operations are not currently supported (%s)" % op.name)
 
     if any(x.dtype._is_ref_dtype for x in op.inputs):
       raise NotImplementedError(

From 205ff0f7592c60ab09fc705f2c5501d8547e83be Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 14 Nov 2017 16:37:32 -0800
Subject: [PATCH 035/104] [TF:XLA] Added tf_xla_cpu_global_jit flag to
 TF_XLA_FLAGS environment variable to enable global JIT compilation for CPU
 via SessionOptions.

By default, global JIT compilation for CPU via SessionOptions is disabled.
When TF_XLA_FLAGS=--tf_xla_cpu_global_jit is set, the value of the
enable_jit_by_default variable in mark_for_compilation_pass.cc is ignored,
allowing XLA to use JIT compilation for the whole graph according to the
SessionOptions setting.

Unless tf_xla_cpu_global_jit is explicitly set via TF_XLA_FLAGS, this code
change should have no effect on TensorFlow or XLA execution.
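A minimal usage sketch (not part of the original commit message): the flag is
read from the TF_XLA_FLAGS environment variable, and the per-session JIT level
is still chosen through ConfigProto/SessionOptions. The session body below is
a placeholder.

  import os

  # The flag is parsed from TF_XLA_FLAGS, so set it before TensorFlow reads it.
  os.environ['TF_XLA_FLAGS'] = '--tf_xla_cpu_global_jit'

  import tensorflow as tf

  config = tf.ConfigProto()
  # Request global JIT via SessionOptions; with the flag above this now also
  # covers operators placed on the CPU device.
  config.graph_options.optimizer_options.global_jit_level = (
      tf.OptimizerOptions.ON_1)

  with tf.Session(config=config) as sess:
    pass  # build and run a graph as usual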
RELNOTES: n/a
PiperOrigin-RevId: 175754729
---
 .../legacy_flags/mark_for_compilation_pass_flags.cc | 32 ++++++++++---------
 .../legacy_flags/mark_for_compilation_pass_flags.h  |  2 ++
 tensorflow/compiler/jit/mark_for_compilation_pass.cc | 13 ++++++--
 3 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.cc b/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.cc
index 09aee39d8cd..4bc209b7ecf 100644
--- a/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.cc
+++ b/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.cc
@@ -39,21 +39,23 @@ static void AllocateFlags() {
   flags->tf_xla_min_cluster_size = 2;
   flags->tf_xla_max_cluster_size = std::numeric_limits<int32>::max();
   flags->tf_xla_clustering_debug = false;
-  flag_list = new std::vector<Flag>({
-      Flag("tf_xla_auto_jit", &flags->tf_xla_auto_jit,
-           "Control compilation of operators into XLA computations on CPU and "
-           "GPU devices. 0 = use ConfigProto setting; -1 = off; 1 = on for "
-           "things very likely to be improved; 2 = on for everything. "
-           "Experimental."),
-      Flag("tf_xla_min_cluster_size", &flags->tf_xla_min_cluster_size,
-           "Minimum number of operators in an XLA compilation. Ignored for "
-           "operators placed on an XLA device or operators explicitly marked "
-           "for compilation."),
-      Flag("tf_xla_max_cluster_size", &flags->tf_xla_max_cluster_size,
-           "Maximum number of operators in an XLA compilation."),
-      Flag("tf_xla_clustering_debug", &flags->tf_xla_clustering_debug,
-           "Dump graphs during XLA compilation."),
-  });
+  flags->tf_xla_cpu_global_jit = false;
+  flag_list = new std::vector<Flag>(
+      {Flag("tf_xla_auto_jit", &flags->tf_xla_auto_jit,
+            "Control compilation of operators into XLA computations on CPU and "
+            "GPU devices. 0 = use ConfigProto setting; -1 = off; 1 = on for "
+            "things very likely to be improved; 2 = on for everything. "
+            "Experimental."),
+       Flag("tf_xla_min_cluster_size", &flags->tf_xla_min_cluster_size,
+            "Minimum number of operators in an XLA compilation. Ignored for "
+            "operators placed on an XLA device or operators explicitly marked "
+            "for compilation."),
+       Flag("tf_xla_max_cluster_size", &flags->tf_xla_max_cluster_size,
+            "Maximum number of operators in an XLA compilation."),
+       Flag("tf_xla_clustering_debug", &flags->tf_xla_clustering_debug,
+            "Dump graphs during XLA compilation."),
+       Flag("tf_xla_cpu_global_jit", &flags->tf_xla_cpu_global_jit,
+            "Enables global JIT compilation for CPU via SessionOptions.")});
   xla::legacy_flags::ParseFlagsFromEnv(*flag_list);
 }
 
diff --git a/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.h b/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.h
index 24f80507428..e1ccd7ddb87 100644
--- a/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.h
+++ b/tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.h
@@ -46,6 +46,8 @@ typedef struct {
   int32 tf_xla_max_cluster_size;  // Maximum number of operators in an XLA
                                   // compilation.
   bool tf_xla_clustering_debug;   // Dump graphs during XLA compilation.
+  bool tf_xla_cpu_global_jit;     // Enables global JIT compilation for CPU
+                                  // via SessionOptions.
 } MarkForCompilationPassFlags;
 
 // Return a pointer to the MarkForCompilationPassFlags struct;
diff --git a/tensorflow/compiler/jit/mark_for_compilation_pass.cc b/tensorflow/compiler/jit/mark_for_compilation_pass.cc
index 78d0aa86a8f..74c9791f5ea 100644
--- a/tensorflow/compiler/jit/mark_for_compilation_pass.cc
+++ b/tensorflow/compiler/jit/mark_for_compilation_pass.cc
@@ -290,9 +290,11 @@ Status MarkForCompilationPass::Run(
     global_jit_level =
        static_cast<OptimizerOptions::GlobalJitLevel>(flags->tf_xla_auto_jit);
   }
+  bool cpu_global_jit = flags->tf_xla_cpu_global_jit;
   const FunctionLibraryDefinition* fld = options.flib_def;
-  auto is_compilable = [global_jit_level, fld](const Node* node,
-                                               const DeviceType& device_type) {
+
+  auto is_compilable = [global_jit_level, cpu_global_jit, fld](
+      const Node* node, const DeviceType& device_type) {
     const XlaOpRegistry::DeviceRegistration* registration;
     if (!XlaOpRegistry::GetCompilationDevice(device_type.type(),
                                              &registration)) {
@@ -315,7 +317,11 @@ Status MarkForCompilationPass::Run(
     if (status.ok()) return compile;
 
     // Otherwise use the value of global_jit_level.
-    return registration->enable_jit_by_default && global_jit_level > 0;
+    // Ignore enable_jit_by_default if global jit compilation for CPU
+    // is explicitly requested via tf_xla_cpu_global_jit flag
+    bool ignore_registration = cpu_global_jit && device_type == DEVICE_CPU;
+    return (ignore_registration || registration->enable_jit_by_default) &&
+           global_jit_level > 0;
   };
   return RunImpl(options, is_compilable);
 }
@@ -556,6 +562,7 @@ Status MarkForCompilationPass::RunImpl(
     if (cluster_sizes[cluster] >= min_cluster_size || marked_for_compilation ||
         registration->requires_compilation) {
       string& name = cluster_names[cluster];
+
       if (name.empty()) {
         name = strings::StrCat("cluster_", cluster_sequence_num++);
       }

From 94b275420b2c5310f37d0bda3d329d7e0d0b5e99 Mon Sep 17 00:00:00 2001
From: Peter Hawkins
Date: Tue, 14 Nov 2017 16:54:35 -0800
Subject: [PATCH 036/104] Replace uses of @%ws% with @org_tensorflow in LLVM
 BUILD file.

@%ws% existed to work around a bug in older Bazel versions. The minimum Bazel
version no longer has this issue so we can just write @org_tensorflow.

Fixes #14445.

PiperOrigin-RevId: 175756784
---
 third_party/llvm/llvm.BUILD | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/third_party/llvm/llvm.BUILD b/third_party/llvm/llvm.BUILD
index 97b833e49d5..5344525ba8b 100644
--- a/third_party/llvm/llvm.BUILD
+++ b/third_party/llvm/llvm.BUILD
@@ -7,18 +7,18 @@ licenses(["notice"])
 exports_files(["LICENSE.TXT"])
 
 load(
-    "@%ws%//third_party/llvm:llvm.bzl",
+    "@org_tensorflow//third_party/llvm:llvm.bzl",
     "gentbl",
     "expand_cmake_vars",
     "llvm_target_cmake_vars",
     "cmake_var_string",
 )
 load(
-    "@%ws%//third_party:common.bzl",
+    "@org_tensorflow//third_party:common.bzl",
     "template_rule",
 )
 
-package(default_visibility = ["@%ws%//tensorflow/compiler/xla:internal"])
+package(default_visibility = ["//visibility:public"])
 
 llvm_host_triple = "x86_64-unknown-linux_gnu"
@@ -145,11 +145,11 @@ darwin_cmake_vars = {
 # TODO(phawkins): use a better method to select the right host triple, rather
 # than hardcoding x86_64.
 all_cmake_vars = select({
-    "@%ws%//tensorflow:darwin": cmake_var_string(
+    "@org_tensorflow//tensorflow:darwin": cmake_var_string(
         cmake_vars + llvm_target_cmake_vars("X86", "x86_64-apple-darwin") +
         darwin_cmake_vars,
     ),
-    "@%ws%//tensorflow:linux_ppc64le": cmake_var_string(
+    "@org_tensorflow//tensorflow:linux_ppc64le": cmake_var_string(
         cmake_vars +
         llvm_target_cmake_vars("PowerPC", "powerpc64le-unknown-linux_gnu") +
         linux_cmake_vars,

From 95c46f90474b3ae3996a8596bf5a53af9c52b290 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Tue, 14 Nov 2017 16:59:20 -0800
Subject: [PATCH 037/104] Add head name to tf.contrib.estimator.regression_head
 metrics

PiperOrigin-RevId: 175757275
---
 tensorflow/python/estimator/canned/head.py   |  2 +-
 .../python/estimator/canned/head_test.py     | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/tensorflow/python/estimator/canned/head.py b/tensorflow/python/estimator/canned/head.py
index eaed412c8bc..62fea058676 100644
--- a/tensorflow/python/estimator/canned/head.py
+++ b/tensorflow/python/estimator/canned/head.py
@@ -1081,7 +1081,7 @@ class _RegressionHeadWithMeanSquaredErrorLoss(_Head):
       if mode == model_fn.ModeKeys.EVAL:
         # Estimator already adds a metric for loss.
         eval_metric_ops = {
-            metric_keys.MetricKeys.LOSS_MEAN:
+            _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN):
                 metrics_lib.mean(
                     # Both values and weights here are reduced, scalar Tensors.
                     # values is the actual mean we want -- weights represents
diff --git a/tensorflow/python/estimator/canned/head_test.py b/tensorflow/python/estimator/canned/head_test.py
index 4497cd26f2d..f3afd84125d 100644
--- a/tensorflow/python/estimator/canned/head_test.py
+++ b/tensorflow/python/estimator/canned/head_test.py
@@ -2325,6 +2325,24 @@ class RegressionHeadWithMeanSquaredErrorLossTest(test.TestCase):
       self.assertAllClose(expected_loss_mean, loss_mean)
       self.assertAllClose(expected_loss_mean, loss_mean_value_op.eval())
 
+  def test_eval_metric_ops_with_head_name_for_regression(self):
+    head = head_lib._regression_head_with_mean_squared_error_loss(
+        name='some_regression_head')
+    logits = np.array(((1,), (9,)), dtype=np.float32)
+    labels = np.array(((1,), (1,)), dtype=np.int64)
+    features = {'x': np.array(((42,),), dtype=np.int32)}
+    # Create estimator spec.
+    spec = head.create_estimator_spec(
+        features=features,
+        mode=model_fn.ModeKeys.EVAL,
+        logits=logits,
+        labels=labels)
+
+    expected_metric_keys = [
+        '{}/some_regression_head'.format(metric_keys.MetricKeys.LOSS_MEAN),
+    ]
+    self.assertItemsEqual(expected_metric_keys, spec.eval_metric_ops.keys())
+
   def test_train_create_loss(self):
     head = head_lib._regression_head_with_mean_squared_error_loss()
     logits = np.array(((45,), (41,),), dtype=np.float32)

From 48b25d0a1d71fb426b5765a88785b35a4327e4f5 Mon Sep 17 00:00:00 2001
From: Anjali Sridhar
Date: Tue, 14 Nov 2017 17:03:58 -0800
Subject: [PATCH 038/104] Update tf.keras to the Keras 2.0.9 API.
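One user-visible change in this update worth illustrating (a sketch, not part
of the original commit message; the array values are arbitrary):
cosine_proximity now sums rather than averages over the last axis, so the
per-sample loss lies in [-1, 1].

  import numpy as np
  from tensorflow.python.keras._impl.keras import backend as K
  from tensorflow.python.keras._impl.keras import losses

  y_true = K.constant(np.array([[0.0, 1.0, 0.0, 0.0]]))
  y_pred = K.constant(np.array([[0.1, 0.7, 0.1, 0.1]]))
  # Previously -K.mean(...) gave roughly -0.24 here; with this change
  # -K.sum(...) gives roughly -0.97.
  print(K.eval(losses.cosine_proximity(y_true, y_pred)))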
PiperOrigin-RevId: 175757949 --- .../keras/_impl/keras/callbacks_test.py | 1 - tensorflow/python/keras/_impl/keras/losses.py | 2 +- tensorflow/python/keras/_impl/keras/models.py | 73 ++++-- .../keras/_impl/keras/preprocessing/image.py | 222 +++++++++++------- .../_impl/keras/preprocessing/image_test.py | 2 + .../_impl/keras/preprocessing/sequence.py | 2 +- .../keras/_impl/keras/utils/__init__.py | 1 + .../keras/_impl/keras/utils/data_utils.py | 108 +++++---- .../keras/_impl/keras/utils/generic_utils.py | 66 ++++-- .../_impl/keras/wrappers/scikit_learn.py | 4 +- .../golden/tensorflow.keras.-sequential.pbtxt | 4 +- .../tensorflow.keras.models.-sequential.pbtxt | 4 +- ...processing.image.-directory-iterator.pbtxt | 5 + ....keras.preprocessing.image.-iterator.pbtxt | 5 + ...ocessing.image.-numpy-array-iterator.pbtxt | 5 + ...tensorflow.keras.preprocessing.image.pbtxt | 2 +- ...flow.keras.utils.-generator-enqueuer.pbtxt | 2 +- 17 files changed, 332 insertions(+), 176 deletions(-) diff --git a/tensorflow/python/keras/_impl/keras/callbacks_test.py b/tensorflow/python/keras/_impl/keras/callbacks_test.py index 9f578a0fab3..6924a8926b6 100644 --- a/tensorflow/python/keras/_impl/keras/callbacks_test.py +++ b/tensorflow/python/keras/_impl/keras/callbacks_test.py @@ -571,7 +571,6 @@ class KerasCallbacksTest(test.TestCase): loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) - tsb = keras.callbacks.TensorBoard( log_dir=temp_dir, histogram_freq=1, write_images=True, write_grads=True, batch_size=5) diff --git a/tensorflow/python/keras/_impl/keras/losses.py b/tensorflow/python/keras/_impl/keras/losses.py index 7c6b304622a..da0984d3c33 100644 --- a/tensorflow/python/keras/_impl/keras/losses.py +++ b/tensorflow/python/keras/_impl/keras/losses.py @@ -91,7 +91,7 @@ def poisson(y_true, y_pred): def cosine_proximity(y_true, y_pred): y_true = K.l2_normalize(y_true, axis=-1) y_pred = K.l2_normalize(y_pred, axis=-1) - return -K.mean(y_true * y_pred, axis=-1) + return -K.sum(y_true * y_pred, axis=-1) # Aliases. diff --git a/tensorflow/python/keras/_impl/keras/models.py b/tensorflow/python/keras/_impl/keras/models.py index 06941e4bac0..046fd116337 100644 --- a/tensorflow/python/keras/_impl/keras/models.py +++ b/tensorflow/python/keras/_impl/keras/models.py @@ -716,25 +716,46 @@ class Sequential(Model): metrics=None, sample_weight_mode=None, weighted_metrics=None, + target_tensors=None, **kwargs): - """Configures the learning process. + """Configures the model for training. Arguments: - optimizer: str (name of optimizer) or optimizer object. + optimizer: String (name of optimizer) or optimizer object. See [optimizers](/optimizers). - loss: str (name of objective function) or objective function. + loss: String (name of objective function) or objective function. See [losses](/losses). - metrics: list of metrics to be evaluated by the model + If the model has multiple outputs, you can use a different loss + on each output by passing a dictionary or a list of losses. + The loss value that will be minimized by the model + will then be the sum of all individual losses. + metrics: List of metrics to be evaluated by the model during training and testing. Typically you will use `metrics=['accuracy']`. - See [metrics](/metrics). - sample_weight_mode: if you need to do timestep-wise - sample weighting (2D weights), set this to "temporal". - "None" defaults to sample-wise weights (1D). 
- weighted_metrics: list of metrics to be evaluated and weighted - by `sample_weight` or `class_weight` during training and testing. - **kwargs: These are passed into `tf.Session.run`. - + To specify different metrics for different outputs of a + multi-output model, you could also pass a dictionary, + such as `metrics={'output_a': 'accuracy'}`. + sample_weight_mode: If you need to do timestep-wise + sample weighting (2D weights), set this to `"temporal"`. + `None` defaults to sample-wise weights (1D). + If the model has multiple outputs, you can use a different + `sample_weight_mode` on each output by passing a + dictionary or a list of modes. + weighted_metrics: List of metrics to be evaluated and weighted + by sample_weight or class_weight during training and testing. + target_tensors: By default, Keras will create placeholders for the + model's target, which will be fed with the target data during + training. If instead you would like to use your own + target tensors (in turn, Keras will not expect external + Numpy data for these targets at training time), you + can specify them via the `target_tensors` argument. It can be + a single tensor (for a single-output model), a list of tensors, + or a dict mapping output names to target tensors. + **kwargs: When using the Theano/CNTK backends, these arguments + are passed into K.function. When using the TensorFlow backend, + these arguments are passed into `tf.Session.run`. + Raises: + ValueError: In case of invalid arguments for Example: ```python model = Sequential() @@ -754,18 +775,19 @@ class Sequential(Model): metrics=metrics, sample_weight_mode=sample_weight_mode, weighted_metrics=weighted_metrics, + target_tensors=target_tensors, **kwargs) self.optimizer = self.model.optimizer self.loss = self.model.loss - self.total_loss = self.model.total_loss - self.loss_weights = self.model.loss_weights self.metrics = self.model.metrics + self.loss_weights = self.model.loss_weights + self.sample_weight_mode = self.model.sample_weight_mode self.weighted_metrics = self.model.weighted_metrics + self.targets = self.model.targets self.metrics_tensors = self.model.metrics_tensors self.metrics_names = self.model.metrics_names - self.sample_weight_mode = self.model.sample_weight_mode self.sample_weights = self.model.sample_weights - self.targets = self.model.targets + self.total_loss = self.model.total_loss def fit(self, x, @@ -787,7 +809,11 @@ class Sequential(Model): (if the model has multiple inputs). y: labels, as a Numpy array. batch_size: integer. Number of samples per gradient update. - epochs: integer, the number of epochs to train the model. + epochs: integer. Number of epochs to train the model. + Note that in conjunction with initial_epoch, the parameter + epochs is to be understood as "final epoch". The model is + not trained for a number of steps given by epochs, but + until the epoch epochs is reached. verbose: 0 for no logging to stdout, 1 for progress bar logging, 2 for one log line per epoch. callbacks: list of `keras.callbacks.Callback` instances. @@ -814,8 +840,8 @@ class Sequential(Model): to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). - initial_epoch: epoch at which to start training - (useful for resuming a previous training run) + initial_epoch: Epoch at which to start training + (useful for resuming a previous training run). Returns: A `History` object. 
Its `History.history` attribute is @@ -1003,6 +1029,7 @@ class Sequential(Model): max_queue_size=10, workers=1, use_multiprocessing=False, + shuffle=True, initial_epoch=0, **kwargs): """Fits the model on data generated batch-by-batch by a Python generator. @@ -1026,6 +1053,10 @@ class Sequential(Model): be equal to the number of unique samples of your dataset divided by the batch size. epochs: Integer, total number of iterations on the data. + Note that in conjunction with initial_epoch, the parameter + epochs is to be understood as "final epoch". The model is + not trained for n steps given by epochs, but until the + epoch epochs is reached. verbose: Verbosity mode, 0, 1, or 2. callbacks: List of callbacks to be called during training. validation_data: This can be either @@ -1049,6 +1080,9 @@ class Sequential(Model): non picklable arguments to the generator as they can't be passed easily to children processes. + shuffle: Whether to shuffle the order of the batches at + the beginning of each epoch. Only used with instances + of `Sequence` (keras.utils.Sequence). initial_epoch: Epoch at which to start training (useful for resuming a previous training run) **kwargs: support for legacy arguments. @@ -1105,6 +1139,7 @@ class Sequential(Model): max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, + shuffle=shuffle, initial_epoch=initial_epoch) def evaluate_generator(self, diff --git a/tensorflow/python/keras/_impl/keras/preprocessing/image.py b/tensorflow/python/keras/_impl/keras/preprocessing/image.py index 052a8addc4c..12dc718cd79 100644 --- a/tensorflow/python/keras/_impl/keras/preprocessing/image.py +++ b/tensorflow/python/keras/_impl/keras/preprocessing/image.py @@ -31,6 +31,7 @@ import numpy as np from six.moves import range # pylint: disable=redefined-builtin from tensorflow.python.keras._impl.keras import backend as K +from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence from tensorflow.python.platform import tf_logging as logging @@ -47,6 +48,21 @@ except ImportError: ndi = None # pylint: enable=g-import-not-at-top +if pil_image is not None: + _PIL_INTERPOLATION_METHODS = { + 'nearest': pil_image.NEAREST, + 'bilinear': pil_image.BILINEAR, + 'bicubic': pil_image.BICUBIC, + } + # These methods were only introduced in version 3.4.0 (2016). + if hasattr(pil_image, 'HAMMING'): + _PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING + if hasattr(pil_image, 'BOX'): + _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX + # This method is new in version 1.1.3 (2013). + if hasattr(pil_image, 'LANCZOS'): + _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS + def random_rotation(x, rg, @@ -172,10 +188,8 @@ def random_zoom(x, (one of `{'constant', 'nearest', 'reflect', 'wrap'}`). cval: Value used for points outside the boundaries of the input if `mode='constant'`. - Returns: Zoomed Numpy image tensor. - Raises: ValueError: if `zoom_range` isn't a tuple. """ @@ -344,7 +358,7 @@ def img_to_array(img, data_format=None): return x -def load_img(path, grayscale=False, target_size=None): +def load_img(path, grayscale=False, target_size=None, interpolation='nearest'): """Loads an image into PIL format. Arguments: @@ -352,12 +366,19 @@ def load_img(path, grayscale=False, target_size=None): grayscale: Boolean, whether to load the image as grayscale. target_size: Either `None` (default to original size) or tuple of ints `(img_height, img_width)`. 
+ interpolation: Interpolation method used to resample the image if the + target size is different from that of the loaded image. + Supported methods are "nearest", "bilinear", and "bicubic". + If PIL version 1.1.3 or newer is installed, "lanczos" is also + supported. If PIL version 3.4.0 or newer is installed, "box" and + "hamming" are also supported. By default, "nearest" is used. Returns: A PIL Image instance. Raises: ImportError: if PIL is not available. + ValueError: if interpolation method is not supported. """ if pil_image is None: raise ImportError('Could not import PIL.Image. ' @@ -369,14 +390,21 @@ def load_img(path, grayscale=False, target_size=None): else: if img.mode != 'RGB': img = img.convert('RGB') - if target_size: - hw_tuple = (target_size[1], target_size[0]) - if img.size != hw_tuple: - img = img.resize(hw_tuple) + if target_size is not None: + width_height_tuple = (target_size[1], target_size[0]) + if img.size != width_height_tuple: + if interpolation not in _PIL_INTERPOLATION_METHODS: + raise ValueError( + 'Invalid interpolation method {} specified. Supported ' + 'methods are {}'.format( + interpolation, + ', '.join(_PIL_INTERPOLATION_METHODS.keys()))) + resample = _PIL_INTERPOLATION_METHODS[interpolation] + img = img.resize(width_height_tuple, resample) return img -def list_pictures(directory, ext='jpg|jpeg|bmp|png'): +def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'): return [ os.path.join(root, f) for root, _, files in os.walk(directory) for f in files @@ -401,7 +429,7 @@ class ImageDataGenerator(object): zoom_range: amount of zoom. if scalar z, zoom will be randomly picked in the range [1-z, 1+z]. A sequence of two can be passed instead to select this range. - channel_shift_range: shift range for each channels. + channel_shift_range: shift range for each channel. fill_mode: points outside the boundaries are filled according to the given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default is 'nearest'. @@ -558,12 +586,10 @@ class ImageDataGenerator(object): x = self.preprocessing_function(x) if self.rescale: x *= self.rescale - # x is a single image, so it doesn't have image number at index 0 - img_channel_axis = self.channel_axis - 1 if self.samplewise_center: - x -= np.mean(x, axis=img_channel_axis, keepdims=True) + x -= np.mean(x, keepdims=True) if self.samplewise_std_normalization: - x /= (np.std(x, axis=img_channel_axis, keepdims=True) + 1e-7) + x /= np.std(x, keepdims=True) + 1e-7 if self.featurewise_center: if self.mean is not None: @@ -762,49 +788,76 @@ class ImageDataGenerator(object): np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T) -class Iterator(object): - """Abstract base class for image data iterators. +class Iterator(Sequence): + """Base class for image data iterators. + + Every `Iterator` must implement the `_get_batches_of_transformed_samples` + method. Arguments: - n: Integer, total number of samples in the dataset to loop over. - batch_size: Integer, size of a batch. - shuffle: Boolean, whether to shuffle the data between epochs. - seed: Random seeding for data shuffling. + n: Integer, total number of samples in the dataset to loop over. + batch_size: Integer, size of a batch. + shuffle: Boolean, whether to shuffle the data between epochs. + seed: Random seeding for data shuffling. 
""" def __init__(self, n, batch_size, shuffle, seed): self.n = n self.batch_size = batch_size + self.seed = seed self.shuffle = shuffle self.batch_index = 0 self.total_batches_seen = 0 self.lock = threading.Lock() - self.index_generator = self._flow_index(n, batch_size, shuffle, seed) + self.index_array = None + self.index_generator = self._flow_index() + + def _set_index_array(self): + self.index_array = np.arange(self.n) + if self.shuffle: + self.index_array = np.random.permutation(self.n) + + def __getitem__(self, idx): + if idx >= len(self): + raise ValueError('Asked to retrieve element {idx}, ' + 'but the Sequence ' + 'has length {length}'.format(idx=idx, + length=len(self))) + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) + self.total_batches_seen += 1 + if self.index_array is None: + self._set_index_array() + index_array = self.index_array[self.batch_size * idx:self.batch_size * + (idx + 1)] + return self._get_batches_of_transformed_samples(index_array) + + def __len__(self): + length = int(np.ceil(self.n / float(self.batch_size))) + return np.maximum(length, 0) + + def on_epoch_end(self): + self._set_index_array() def reset(self): self.batch_index = 0 - def _flow_index(self, n, batch_size=32, shuffle=False, seed=None): + def _flow_index(self): # Ensure self.batch_index is 0. self.reset() while 1: - if seed is not None: - np.random.seed(seed + self.total_batches_seen) + if self.seed is not None: + np.random.seed(self.seed + self.total_batches_seen) if self.batch_index == 0: - index_array = np.arange(n) - if shuffle: - index_array = np.random.permutation(n) + self._set_index_array() - current_index = (self.batch_index * batch_size) % n - if n > current_index + batch_size: - current_batch_size = batch_size + current_index = (self.batch_index * self.batch_size) % self.n + if self.n > current_index + self.batch_size: self.batch_index += 1 else: - current_batch_size = n - current_index self.batch_index = 0 self.total_batches_seen += 1 - yield (index_array[current_index:current_index + current_batch_size], - current_index, current_batch_size) + yield self.index_array[current_index:current_index + self.batch_size] def __iter__(self): # pylint: disable=non-iterator-returned # Needed if we want to do something like: @@ -814,6 +867,16 @@ class Iterator(object): def __next__(self, *args, **kwargs): return self.next(*args, **kwargs) + def _get_batches_of_transformed_samples(self, index_array): + """Gets a batch of transformed samples. + + Arguments: + index_array: array of sample indices to include in batch. + Returns: + A batch of transformed samples. + """ + raise NotImplementedError + class NumpyArrayIterator(Iterator): """Iterator yielding data from a Numpy array. 
@@ -883,6 +946,26 @@ class NumpyArrayIterator(Iterator): super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle, seed) + def _get_batches_of_transformed_samples(self, index_array): + batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]), + dtype=K.floatx()) + for i, j in enumerate(index_array): + x = self.x[j] + x = self.image_data_generator.random_transform(x.astype(K.floatx())) + x = self.image_data_generator.standardize(x) + batch_x[i] = x + if self.save_to_dir: + for i, j in enumerate(index_array): + img = array_to_img(batch_x[i], self.data_format, scale=True) + fname = '{prefix}_{index}_{hash}.{format}'.format( + prefix=self.save_prefix, index=j, hash=np.random.randint(1e4), + format=self.save_format) + img.save(os.path.join(self.save_to_dir, fname)) + if self.y is None: + return batch_x + batch_y = self.y[index_array] + return batch_x, batch_y + def next(self): """For python 2.x. @@ -892,30 +975,10 @@ class NumpyArrayIterator(Iterator): # Keeps under lock only the mechanism which advances # the indexing of each batch. with self.lock: - index_array, current_index, current_batch_size = next( - self.index_generator) + index_array = next(self.index_generator) # The transformation of images is not under thread lock # so it can be done in parallel - batch_x = np.zeros( - tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx()) - for i, j in enumerate(index_array): - x = self.x[j] - x = self.image_data_generator.random_transform(x.astype(K.floatx())) - x = self.image_data_generator.standardize(x) - batch_x[i] = x - if self.save_to_dir: - for i in range(current_batch_size): - img = array_to_img(batch_x[i], self.data_format, scale=True) - fname = '{prefix}_{index}_{hash}.{format}'.format( - prefix=self.save_prefix, - index=current_index + i, - hash=np.random.randint(1e4), - format=self.save_format) - img.save(os.path.join(self.save_to_dir, fname)) - if self.y is None: - return batch_x - batch_y = self.y[index_array] - return batch_x, batch_y + return self._get_batches_of_transformed_samples(index_array) def _count_valid_files_in_directory(directory, white_list_formats, @@ -939,7 +1002,7 @@ def _count_valid_files_in_directory(directory, white_list_formats, samples = 0 for _, _, files in _recursive_list(directory): - for fname in files: + for fname in sorted(files): is_valid = False for extension in white_list_formats: if fname.lower().endswith('.' + extension): @@ -1006,7 +1069,7 @@ class DirectoryIterator(Iterator): to use for random transformations and normalization. target_size: tuple of integers, dimensions to resize input images to. color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images. - classes: Optional list of strings, names of sudirectories + classes: Optional list of strings, names of subdirectories containing images from each class (e.g. `["dogs", "cats"]`). It will be computed automatically if not set. class_mode: Mode for yielding the targets: @@ -1086,7 +1149,7 @@ class DirectoryIterator(Iterator): for subdir in sorted(os.listdir(directory)): if os.path.isdir(os.path.join(directory, subdir)): classes.append(subdir) - self.num_class = len(classes) + self.num_classes = len(classes) self.class_indices = dict(zip(classes, range(len(classes)))) pool = multiprocessing.pool.ThreadPool() @@ -1099,7 +1162,7 @@ class DirectoryIterator(Iterator): for subdir in classes))) print('Found %d images belonging to %d classes.' 
% (self.samples, - self.num_class)) + self.num_classes)) # second, build an index of the images in the different class subfolders results = [] @@ -1121,39 +1184,25 @@ class DirectoryIterator(Iterator): super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed) - def next(self): - """For python 2.x. - - Returns: - The next batch. - """ - with self.lock: - index_array, current_index, current_batch_size = next( - self.index_generator) - # The transformation of images is not under thread lock - # so it can be done in parallel - batch_x = np.zeros( - (current_batch_size,) + self.image_shape, dtype=K.floatx()) + def _get_batches_of_transformed_samples(self, index_array): + batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx()) grayscale = self.color_mode == 'grayscale' # build batch of image data for i, j in enumerate(index_array): fname = self.filenames[j] - img = load_img( - os.path.join(self.directory, fname), - grayscale=grayscale, - target_size=self.target_size) + img = load_img(os.path.join(self.directory, fname), + grayscale=grayscale, + target_size=self.target_size) x = img_to_array(img, data_format=self.data_format) x = self.image_data_generator.random_transform(x) x = self.image_data_generator.standardize(x) batch_x[i] = x # optionally save augmented images to disk for debugging purposes if self.save_to_dir: - for i in range(current_batch_size): + for i, j in enumerate(index_array): img = array_to_img(batch_x[i], self.data_format, scale=True) fname = '{prefix}_{index}_{hash}.{format}'.format( - prefix=self.save_prefix, - index=current_index + i, - hash=np.random.randint(1e4), + prefix=self.save_prefix, index=j, hash=np.random.randint(1e7), format=self.save_format) img.save(os.path.join(self.save_to_dir, fname)) # build batch of labels @@ -1164,9 +1213,22 @@ class DirectoryIterator(Iterator): elif self.class_mode == 'binary': batch_y = self.classes[index_array].astype(K.floatx()) elif self.class_mode == 'categorical': - batch_y = np.zeros((len(batch_x), self.num_class), dtype=K.floatx()) + batch_y = np.zeros((len(batch_x), self.num_classes), dtype=K.floatx()) for i, label in enumerate(self.classes[index_array]): batch_y[i, label] = 1. else: return batch_x return batch_x, batch_y + + def next(self): + """For python 2.x. + + Returns: + The next batch. 
+ """ + with self.lock: + index_array = next(self.index_generator) + # The transformation of images is not under thread lock + # so it can be done in parallel + return self._get_batches_of_transformed_samples(index_array) + diff --git a/tensorflow/python/keras/_impl/keras/preprocessing/image_test.py b/tensorflow/python/keras/_impl/keras/preprocessing/image_test.py index 19693410e76..c0790b5a514 100644 --- a/tensorflow/python/keras/_impl/keras/preprocessing/image_test.py +++ b/tensorflow/python/keras/_impl/keras/preprocessing/image_test.py @@ -192,6 +192,8 @@ class TestImage(test.TestCase): _ = keras.preprocessing.image.load_img(fname) _ = keras.preprocessing.image.load_img(fname, grayscale=True) _ = keras.preprocessing.image.load_img(fname, target_size=(10, 10)) + _ = keras.preprocessing.image.load_img(fname, target_size=(10, 10), + interpolation='bilinear') # create iterator generator = keras.preprocessing.image.ImageDataGenerator() diff --git a/tensorflow/python/keras/_impl/keras/preprocessing/sequence.py b/tensorflow/python/keras/_impl/keras/preprocessing/sequence.py index a5deec87af7..642f4f2face 100644 --- a/tensorflow/python/keras/_impl/keras/preprocessing/sequence.py +++ b/tensorflow/python/keras/_impl/keras/preprocessing/sequence.py @@ -169,7 +169,7 @@ def skipgrams(sequence, integers (eg. [0, 1, 1 .. ]), if True labels will be categorical eg. [[1,0],[0,1],[0,1] .. ] sampling_table: 1D array of size `vocabulary_size` where the entry i - encodes the probabibily to sample a word of rank i. + encodes the probability to sample a word of rank i. seed: Random seed. Returns: diff --git a/tensorflow/python/keras/_impl/keras/utils/__init__.py b/tensorflow/python/keras/_impl/keras/utils/__init__.py index 78f325cf619..370ae0dd0f0 100644 --- a/tensorflow/python/keras/_impl/keras/utils/__init__.py +++ b/tensorflow/python/keras/_impl/keras/utils/__init__.py @@ -30,6 +30,7 @@ from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object from tensorflow.python.keras._impl.keras.utils.io_utils import HDF5Matrix from tensorflow.python.keras._impl.keras.utils.layer_utils import convert_all_kernels_in_model +from tensorflow.python.keras._impl.keras.utils.layer_utils import print_summary from tensorflow.python.keras._impl.keras.utils.np_utils import normalize from tensorflow.python.keras._impl.keras.utils.np_utils import to_categorical from tensorflow.python.keras._impl.keras.utils.training_utils import multi_gpu_model diff --git a/tensorflow/python/keras/_impl/keras/utils/data_utils.py b/tensorflow/python/keras/_impl/keras/utils/data_utils.py index 0ede7f12f2c..b3a1f640423 100644 --- a/tensorflow/python/keras/_impl/keras/utils/data_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/data_utils.py @@ -70,15 +70,15 @@ if sys.version_info[0] == 2: if content_type is not None: total_size = int(content_type.strip()) count = 0 - while 1: + while True: chunk = response.read(chunk_size) count += 1 - if not chunk: - reporthook(count, total_size, total_size) - break - if reporthook: + if reporthook is not None: reporthook(count, chunk_size, total_size) - yield chunk + if chunk: + yield chunk + else: + break response = urlopen(url, data) with open(filename, 'wb') as fd: @@ -262,9 +262,9 @@ def _hash_file(fpath, algorithm='sha256', chunk_size=65535): Example: ```python - >>> from keras.data_utils import _hash_file - >>> _hash_file('/path/to/file.zip') - 
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + >>> from keras.data_utils import _hash_file + >>> _hash_file('/path/to/file.zip') + 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` Arguments: @@ -318,32 +318,35 @@ class Sequence(object): """Base object for fitting to a sequence of data, such as a dataset. Every `Sequence` must implements the `__getitem__` and the `__len__` methods. + If you want to modify your dataset between epochs you may implement + `on_epoch_end`. The method `__getitem__` should return a complete batch. + Notes: + `Sequence` are a safer way to do multiprocessing. This structure guarantees + that the network will only train once on each sample per epoch which is not + the case with generators. Examples: - ```python - from skimage.io import imread - from skimage.transform import resize - import numpy as np - - # Here, `x_set` is list of path to the images - # and `y_set` are the associated classes. - - class CIFAR10Sequence(Sequence): - def __init__(self, x_set, y_set, batch_size): - self.X,self.y = x_set,y_set - self.batch_size = batch_size - - def __len__(self): - return len(self.X) // self.batch_size - - def __getitem__(self,idx): - batch_x = self.X[idx*self.batch_size:(idx+1)*self.batch_size] - batch_y = self.y[idx*self.batch_size:(idx+1)*self.batch_size] - - return np.array([ - resize(imread(file_name), (200,200)) - for file_name in batch_x]), np.array(batch_y) + from skimage.io import imread + from skimage.transform import resize + import numpy as np + import math + # Here, `x_set` is list of path to the images + # and `y_set` are the associated classes. + class CIFAR10Sequence(Sequence): + def __init__(self, x_set, y_set, batch_size): + self.x, self.y = x_set, y_set + self.batch_size = batch_size + def __len__(self): + return math.ceil(len(self.x) / self.batch_size) + def __getitem__(self, idx): + batch_x = self.x[idx * self.batch_size:(idx + 1) * + self.batch_size] + batch_y = self.y[idx * self.batch_size:(idx + 1) * + self.batch_size] + return np.array([ + resize(imread(file_name), (200, 200)) + for file_name in batch_x]), np.array(batch_y) ``` """ @@ -372,7 +375,7 @@ class Sequence(object): def on_epoch_end(self): """Method called at the end of every epoch. """ - raise NotImplementedError + pass def get_index(ds, i): @@ -397,13 +400,13 @@ class SequenceEnqueuer(object): Examples: ```python - enqueuer = SequenceEnqueuer(...) - enqueuer.start() - datas = enqueuer.get() - for data in datas: - # Use the inputs; training, evaluating, predicting. - # ... stop sometime. - enqueuer.close() + enqueuer = SequenceEnqueuer(...) + enqueuer.start() + datas = enqueuer.get() + for data in datas: + # Use the inputs; training, evaluating, predicting. + # ... stop sometime. + enqueuer.close() ``` The `enqueuer.get()` should be an infinite stream of datas. @@ -549,28 +552,31 @@ class OrderedEnqueuer(SequenceEnqueuer): class GeneratorEnqueuer(SequenceEnqueuer): """Builds a queue out of a data generator. + The provided generator can be finite in which case the class will throw + a `StopIteration` exception. + Used in `fit_generator`, `evaluate_generator`, `predict_generator`. Arguments: - generator: a generator function which endlessly yields data + generator: a generator function which yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, - will be incremented by one for each workers. 
+ will be incremented by one for each worker. """ def __init__(self, generator, use_multiprocessing=False, wait_time=0.05, - random_seed=None): + seed=None): self.wait_time = wait_time self._generator = generator self._use_multiprocessing = use_multiprocessing self._threads = [] self._stop_event = None self.queue = None - self.random_seed = random_seed + self.seed = seed def start(self, workers=1, max_queue_size=10): """Kicks off threads which add data from the generator into the queue. @@ -589,6 +595,8 @@ class GeneratorEnqueuer(SequenceEnqueuer): self.queue.put(generator_output) else: time.sleep(self.wait_time) + except StopIteration: + break except Exception: self._stop_event.set() raise @@ -605,11 +613,11 @@ class GeneratorEnqueuer(SequenceEnqueuer): if self._use_multiprocessing: # Reset random seed else all children processes # share the same seed - np.random.seed(self.random_seed) + np.random.seed(self.seed) thread = multiprocessing.Process(target=data_generator_task) thread.daemon = True - if self.random_seed is not None: - self.random_seed += 1 + if self.seed is not None: + self.seed += 1 else: thread = threading.Thread(target=data_generator_task) self._threads.append(thread) @@ -661,4 +669,8 @@ class GeneratorEnqueuer(SequenceEnqueuer): if inputs is not None: yield inputs else: - time.sleep(self.wait_time) + all_finished = all([not thread.is_alive() for thread in self._threads]) + if all_finished and self.queue.empty(): + raise StopIteration() + else: + time.sleep(self.wait_time) diff --git a/tensorflow/python/keras/_impl/keras/utils/generic_utils.py b/tensorflow/python/keras/_impl/keras/utils/generic_utils.py index 39a10c8650f..efa79b1612f 100644 --- a/tensorflow/python/keras/_impl/keras/utils/generic_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/generic_utils.py @@ -43,7 +43,7 @@ class CustomObjectScope(object): Example: - Consider a custom object `MyObject` + Consider a custom object `MyObject` (e.g. a class): ```python with CustomObjectScope({'MyObject':MyObject}): @@ -271,6 +271,8 @@ class Progbar(object): self.total_width = 0 self.seen_so_far = 0 self.verbose = verbose + self._dynamic_display = (sys.stdout.isatty() or + 'ipykernel' in sys.modules) def update(self, current, values=None, force=False): """Updates the progress bar. 
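A minimal sketch of the `_dynamic_display` idea added to `Progbar.__init__` above, using nothing beyond the standard library; `write_progress` is a hypothetical helper for illustration, not Keras API:

```python
import sys

def write_progress(text):
    # Only rewrite the current line when stdout is interactive (a TTY or an
    # IPython kernel); otherwise append plain newlines so captured logs stay
    # readable instead of filling up with carriage returns.
    dynamic = sys.stdout.isatty() or 'ipykernel' in sys.modules
    if dynamic:
        sys.stdout.write('\r' + text)   # overwrite the previous line in place
    else:
        sys.stdout.write(text + '\n')   # fall back to one log line per update
    sys.stdout.flush()

for step in range(1, 4):
    write_progress('step %d/3' % step)
```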
@@ -294,18 +296,23 @@ class Progbar(object): self.seen_so_far = current now = time.time() + info = ' - %.0fs' % (now - self.start) if self.verbose == 1: - if not force and (now - self.last_update) < self.interval: + if (not force and (now - self.last_update) < self.interval and + current < self.target): return prev_total_width = self.total_width - sys.stdout.write('\b' * prev_total_width) - sys.stdout.write('\r') + if self._dynamic_display: + sys.stdout.write('\b' * prev_total_width) + sys.stdout.write('\r') + else: + sys.stdout.write('\n') - if self.target is not -1: + if self.target is not None: numdigits = int(np.floor(np.log10(self.target))) + 1 - barstr = '%%%dd/%%%dd [' % (numdigits, numdigits) - bar = barstr % (current, self.target) + barstr = '%%%dd/%d [' % (numdigits, self.target) + bar = barstr % current prog = float(current) / self.target prog_width = int(self.width * prog) if prog_width > 0: @@ -318,17 +325,35 @@ class Progbar(object): bar += ']' sys.stdout.write(bar) self.total_width = len(bar) + else: + bar = '%7d/Unknown' % current + + self.total_width = len(bar) + sys.stdout.write(bar) if current: time_per_unit = (now - self.start) / current else: time_per_unit = 0 - eta = time_per_unit * (self.target - current) - info = '' - if current < self.target and self.target is not -1: - info += ' - ETA: %ds' % eta + if self.target is not None and current < self.target: + eta = time_per_unit * (self.target - current) + if eta > 3600: + eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, + eta % 60) + elif eta > 60: + eta_format = '%d:%02d' % (eta // 60, eta % 60) + else: + eta_format = '%ds' % eta + + info = ' - ETA: %s' % eta_format else: - info += ' - %ds' % (now - self.start) + if time_per_unit >= 1: + info += ' %.0fs/step' % time_per_unit + elif time_per_unit >= 1e-3: + info += ' %.0fms/step' % (time_per_unit * 1e3) + else: + info += ' %.0fus/step' % (time_per_unit * 1e6) + for k in self.unique_values: info += ' - %s:' % k if isinstance(self.sum_values[k], list): @@ -342,7 +367,9 @@ class Progbar(object): self.total_width += len(info) if prev_total_width > self.total_width: - info += ((prev_total_width - self.total_width) * ' ') + info += (' ' * (prev_total_width - self.total_width)) + if self.target is not None and current >= self.target: + info += '\n' sys.stdout.write(info) sys.stdout.flush() @@ -350,17 +377,20 @@ class Progbar(object): if current >= self.target: sys.stdout.write('\n') - if self.verbose == 2: - if current >= self.target: - info = '%ds' % (now - self.start) + elif self.verbose == 2: + if self.target is None or current >= self.target: for k in self.unique_values: info += ' - %s:' % k - avg = np.mean(self.sum_values[k][0] / max(1, self.sum_values[k][1])) + avg = np.mean( + self.sum_values[k][0] / max(1, self.sum_values[k][1])) if avg > 1e-3: info += ' %.4f' % avg else: info += ' %.4e' % avg - sys.stdout.write(info + '\n') + info += '\n' + + sys.stdout.write(info) + sys.stdout.flush() self.last_update = now diff --git a/tensorflow/python/keras/_impl/keras/wrappers/scikit_learn.py b/tensorflow/python/keras/_impl/keras/wrappers/scikit_learn.py index ac7bd494062..31ef4773ad6 100644 --- a/tensorflow/python/keras/_impl/keras/wrappers/scikit_learn.py +++ b/tensorflow/python/keras/_impl/keras/wrappers/scikit_learn.py @@ -352,5 +352,5 @@ class KerasRegressor(BaseWrapper): kwargs = self.filter_sk_params(Sequential.evaluate, kwargs) loss = self.model.evaluate(x, y, **kwargs) if isinstance(loss, list): - return loss[0] - return loss + return -loss[0] + return 
-loss diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt index 5076434dbb5..04fe46cedcb 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt @@ -153,7 +153,7 @@ tf_class { } member_method { name: "compile" - argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'sample_weight_mode\', \'weighted_metrics\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\'], " } member_method { name: "compute_mask" @@ -177,7 +177,7 @@ tf_class { } member_method { name: "fit_generator" - argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'initial_epoch\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'0\'], " + argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], " } member_method { name: "from_config" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt index 5034fdff2a6..3946ff4d5f1 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt @@ -153,7 +153,7 @@ tf_class { } member_method { name: "compile" - argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'sample_weight_mode\', \'weighted_metrics\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\'], " + argspec: "args=[\'self\', \'optimizer\', \'loss\', \'metrics\', \'sample_weight_mode\', \'weighted_metrics\', \'target_tensors\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'None\'], " } member_method { name: "compute_mask" @@ -177,7 +177,7 @@ tf_class { } member_method { name: "fit_generator" - argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'initial_epoch\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'0\'], " + argspec: "args=[\'self\', \'generator\', \'steps_per_epoch\', \'epochs\', \'verbose\', \'callbacks\', \'validation_data\', \'validation_steps\', \'class_weight\', \'max_queue_size\', \'workers\', \'use_multiprocessing\', \'shuffle\', \'initial_epoch\'], varargs=None, keywords=kwargs, defaults=[\'1\', \'1\', \'None\', \'None\', \'None\', \'None\', \'10\', \'1\', \'False\', \'True\', \'0\'], " } member_method { name: "from_config" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt 
b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt index 8ad1f32551d..66cd37bb3a3 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-directory-iterator.pbtxt @@ -2,6 +2,7 @@ path: "tensorflow.keras.preprocessing.image.DirectoryIterator" tf_class { is_instance: "" is_instance: "" + is_instance: "" is_instance: "" member_method { name: "__init__" @@ -11,6 +12,10 @@ tf_class { name: "next" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "on_epoch_end" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "reset" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt index d30462a8eb6..69488d63bf1 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-iterator.pbtxt @@ -1,11 +1,16 @@ path: "tensorflow.keras.preprocessing.image.Iterator" tf_class { is_instance: "" + is_instance: "" is_instance: "" member_method { name: "__init__" argspec: "args=[\'self\', \'n\', \'batch_size\', \'shuffle\', \'seed\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "on_epoch_end" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "reset" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt index 841f1c5585e..4ef6e6e99e3 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.-numpy-array-iterator.pbtxt @@ -2,6 +2,7 @@ path: "tensorflow.keras.preprocessing.image.NumpyArrayIterator" tf_class { is_instance: "" is_instance: "" + is_instance: "" is_instance: "" member_method { name: "__init__" @@ -11,6 +12,10 @@ tf_class { name: "next" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" } + member_method { + name: "on_epoch_end" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } member_method { name: "reset" argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt index 56526870335..d28fef69651 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.preprocessing.image.pbtxt @@ -34,7 +34,7 @@ tf_module { } member_method { name: "load_img" - argspec: "args=[\'path\', \'grayscale\', \'target_size\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], " + argspec: "args=[\'path\', \'grayscale\', \'target_size\', \'interpolation\'], varargs=None, keywords=None, defaults=[\'False\', \'None\', \'nearest\'], " } member_method { name: "random_channel_shift" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.utils.-generator-enqueuer.pbtxt 
b/tensorflow/tools/api/golden/tensorflow.keras.utils.-generator-enqueuer.pbtxt index bf27a97cf25..1c5868e711b 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.utils.-generator-enqueuer.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.utils.-generator-enqueuer.pbtxt @@ -5,7 +5,7 @@ tf_class { is_instance: "" member_method { name: "__init__" - argspec: "args=[\'self\', \'generator\', \'use_multiprocessing\', \'wait_time\', \'random_seed\'], varargs=None, keywords=None, defaults=[\'False\', \'0.05\', \'None\'], " + argspec: "args=[\'self\', \'generator\', \'use_multiprocessing\', \'wait_time\', \'seed\'], varargs=None, keywords=None, defaults=[\'False\', \'0.05\', \'None\'], " } member_method { name: "get" From 8ad5cc00f21eb9d6f1811d7ed771f6f042dba1ba Mon Sep 17 00:00:00 2001 From: Jacques Pienaar Date: Tue, 14 Nov 2017 17:08:49 -0800 Subject: [PATCH 039/104] [TFXLA] Add source node and make GetSwitchCluster more conservative. PiperOrigin-RevId: 175758538 --- .../tf2xla/functionalize_control_flow.cc | 55 ++++++++++--------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/tensorflow/compiler/tf2xla/functionalize_control_flow.cc b/tensorflow/compiler/tf2xla/functionalize_control_flow.cc index 6ef4860f358..40a484da098 100644 --- a/tensorflow/compiler/tf2xla/functionalize_control_flow.cc +++ b/tensorflow/compiler/tf2xla/functionalize_control_flow.cc @@ -731,11 +731,12 @@ string DebugString(const Graph& graph, FunctionalizeCond::ClusterHandle::Vector* clusters) { string ret = "digraph {\ncompound=true;labeljust=\"r\";ranksep=0.24\n"; std::map subgraphs; + auto name = [](const Node* n) { + return strings::StrCat(n->type_string(), "_", n->id()); + }; for (Node* n : graph.nodes()) { - if (n->IsOp()) { - strings::StrAppend(&subgraphs[clusters->at(n).Get()], n->id(), - " [label=\"", n->name(), "\"];\n"); - } + strings::StrAppend(&subgraphs[clusters->at(n).Get()], n->id(), " [label=\"", + name(n), "\"];\n"); } for (auto kv : subgraphs) { strings::StrAppend(&ret, "subgraph cluster_", kv.first.ToString(), " {\n", @@ -743,16 +744,11 @@ string DebugString(const Graph& graph, kv.first.ToString(), "\";\n", kv.second, "}\n"); } for (Node* n : graph.nodes()) { - if (!n->IsOp()) { - continue; - } for (Node* in : n->in_nodes()) { - if (in->IsOp()) { - strings::StrAppend(&ret, in->id(), " -> ", n->id(), ";\n"); - } + strings::StrAppend(&ret, in->id(), " -> ", n->id(), ";\n"); } } - return strings::StrCat(ret, "}"); + return strings::StrCat(ret, "} // end"); } string DebugString(const FunctionalizeCond::ClusteredGraph& clustered_graph) { @@ -761,16 +757,24 @@ string DebugString(const FunctionalizeCond::ClusteredGraph& clustered_graph) { return cluster.representative.ToString(); }; for (auto kv : clustered_graph) { - strings::StrAppend(&ret, kv.first.ToString(), " [label=\"", name(kv.second), - " (", kv.second.switch_nodes.size(), ", ", - kv.second.merge_nodes.size(), ")\"];\n"); + if (!kv.second.switch_nodes.empty() || !kv.second.merge_nodes.empty()) { + strings::StrAppend( + &ret, kv.first.ToString(), " [label=\"", name(kv.second), + kv.second.switch_nodes.empty() + ? "" + : strings::StrCat(" switches=", kv.second.switch_nodes.size()), + kv.second.merge_nodes.empty() + ? 
"" + : strings::StrCat(" merges=", kv.second.merge_nodes.size()), + "\"];\n"); + } } for (auto kv : clustered_graph) { for (auto in : kv.second.in_nodes) { strings::StrAppend(&ret, name(*in), " -> ", name(kv.second), ";\n"); } } - return strings::StrCat(ret, "}"); + return strings::StrCat(ret, "} // end"); } bool IsDeadSwitch(const Node* node) { @@ -790,9 +794,6 @@ bool IsDeadSwitch(const Node* node) { void FunctionalizeCond::CreateClusters() { for (Node* node : graph_->nodes()) { - if (!node->IsOp()) { - continue; - } if (IsSwitch(node)) { switch_nodes_.insert(node); } else if (IsMerge(node)) { @@ -825,6 +826,10 @@ void FunctionalizeCond::CreateClusters() { clusters_.at(node).Merge(&clusters_.at(in)); } } + // Group all source clusters together. + if (node->IsSource() || node->in_edges().empty()) { + clusters_.at(node).Merge(&clusters_.at(ClusterHandle(Graph::kSourceId))); + } } } @@ -876,7 +881,7 @@ void FunctionalizeCond::CreateClusteredGraph() { for (const Node* in : node->in_nodes()) { ClusterHandle other_repr = Representative(in); // Skip source, sink and internal edges. - if (!in->IsOp() || other_repr == repr) { + if (other_repr == repr) { continue; } Cluster& cluster_node_in = clustered_graph_[other_repr]; @@ -887,7 +892,7 @@ void FunctionalizeCond::CreateClusteredGraph() { for (const Node* out : node->out_nodes()) { ClusterHandle other_repr = Representative(out); // Skip source, sink and internal edges. - if (!out->IsOp() || other_repr == repr) { + if (other_repr == repr) { continue; } Cluster& cluster_node_out = clustered_graph_[other_repr]; @@ -897,6 +902,7 @@ void FunctionalizeCond::CreateClusteredGraph() { } return cluster_node; }; + update_cluster_for_node(graph_->source_node()); for (Node* node : switch_nodes_) { update_cluster_for_node(node).switch_nodes.insert(node); } @@ -955,7 +961,7 @@ gtl::optional FunctionalizeCond::GetSwitchCluster( for (Cluster* in : merge_cluster.in_nodes) { Cluster* cluster = in; if (in->switch_nodes.empty()) { - if (in->in_nodes.size() != 1) { + if (in->in_nodes.size() != 1 || in->out_nodes.size() != 1) { return gtl::nullopt; } // There is only a single `in` cluster. @@ -1292,11 +1298,8 @@ std::vector> FunctionalizeCond::SortedMergeNodes() { VLOG(2) << "ProcessClusteredGraph"; std::stack> stack; - for (auto& c : clustered_graph_) { - if (c.second.in_nodes.empty()) { - stack.push({0, &c.second}); - } - } + // Initialize with the source node. + stack.push({0, &clustered_graph_[ClusterHandle(Graph::kSourceId)]}); // Perform a depth-first traversal of the clustered graph computing the // switch-merge depth. From d16d8495d024e531b34d88745f99679414992fc2 Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Tue, 14 Nov 2017 17:10:33 -0800 Subject: [PATCH 040/104] python3 fixes PiperOrigin-RevId: 175758757 --- tensorflow/python/kernel_tests/template_test.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tensorflow/python/kernel_tests/template_test.py b/tensorflow/python/kernel_tests/template_test.py index 798bd0fe894..40c0ade62a8 100644 --- a/tensorflow/python/kernel_tests/template_test.py +++ b/tensorflow/python/kernel_tests/template_test.py @@ -513,8 +513,8 @@ class TemplateTest(test.TestCase): tb = template.make_template("s", function_with_create, trainable=False) # Initially there are not variables created. 
- self.assertEqual([], ta.global_variables) - self.assertEqual([], tb.global_variables) + self.assertEqual([], list(ta.global_variables)) + self.assertEqual([], list(tb.global_variables)) # After calling there are variables created. ta() tb() @@ -531,8 +531,8 @@ class TemplateTest(test.TestCase): tb = template.make_template("bar", variable_scoped_function, True) # Initially there are not variables created. - self.assertEqual([], ta.trainable_variables) - self.assertEqual([], tb.trainable_variables) + self.assertEqual([], list(ta.trainable_variables)) + self.assertEqual([], list(tb.trainable_variables)) # After calling there are variables created. ta() tb() @@ -550,8 +550,8 @@ class TemplateTest(test.TestCase): variable_scoped_function_with_local_variable) # Initially there are not variables created. - self.assertEqual([], ta.local_variables) - self.assertEqual([], tb.local_variables) + self.assertEqual([], list(ta.local_variables)) + self.assertEqual([], list(tb.local_variables)) # After calling there are variables created. ta() tb() From 5936396ceb7bf5e0f45d8c2364bba51901124b54 Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Tue, 14 Nov 2017 17:56:15 -0800 Subject: [PATCH 041/104] Benchmark for tfe_py_execute(identity), for reference with the other identity benchmarks PiperOrigin-RevId: 175763756 --- tensorflow/python/eager/benchmarks_test.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tensorflow/python/eager/benchmarks_test.py b/tensorflow/python/eager/benchmarks_test.py index 435505edd74..9849f0f322e 100644 --- a/tensorflow/python/eager/benchmarks_test.py +++ b/tensorflow/python/eager/benchmarks_test.py @@ -170,6 +170,18 @@ class MicroBenchmarks(test.Benchmark): m = self._m_2 self._run(lambda: gen_array_ops.identity(m), 30000) + def benchmark_tfe_py_execute_identity(self): + m = self._m_2 + ctx_handle = context.context()._handle + attrs = ("T", self._m_2.dtype.as_datatype_enum) + inputs = [m] + + def f(): + pywrap_tensorflow.TFE_Py_Execute( + ctx_handle, None, "Identity", inputs, attrs, 1) + + self._run(f, 30000) + def benchmark_tf_gradient_function_identity(self): m = self._m_2 self._run( From 4c9b4ebfcb40b8a7b3fe11411f6b91c9de326c56 Mon Sep 17 00:00:00 2001 From: Mark Heffernan Date: Tue, 14 Nov 2017 18:12:07 -0800 Subject: [PATCH 042/104] Fix a few issues with HloValue and dataflow analysis identified when debugging the causes for the rollback of the new copy insertion (cl/174423881): (1) Mark values for deletion during dataflow propagation and delete later, rather than delete immediately. It was possible for a value to be deleted (a phi is optimized away), and still have references to it in the value sets. (2) Make call/while and root instructions explicit uses of the values which reach them. This subsumes the need for the HloValue::live_out_of_computation_ property which was buggy (which computation is it live-out of?). (3) Delete unused methods HloValue::RecomputeUses and HloValue::RemovePosition. 
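As a rough illustration of item (2), written in Python rather than the C++ of the analysis itself: once the root (or calling) instruction is recorded as an ordinary use, liveness can be answered from the use list alone, with no separate live-out flag. The `Value` class below is purely illustrative.

```python
class Value(object):
    def __init__(self, name):
        self.name = name
        self.uses = []                      # (instruction, operand_index) pairs

    def add_use(self, instruction, operand_index):
        self.uses.append((instruction, operand_index))

    def live_out_of(self, root):
        # No dedicated flag: the root shows up as just another use.
        return any(instr == root for instr, _ in self.uses)

add = Value('add')
add.add_use('sub', 0)
add.add_use('root', 0)                      # root made an explicit use
print(add.live_out_of('root'))              # True
```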
PiperOrigin-RevId: 175765613 --- .../xla/service/hlo_dataflow_analysis.cc | 82 +++++++--- .../xla/service/hlo_dataflow_analysis.h | 22 ++- .../xla/service/hlo_dataflow_analysis_test.cc | 63 ++++---- .../compiler/xla/service/hlo_ordering.cc | 30 ++-- tensorflow/compiler/xla/service/hlo_value.cc | 141 ++++++------------ tensorflow/compiler/xla/service/hlo_value.h | 19 +-- 6 files changed, 168 insertions(+), 189 deletions(-) diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc index ff80f18bb56..3f34b9ceb34 100644 --- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc +++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.cc @@ -75,11 +75,43 @@ HloValue* HloDataflowAnalysis::NewHloValue(HloInstruction* instruction, std::forward_as_tuple(value_id, instruction, index, is_phi)); CHECK(emplaced.second); + VLOG(4) << "NewHloValue = " << emplaced.first->second.ToShortString(); + return &emplaced.first->second; } -void HloDataflowAnalysis::DeleteHloValue(HloValue::Id value_id) { - values_.erase(value_id); +void HloDataflowAnalysis::MarkValueForDeletion(HloValue::Id value_id) { + HloValue& value = values_.at(value_id); + VLOG(4) << "MarkValueForDeletion(" << value.ToShortString() << ")"; + + value_ids_to_delete_.push_back(value_id); +} + +void HloDataflowAnalysis::DeleteMarkedValues() { +#ifndef NDEBUG + // Verify that no marked-for-deletion values are in any of the value sets. + tensorflow::gtl::FlatSet id_set(value_ids_to_delete_.begin(), + value_ids_to_delete_.end()); + for (const auto& pair : value_sets_) { + const HloInstruction* instruction = pair.first; + const InstructionValueSet& instruction_value_set = pair.second; + for (const auto& index_value_set : instruction_value_set) { + const HloValueSet& value_set = index_value_set.second; + for (const HloValue* value : value_set.values()) { + DCHECK(!ContainsKey(id_set, value->id())) + << "Value " << value->ToShortString() + << " marked for deletion, but still exists in value set for " + "instruction " + << instruction->name(); + } + } + } +#endif + + for (HloValue::Id value_id : value_ids_to_delete_) { + values_.erase(value_id); + } + value_ids_to_delete_.clear(); } string HloDataflowAnalysis::ToString() const { @@ -121,6 +153,7 @@ bool HloDataflowAnalysis::Phi( HloInstruction* instruction, tensorflow::gtl::ArraySlice inputs) { CHECK(ssa_form_); + VLOG(4) << "Phi(" << instruction->name() << ")"; for (const InstructionValueSet* input : inputs) { DCHECK(ShapeUtil::Compatible(instruction->shape(), input->shape())); @@ -183,7 +216,7 @@ bool HloDataflowAnalysis::Phi( } else if (current_value != &new_value) { if (current_value_defined_here) { // Remove the existing phi. - DeleteHloValue(current_value->id()); + MarkValueForDeletion(current_value->id()); } value_set.Clear(); value_set.AddValue(&new_value); @@ -193,7 +226,8 @@ bool HloDataflowAnalysis::Phi( // Multiple distinct values reach this point. A phi value is // necessary. 
CHECK_GT(input_value_ids.size(), 1); - if (current_value == nullptr || !current_value->is_phi()) { + if (current_value == nullptr || + !(current_value->is_phi() && current_value_defined_here)) { value_set.Clear(); value_set.AddValue(NewHloValue(instruction, index, /*is_phi=*/true)); changed = true; @@ -485,11 +519,13 @@ bool HloDataflowAnalysis::UpdateInstructionValueSet( } } -void HloDataflowAnalysis::UpdateInstructionsAndPropagate( - tensorflow::gtl::ArraySlice instructions) { +void HloDataflowAnalysis::Propagate() { std::queue worklist; - for (HloInstruction* instruction : instructions) { - worklist.push(instruction); + + for (HloComputation* computation : module_->computations()) { + for (HloInstruction* instruction : computation->instructions()) { + worklist.push(instruction); + } } while (!worklist.empty()) { @@ -662,20 +698,17 @@ StatusOr> HloDataflowAnalysis::Run( new HloDataflowAnalysis(module, ssa_form, bitcast_defines_value)); TF_RETURN_IF_ERROR(dataflow_analysis->InitializeInstructionValueSets()); + dataflow_analysis->Propagate(); - // Construct list of all instructions to initialize the worklist to propagate - // the data flow. For efficiency sort the instruction in post order so - // producers appear before consumers. - std::vector all_instructions; - for (const HloComputation* computation : module->MakeComputationPostOrder()) { - for (HloInstruction* instruction : - computation->MakeInstructionPostOrder()) { - all_instructions.push_back(instruction); - } - } - dataflow_analysis->UpdateInstructionsAndPropagate(all_instructions); + // Delete all values marked for deletion. + dataflow_analysis->DeleteMarkedValues(); - // Add in positions to all values. + // Gather and set all non-definition positions of all values. Value deletion + // is rare, so just use a vector indexed by Value::Id rather than a map from + // Value::Id to positions. There should be very few holes in the vector, and + // lookup is faster. + std::vector> value_positions( + dataflow_analysis->next_value_id_); for (const HloComputation* computation : module->computations()) { for (HloInstruction* instruction : computation->instructions()) { for (const auto& pair : @@ -684,13 +717,18 @@ StatusOr> HloDataflowAnalysis::Run( const HloValueSet& value_set = pair.second; for (const HloValue* value : value_set.values()) { if (value->defining_instruction() != instruction) { - dataflow_analysis->GetValue(value->id()) - .AddPosition(instruction, index); + value_positions[value->id()].push_back( + HloPosition{instruction, index}); } } } } } + for (auto& pair : dataflow_analysis->values_) { + HloValue::Id value_id = pair.first; + HloValue& value = pair.second; + value.SetPositionsAndComputeUses(value_positions[value_id]); + } // Construct vector of values. dataflow_analysis->values_vector_.reserve(dataflow_analysis->values_.size()); diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h index 63467f32060..dfd81ae9510 100644 --- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h +++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h @@ -126,13 +126,16 @@ class HloDataflowAnalysis { HloValue* NewHloValue(HloInstruction* instruction, const ShapeIndex& index, bool is_phi = false); - // Delete the HloValue with the given ID. - void DeleteHloValue(HloValue::Id value_id); + // Mark the HloValue with the given ID for deletion. + void MarkValueForDeletion(HloValue::Id value_id); + + // Delete all HloValues marked for deletion. 
Should be called after + // propagation is complete. + void DeleteMarkedValues(); // Constructs and initializes the InstructionValueSets of all instructions to // contain exactly the HloValues defined by each instruction. These values can - // then propagated throughout the HLO graph by calling - // UpdateInstructionsAndPropagate. + // then propagated throughout the HLO graph by calling Propagate. Status InitializeInstructionValueSets(); // Updates the value set of the given instruction based on the values flowing @@ -152,10 +155,8 @@ class HloDataflowAnalysis { bool UpdateTupleValueSet(HloInstruction* tuple); bool UpdateWhileValueSet(HloInstruction* xla_while); - // Update the value sets of the given instructions and propagate the - // changes to fixed point. - void UpdateInstructionsAndPropagate( - tensorflow::gtl::ArraySlice instructions); + // Propagate the dataflow through the module. + void Propagate(); // Return the result of the SSA Phi function applied to the given inputs at // the given instruction. If skip_top_level is true, then the top level of the @@ -191,6 +192,11 @@ class HloDataflowAnalysis { // A map from instruction to InstructionValueSet. std::unordered_map value_sets_; + // Values marked for deletion during construction. We don't delete them + // immediately because references to them may remain in ValueSets temporarily + // during propagation. After construction, these values are deleted. + std::vector value_ids_to_delete_; + // A vector containing all HloValues sorted by HloValue::Id. std::vector values_vector_; diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc index 66a538fc519..f08f0b1d683 100644 --- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc +++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis_test.cc @@ -211,10 +211,10 @@ TEST_P(HloDataflowAnalysisTest, NestedTuple) { HloPosition{nested_tuple, {0, 0}}, HloPosition{nested_tuple, {1, 0}}, HloPosition{nested_tuple, {2}}, HloPosition{gte_tuple, {0}}, HloPosition{gte_out, {}})); - // Constant values should have no uses though one is live out. The positions - // where they appear as operands are on instructions which do not use the - // values (eg, Tuple). - EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).uses().empty()); + // Constant values should have only a single use, which is the root of the + // computation. + EXPECT_THAT(analysis.GetValueDefinedAt(constant1, /*index=*/{}).uses(), + UnorderedElementsAre(HloUse{gte_out, 0, {0}})); EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).uses().empty()); // The top-level tuple values are used in GTE instructions. 
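A small sketch, again in Python for brevity, of the mark-then-delete pattern these hunks implement: values are only marked while propagation may still hold references to them, and are physically erased in one pass once propagation has reached a fixed point. The `ToyDataflow` class is illustrative, not XLA code.

```python
class ToyDataflow(object):
    def __init__(self):
        self.values = {}             # value id -> value
        self._ids_to_delete = []

    def mark_value_for_deletion(self, value_id):
        # Safe during propagation: value sets may still reference the id.
        self._ids_to_delete.append(value_id)

    def delete_marked_values(self):
        # Called only after propagation is complete.
        for value_id in self._ids_to_delete:
            self.values.pop(value_id, None)
        self._ids_to_delete = []

df = ToyDataflow()
df.values = {0: 'phi', 1: 'add'}
df.mark_value_for_deletion(0)        # during Phi() simplification
df.delete_marked_values()            # after Propagate()
print(df.values)                     # {1: 'add'}
```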
@@ -274,12 +274,11 @@ TEST_P(HloDataflowAnalysisTest, SingleCall) { EXPECT_EQ(analysis.GetUniqueValueAt(call), analysis.GetValueDefinedAt(add)); EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(), - UnorderedElementsAre(HloUse{add, 0, {}})); + UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{add, 0, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(), - UnorderedElementsAre(HloUse{add, 1, {}})); + UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{add, 1, {}})); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); - EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_computation()); } TEST_P(HloDataflowAnalysisTest, ComputationCalledTwiceWithSameArguments) { @@ -323,18 +322,17 @@ TEST_P(HloDataflowAnalysisTest, ComputationCalledTwiceWithSameArguments) { EXPECT_TRUE(analysis.ValueIsDefinedAt(sub)); EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(), - UnorderedElementsAre(HloUse{add, 0, {}})); + UnorderedElementsAre(HloUse{call1, 0, {}}, HloUse{call2, 0, {}}, + HloUse{add, 0, {}})); EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(), - UnorderedElementsAre(HloUse{add, 1, {}})); + UnorderedElementsAre(HloUse{call1, 1, {}}, HloUse{call2, 1, {}}, + HloUse{add, 1, {}})); // The Add from the subcomputation is used as both operands of the Subtract. EXPECT_THAT(analysis.GetValueDefinedAt(add).uses(), UnorderedElementsAre(HloUse{sub, 0, {}}, HloUse{sub, 1, {}})); EXPECT_FALSE(analysis.GetValueDefinedAt(add).live_out_of_module()); - EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_computation()); - EXPECT_TRUE(analysis.GetValueDefinedAt(sub).live_out_of_module()); - EXPECT_TRUE(analysis.GetValueDefinedAt(sub).live_out_of_computation()); } TEST_P(HloDataflowAnalysisTest, ComputationCalledTwiceWithDifferentArguments) { @@ -408,7 +406,7 @@ TEST_P(HloDataflowAnalysisTest, NestedCalls) { auto outer_param1 = outer_builder.AddInstruction( HloInstruction::CreateParameter(1, scalar_shape_, "param1")); // Swizzle parameters. - outer_builder.AddInstruction(HloInstruction::CreateCall( + auto nested_call = outer_builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {outer_param1, outer_param0}, inner_computation)); HloComputation* outer_computation = module_->AddEmbeddedComputation(outer_builder.Build()); @@ -418,7 +416,7 @@ TEST_P(HloDataflowAnalysisTest, NestedCalls) { HloInstruction::CreateConstant(Literal::CreateR0(1.0))); auto constant2 = builder.AddInstruction( HloInstruction::CreateConstant(Literal::CreateR0(2.0))); - builder.AddInstruction(HloInstruction::CreateCall( + auto call = builder.AddInstruction(HloInstruction::CreateCall( scalar_shape_, {constant1, constant2}, outer_computation)); module_->AddEntryComputation(builder.Build()); @@ -431,10 +429,14 @@ TEST_P(HloDataflowAnalysisTest, NestedCalls) { // Verify that the uses of the constants are properly swizzled by parameter // permutation in nested_call. 
- EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(), - UnorderedElementsAre(HloUse{add, 1, {}})); - EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(), - UnorderedElementsAre(HloUse{add, 0, {}})); + EXPECT_THAT( + analysis.GetValueDefinedAt(constant1).uses(), + UnorderedElementsAre(HloUse{call, 0, {}}, HloUse{nested_call, 1, {}}, + HloUse{add, 1, {}})); + EXPECT_THAT( + analysis.GetValueDefinedAt(constant2).uses(), + UnorderedElementsAre(HloUse{call, 1, {}}, HloUse{nested_call, 0, {}}, + HloUse{add, 0, {}})); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); } @@ -469,7 +471,7 @@ TEST_P(HloDataflowAnalysisTest, SingleWhile) { HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1)); auto add = body_builder.AddInstruction(HloInstruction::CreateBinary( scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1)); - body_builder.AddInstruction( + auto body_root = body_builder.AddInstruction( HloInstruction::CreateTuple({body_element_0, add})); HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build()); @@ -496,8 +498,6 @@ TEST_P(HloDataflowAnalysisTest, SingleWhile) { bool ssa_form = GetParam(); const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); - EXPECT_TRUE( - analysis.GetValueDefinedAt(cond_constant).live_out_of_computation()); EXPECT_FALSE(analysis.GetValueDefinedAt(cond_constant).live_out_of_module()); if (ssa_form) { @@ -517,14 +517,14 @@ TEST_P(HloDataflowAnalysisTest, SingleWhile) { EXPECT_THAT( analysis.GetValueDefinedAt(constant1).uses(), - UnorderedElementsAre(HloUse{add, 0, {}}, HloUse{xla_while, 0, {0}})); + UnorderedElementsAre(HloUse{add, 0, {}}, HloUse{body_root, 0, {}}, + HloUse{xla_while, 0, {0}})); // Constant1 passes through the body and out of the module. EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(xla_while, /*index=*/{1}) .live_out_of_module()); - EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_computation()); EXPECT_FALSE(analysis.GetValueDefinedAt(add).live_out_of_module()); } else { // While instruction and subcomputation parameters should not define values @@ -538,7 +538,6 @@ TEST_P(HloDataflowAnalysisTest, SingleWhile) { EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_module()); - EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_computation()); } } @@ -915,9 +914,11 @@ TEST_P(HloDataflowAnalysisTest, TupleSelect) { HloUse{select12, 1, {}})); // The two constant values just pass through the Selects and are not - // used. They are live out however. - EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).uses().empty()); - EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).uses().empty()); + // used except at the root. They are live out however. 
+ EXPECT_THAT(analysis.GetValueDefinedAt(constant1).uses(), + UnorderedElementsAre(HloUse{select1234, 1, {0}})); + EXPECT_THAT(analysis.GetValueDefinedAt(constant2).uses(), + UnorderedElementsAre(HloUse{select1234, 1, {0}})); EXPECT_TRUE(analysis.GetValueDefinedAt(constant1).live_out_of_module()); EXPECT_TRUE(analysis.GetValueDefinedAt(constant2).live_out_of_module()); } @@ -1318,7 +1319,7 @@ TEST_P(HloDataflowAnalysisTest, WhileParameters_Sequential) { auto entry = module_->AddEntryComputation(builder.Build()); bool ssa_form = GetParam(); - const HloDataflowAnalysis& analysis = RunAnalysis(ssa_form); + RunAnalysis(ssa_form); SequentialHloOrdering::HloModuleSequence sequence; sequence.insert({entry, {param, xla_while}}); @@ -1329,12 +1330,6 @@ TEST_P(HloDataflowAnalysisTest, WhileParameters_Sequential) { SequentialHloOrdering ordering(module_.get(), sequence); - // 'add' is the body root even though later instructions follow in the order - // like 'dead_negate'. Only 'add' should be live out of the computation. - EXPECT_TRUE(analysis.GetValueDefinedAt(add).live_out_of_computation()); - EXPECT_FALSE( - analysis.GetValueDefinedAt(dead_negate).live_out_of_computation()); - // 'add' is live out of the body and will interfere with an later instructions // such as 'dead_constant' and 'dead_negate'. EXPECT_TRUE(InstructionsMayInterfere(ordering, add, dead_constant)); diff --git a/tensorflow/compiler/xla/service/hlo_ordering.cc b/tensorflow/compiler/xla/service/hlo_ordering.cc index 37009369797..6f6e679a218 100644 --- a/tensorflow/compiler/xla/service/hlo_ordering.cc +++ b/tensorflow/compiler/xla/service/hlo_ordering.cc @@ -173,6 +173,19 @@ bool HloOrdering::UseIsBeforeValueDefinition( return true; } } + + // The use at a call occurs before values that are defined in the called + // computation. + if (use.instruction->opcode() == HloOpcode::kCall) { + const HloInstruction* call = use.instruction; + if (call_graph_->InstructionIsNestedIn(value.defining_instruction(), + call->to_apply())) { + VLOG(4) << " use is call " << use.instruction->name() + << " and def is in called computation"; + return true; + } + } + VLOG(4) << " use is not before value"; return false; } @@ -187,23 +200,6 @@ bool HloOrdering::LiveRangeStrictlyBefore( return false; } - // Live-out values from the module can never have ranges strictly before any - // other value. - if (a.live_out_of_module()) { - VLOG(4) << "a is live out of module"; - return false; - } - - // Live-out values of computations can never have ranges strictly before any - // other value in the computation (including values nested in - // subcomputations). - if (a.live_out_of_computation() && - call_graph_->InstructionIsNestedIn(b.defining_instruction(), - a.defining_instruction()->parent())) { - VLOG(4) << "a is live out of computation containing b"; - return false; - } - // All uses of 'a' must be before 'b' is defined. for (const HloUse& use : a.uses()) { if (!UseIsBeforeValueDefinition(use, b, dataflow)) { diff --git a/tensorflow/compiler/xla/service/hlo_value.cc b/tensorflow/compiler/xla/service/hlo_value.cc index e6cf0d37b8a..05b7dce3d1e 100644 --- a/tensorflow/compiler/xla/service/hlo_value.cc +++ b/tensorflow/compiler/xla/service/hlo_value.cc @@ -71,7 +71,7 @@ HloValue::HloValue(HloValue::Id id, HloInstruction* instruction, const ShapeIndex& index, bool is_phi) : id_(id), is_phi_(is_phi) { // The defining position is always the first element in the positions_ vector. 
- AddPosition(instruction, index); + positions_.push_back(HloPosition{instruction, index}); } bool HloValue::operator==(const HloValue& other) const { @@ -130,18 +130,14 @@ bool MayUseOperandValue(int64 operand_number, const ShapeIndex& index, CHECK_LE(operand_number, 2); return operand_number == 0 || index.empty(); - case HloOpcode::kCall: case HloOpcode::kTuple: // These instructions always pass through their operands transparently. return false; + case HloOpcode::kCall: case HloOpcode::kWhile: - // Though the while instructions passes through its operands, we return - // true because in SSA form there may be a Phi at the parameter of the - // while which is considered a use of its incoming value because the Phi - // input values are not passed through into the body computation. Because - // this function is used in both SSA and non-SSA forms of the analysis - // conservatively return true. + // Although call and while instructions pass through their operands, they + // are considered uses. return true; default: @@ -151,103 +147,58 @@ bool MayUseOperandValue(int64 operand_number, const ShapeIndex& index, } // namespace -void HloValue::AddPosition(HloInstruction* instruction, - const ShapeIndex& index) { - HloPosition new_position{instruction, index}; +void HloValue::SetPositionsAndComputeUses( + tensorflow::gtl::ArraySlice positions) { + CHECK_EQ(positions_.size(), 1) << "SetPositions should only be called once."; - // The new position must not already exist in positions_. + // The positions must be unique and should not contain the defining position + // as this is added at construction time. + for (const HloPosition& position_a : positions) { + DCHECK_NE(position_a, defining_position()); + for (const HloPosition& position_b : positions) { + if (&position_a != &position_b) { + DCHECK_NE(position_a, position_b); + } + } + } + + positions_.insert(positions_.end(), positions.begin(), positions.end()); + + // Gather the computation roots at which this value appears. + tensorflow::gtl::FlatSet root_positions; for (const HloPosition& position : positions_) { - DCHECK_NE(position, new_position); - } - - positions_.push_back(std::move(new_position)); - - // Update uses. - for (HloInstruction* user : instruction->users()) { - for (int64 operand_number : user->OperandIndices(instruction)) { - if (MayUseOperandValue(operand_number, index, user)) { - HloUse new_use{user, operand_number, index}; - - // The new use must not already exist in uses_. - for (const HloUse& use : uses_) { - DCHECK_NE(use, new_use); - } - - uses_.push_back(std::move(new_use)); - } + if (position.instruction == + position.instruction->parent()->root_instruction()) { + root_positions.insert(position.instruction); } } - // Update liveout status of this HloValue. - const HloModule& module = *instruction->parent()->parent(); - if (instruction == module.entry_computation()->root_instruction()) { - live_out_of_module_ = true; - } - - if (instruction == instruction->parent()->root_instruction()) { - live_out_of_computation_ = true; - } -} - -void HloValue::RemovePosition(HloInstruction* instruction, - const ShapeIndex& index) { - // The defining position cannot be removed. 
- CHECK(!(instruction == defining_instruction() && index == defining_index())); - - int64 size_before = positions_.size(); - positions_.erase( - std::remove_if(positions_.begin(), positions_.end(), - [instruction, &index](const HloPosition& position) { - return position.instruction == instruction && - position.index == index; - }), - positions_.end()); - // Only a single position should have been removed. - CHECK_EQ(positions_.size(), size_before - 1); - - // Update uses which referred to this position. - uses_.erase(std::remove_if(uses_.begin(), uses_.end(), - [instruction, &index](const HloUse& use) { - return use.instruction->operand( - use.operand_number) == instruction && - use.operand_index == index; - }), - uses_.end()); - - // Returns whether this value is contained in the given instruction's output. - auto is_contained_in = [this](const HloInstruction* instruction) { - for (const HloPosition& position : positions()) { - if (position.instruction == instruction) { - return true; - } - } - return false; - }; - - const HloModule& module = *instruction->parent()->parent(); - if (instruction == module.entry_computation()->root_instruction()) { - // Value has been removed from a position in the entry root instruction. - live_out_of_module_ = - is_contained_in(module.entry_computation()->root_instruction()); - } - if (instruction == defining_instruction()->parent()->root_instruction()) { - // Value has been removed from the root of the computation the value has - // been defined in. - live_out_of_computation_ = - is_contained_in(defining_instruction()->parent()->root_instruction()); - } -} - -void HloValue::RecomputeUses() { - uses_.clear(); - for (const HloPosition& position : positions()) { + // Build vector of HloUses for the value. + for (const HloPosition& position : positions_) { for (HloInstruction* user : position.instruction->users()) { for (int64 operand_number : user->OperandIndices(position.instruction)) { - if (MayUseOperandValue(operand_number, position.index, user)) { - uses_.push_back(HloUse{user, operand_number, position.index}); + // Root instructions of computations are considered to be uses whether + // or not the root instruction itself actually uses the value. + if (MayUseOperandValue(operand_number, position.index, user) || + ContainsKey(root_positions, user)) { + HloUse new_use{user, operand_number, position.index}; + + // The new use must not already exist in uses_. + for (const HloUse& use : uses_) { + DCHECK_NE(use, new_use); + } + + uses_.push_back(std::move(new_use)); } } } + + // Update liveout status of this HloValue. + const HloModule& module = *position.instruction->parent()->parent(); + if (position.instruction == + module.entry_computation()->root_instruction()) { + live_out_of_module_ = true; + } } } diff --git a/tensorflow/compiler/xla/service/hlo_value.h b/tensorflow/compiler/xla/service/hlo_value.h index 6872bc76a82..2a711e8b425 100644 --- a/tensorflow/compiler/xla/service/hlo_value.h +++ b/tensorflow/compiler/xla/service/hlo_value.h @@ -121,6 +121,12 @@ class HloValue { HloValue(Id id, HloInstruction* instruction, const ShapeIndex& index, bool is_phi = false); + // Sets the positions in the module at which the HloValue appears. Updates + // uses. Should be called once and only once. The defining position should not + // be included in 'positions' as this is set at construction time. + void SetPositionsAndComputeUses( + tensorflow::gtl::ArraySlice positions); + // Return a unique identifier for this HloValue. 
This value is used for stable // sorting and iteration Id id() const { return id_; } @@ -143,28 +149,15 @@ class HloValue { // Return the shape of this HloValue. const Shape& shape() const { return defining_position().shape(); } - // Add or remove a position at which the HloValue appears. The definition - // position can not be removed. The uses of the HloValue are updated. - void AddPosition(HloInstruction* instruction, const ShapeIndex& index); - void RemovePosition(HloInstruction* instruction, const ShapeIndex& index); - - // Remove all positions except the defining position. Updates uses. - void ClearPositions(); - // Return all positions of the HloValue in the module. const std::vector& positions() const { return positions_; } // Return all uses of the HloValue. const std::vector& uses() const { return uses_; } - void RecomputeUses(); - // Get whether this HloValue is live out of the module. bool live_out_of_module() const { return live_out_of_module_; } - // Get whether this HloValue is live out of the computation it is defined in. - bool live_out_of_computation() const { return live_out_of_computation_; } - bool operator==(const HloValue& other) const; bool operator!=(const HloValue& other) const; From 952030285da7b265c2748506d9613a2ef34e214e Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 18:26:41 -0800 Subject: [PATCH 043/104] Added support for multi-column vocabs to the int-to-string lookup API. PiperOrigin-RevId: 175767044 --- .../python/kernel_tests/lookup_ops_test.py | 33 ++++++++++++- tensorflow/python/ops/lookup_ops.py | 48 ++++++++++++++----- 2 files changed, 66 insertions(+), 15 deletions(-) diff --git a/tensorflow/python/kernel_tests/lookup_ops_test.py b/tensorflow/python/kernel_tests/lookup_ops_test.py index 9944b5929fc..d4bc71f1c8e 100644 --- a/tensorflow/python/kernel_tests/lookup_ops_test.py +++ b/tensorflow/python/kernel_tests/lookup_ops_test.py @@ -597,10 +597,10 @@ class IndexTableFromTensor(test.TestCase): class IndexToStringTableFromFileTest(test.TestCase): - def _createVocabFile(self, basename): + def _createVocabFile(self, basename, values=("brain", "salad", "surgery")): vocabulary_file = os.path.join(self.get_temp_dir(), basename) with open(vocabulary_file, "w") as f: - f.write("\n".join(["brain", "salad", "surgery"]) + "\n") + f.write("\n".join(values) + "\n") return vocabulary_file def test_index_to_string_table(self): @@ -614,6 +614,35 @@ class IndexToStringTableFromFileTest(test.TestCase): self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"), features.eval()) + def test_index_to_string_table_from_multicolumn_file(self): + vocabulary_file = self._createVocabFile( + "f2i_vocab1.txt", values=("brain\t300", "salad\t20", "surgery\t1")) + with self.test_session(): + table = lookup_ops.index_to_string_table_from_file( + vocabulary_file=vocabulary_file, + key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER, + value_column_index=0) + features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64)) + self.assertRaises(errors_impl.OpError, features.eval) + lookup_ops.tables_initializer().run() + self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"), + features.eval()) + + def test_index_to_string_table_from_multicolumn_file_custom_delimiter(self): + vocabulary_file = self._createVocabFile( + "f2i_vocab1.txt", values=("brain 300", "salad 20", "surgery 1")) + with self.test_session(): + table = lookup_ops.index_to_string_table_from_file( + vocabulary_file=vocabulary_file, + 
key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER, + value_column_index=0, + delimiter=" ") + features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64)) + self.assertRaises(errors_impl.OpError, features.eval) + lookup_ops.tables_initializer().run() + self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"), + features.eval()) + def test_index_to_string_table_with_default_value(self): default_value = b"NONE" vocabulary_file = self._createVocabFile("f2i_vocab2.txt") diff --git a/tensorflow/python/ops/lookup_ops.py b/tensorflow/python/ops/lookup_ops.py index f28eadf2489..156e415735f 100644 --- a/tensorflow/python/ops/lookup_ops.py +++ b/tensorflow/python/ops/lookup_ops.py @@ -561,9 +561,9 @@ class TextFileStringTableInitializer(TextFileInitializer): The path must be accessible from wherever the graph is initialized (eg. trainer or eval workers). The filename may be a scalar `Tensor`. key_column_index: The column index from the text file to get the keys - from. The default is 0 that represents the whole line content. + from. The default is to use the line number, starting from zero. value_column_index: The column index from the text file to get the - values from. The default is to use the line number, starting from zero. + values from. The default is to use the whole line content. vocab_size: The number of elements in the file, if known. delimiter: The delimiter to separate fields in a line. name: Optional name for the op. @@ -613,9 +613,9 @@ class TextFileIdTableInitializer(TextFileInitializer): The path must be accessible from wherever the graph is initialized (eg. trainer or eval workers). The filename may be a scalar `Tensor`. key_column_index: The column index from the text file to get the `key` + values from. The default is to use the whole line content. + value_column_index: The column index from the text file to get the `value` values from. The default is to use the line number, starting from zero. - value_column_index: The column index from the text file ro get the `value` - values from. The default is 0 that represents the whole line content. vocab_size: The number of elements in the file, if known. delimiter: The delimiter to separate fields in a line. name: Optional name for the op. @@ -926,9 +926,9 @@ def index_table_from_file(vocabulary_file=None, key_dtype: The `key` data type. name: A name for this op (optional). key_column_index: The column index from the text file to get the `key` + values from. The default is to use the whole line content. + value_column_index: The column index from the text file to get the `value` values from. The default is to use the line number, starting from zero. - value_column_index: The column index from the text file ro get the `value` - values from. The default is 0 that represents the whole line content. delimiter: The delimiter to separate fields in a line. Returns: @@ -1095,7 +1095,10 @@ def index_table_from_tensor(vocabulary_list, def index_to_string_table_from_file(vocabulary_file, vocab_size=None, default_value="UNK", - name=None): + name=None, + key_column_index=TextFileIndex.LINE_NUMBER, + value_column_index=TextFileIndex.WHOLE_LINE, + delimiter="\t"): """Returns a lookup table that maps a `Tensor` of indices into strings. This operation constructs a lookup table to map int64 indices into string @@ -1109,6 +1112,16 @@ def index_to_string_table_from_file(vocabulary_file, The underlying table must be initialized by calling `tf.tables_initializer.run()` or `table.init.run()` once. 
+ To specify multi-column vocabulary files, use key_column_index and + value_column_index and delimiter. + + - TextFileIndex.LINE_NUMBER means use the line number starting from zero, + expects data type int64. + - TextFileIndex.WHOLE_LINE means use the whole line content, expects data + type string. + - A value >=0 means use the index (starting at zero) of the split line based + on `delimiter`. + Sample Usages: If we have a vocabulary file "test.txt" with the following content: @@ -1135,6 +1148,11 @@ def index_to_string_table_from_file(vocabulary_file, vocab_size: Number of the elements in the vocabulary, if known. default_value: The value to use for out-of-vocabulary indices. name: A name for this op (optional). + key_column_index: The column index from the text file to get the `key` + values from. The default is to use the line number, starting from zero. + value_column_index: The column index from the text file to get the `value` + values from. The default is to use the whole line content. + delimiter: The delimiter to separate fields in a line. Returns: The lookup table to map a string values associated to a given index `int64` @@ -1155,15 +1173,19 @@ def index_to_string_table_from_file(vocabulary_file, # Keep a shared_name # ____ shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size, - TextFileIndex.LINE_NUMBER, - TextFileIndex.WHOLE_LINE) + key_column_index, + value_column_index) else: # Keep a shared_name ___ - shared_name = "hash_table_%s_%s_%s" % (vocabulary_file, - TextFileIndex.LINE_NUMBER, - TextFileIndex.WHOLE_LINE) + shared_name = "hash_table_%s_%s_%s" % (vocabulary_file, key_column_index, + value_column_index) init = TextFileStringTableInitializer( - vocabulary_file, vocab_size=vocab_size, name="table_init") + vocabulary_file, + vocab_size=vocab_size, + name="table_init", + key_column_index=key_column_index, + value_column_index=value_column_index, + delimiter=delimiter) # TODO(yleon): Use a more effienct structure. return HashTable(init, default_value, shared_name=shared_name, name=scope) From 9a9b18ca6a865a2cd65ec49421eea7b788d7d856 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 18:28:34 -0800 Subject: [PATCH 044/104] Remove a Done TODO from Svdf op. PiperOrigin-RevId: 175767193 --- tensorflow/contrib/lite/kernels/svdf.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/tensorflow/contrib/lite/kernels/svdf.cc b/tensorflow/contrib/lite/kernels/svdf.cc index dd414d53bd3..72f705fe424 100644 --- a/tensorflow/contrib/lite/kernels/svdf.cc +++ b/tensorflow/contrib/lite/kernels/svdf.cc @@ -183,8 +183,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // Reduction sum - // TODO(ghodrat): Consider not reusing state for the temporary output, this - // way ReductionSum operates on row-vector instead of column vector. for (int b = 0; b < batch_size; b++) { float* output_ptr_batch = output->data.f + b * num_units; float* scratch_ptr_batch = scratch->data.f + b * num_filters; From bd5b3acc1481ba14ab86757b107bd25f4fb1aef3 Mon Sep 17 00:00:00 2001 From: Russell Power Date: Tue, 14 Nov 2017 18:46:55 -0800 Subject: [PATCH 045/104] Improve the model comparison utility. Add session reset, verify input function is consistent and freeze random variables. 
PiperOrigin-RevId: 175768806 --- .../contrib/tpu/python/tpu/test_util.py | 38 +++++++++++++++---- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/tensorflow/contrib/tpu/python/tpu/test_util.py b/tensorflow/contrib/tpu/python/tpu/test_util.py index b83c72d0ffe..a5d4ff97227 100644 --- a/tensorflow/contrib/tpu/python/tpu/test_util.py +++ b/tensorflow/contrib/tpu/python/tpu/test_util.py @@ -32,6 +32,7 @@ from tensorflow.python.client import session as tf_session from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.framework import errors from tensorflow.python.framework import ops +from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import variables @@ -89,7 +90,12 @@ def copy_dir(src, tgt): gfile.Copy(src_f, tgt_f, overwrite=True) -def compare_model(model_fn, input_fn, params, master="local", temp_dir=None, +def compare_model(model_fn, + input_fn, + params, + master="local", + temp_dir=None, + num_shards=2, tolerance=1e-4): """Compare the results of running `model_fn` on the TPU and CPU.""" if not temp_dir: @@ -102,7 +108,17 @@ def compare_model(model_fn, input_fn, params, master="local", temp_dir=None, logging.info("Checkpoints and weights will be written to %s", temp_dir) num_steps = 1 - num_shards = 8 + + def _model_adapter(features, labels, mode, params): + """Run users model function with random seeds fixed to known values.""" + random_seed.set_random_seed(0) + np.random.seed(0) + return model_fn(features, labels, mode, params) + + def _input_adapter(params): + random_seed.set_random_seed(0) + np.random.seed(0) + return input_fn(params) def _make_run_config(model_dir): return tpu_config.RunConfig( @@ -119,7 +135,7 @@ def compare_model(model_fn, input_fn, params, master="local", temp_dir=None, def _make_estimator(use_tpu, model_dir): return tpu_estimator.TPUEstimator( - model_fn=model_fn, + model_fn=_model_adapter, use_tpu=use_tpu, config=_make_run_config(model_dir), train_batch_size=num_shards, @@ -131,8 +147,9 @@ def compare_model(model_fn, input_fn, params, master="local", temp_dir=None, weights = {} graph = ops.Graph() with graph.as_default(): + features, labels = _input_adapter(dict(params, batch_size=num_shards)) model_fn( - *input_fn(params), + features, labels, params=dict(params, use_tpu=False), mode=model_fn_lib.ModeKeys.TRAIN) saver = tf_saver.Saver() @@ -148,10 +165,15 @@ def compare_model(model_fn, input_fn, params, master="local", temp_dir=None, return weights def _run_step(use_tpu, model_dir): + """Create an estimator and run a single step on the given device.""" + tf_session.Session.reset(target=master) + + logging.info("Running step. TPU=%d. 
model_dir=%s", use_tpu, model_dir) est = _make_estimator(use_tpu=use_tpu, model_dir=model_dir) - est.train(input_fn=input_fn, steps=num_steps) + est.train(input_fn=_input_adapter, steps=num_steps) weights = _extract_weights(est.latest_checkpoint()) - with gfile.Open(temp_dir + "tpu-%d.weights" % use_tpu, "wb") as f: + with gfile.Open(os.path.join(temp_dir, "tpu-%d.weights" % use_tpu), + "wb") as f: f.write(pickle.dumps(weights)) return weights @@ -159,9 +181,9 @@ def compare_model(model_fn, input_fn, params, master="local", temp_dir=None, _run_step(use_tpu=False, model_dir=initial_model_dir) copy_dir(initial_model_dir, cpu_model_dir) - cpu_weights = _run_step(use_tpu=False, model_dir=cpu_model_dir) - copy_dir(initial_model_dir, tpu_model_dir) + + cpu_weights = _run_step(use_tpu=False, model_dir=cpu_model_dir) tpu_weights = _run_step(use_tpu=True, model_dir=tpu_model_dir) bad_weights = False From 79733d756285b92c49cbd6315a91933cffac774f Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 19:07:13 -0800 Subject: [PATCH 046/104] Update BUILD visibility. PiperOrigin-RevId: 175770716 --- tensorflow/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/tensorflow/BUILD b/tensorflow/BUILD index 95be0bc8df2..6a66d1d44b8 100644 --- a/tensorflow/BUILD +++ b/tensorflow/BUILD @@ -366,6 +366,7 @@ config_setting( package_group( name = "internal", packages = [ + "//learning/meta_rank/...", "//tensorflow/...", "//tensorflow_fold/llgtm/...", ], From c798e04fbd24809d0bc52d4b80f30e74418b8f4d Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 19:36:08 -0800 Subject: [PATCH 047/104] Remove unused BUILD dependencies PiperOrigin-RevId: 175772549 --- tensorflow/compiler/tf2xla/BUILD | 1 - tensorflow/contrib/lite/toco/BUILD | 4 ---- 2 files changed, 5 deletions(-) diff --git a/tensorflow/compiler/tf2xla/BUILD b/tensorflow/compiler/tf2xla/BUILD index 376c8108ed6..5a81438b1c4 100644 --- a/tensorflow/compiler/tf2xla/BUILD +++ b/tensorflow/compiler/tf2xla/BUILD @@ -179,7 +179,6 @@ cc_library( visibility = ["//visibility:public"], deps = [ "//tensorflow/compiler/xla:status_macros", - "//tensorflow/compiler/xla:statusor", "//tensorflow/compiler/xla/client:computation_builder", "//tensorflow/core:core_cpu", "//tensorflow/core:core_cpu_internal", diff --git a/tensorflow/contrib/lite/toco/BUILD b/tensorflow/contrib/lite/toco/BUILD index 77d381c1c5d..eb08b5d1e54 100644 --- a/tensorflow/contrib/lite/toco/BUILD +++ b/tensorflow/contrib/lite/toco/BUILD @@ -75,7 +75,6 @@ cc_library( ":runtime", ":toco_port", "//tensorflow/core:lib", - "@protobuf_archive//:protobuf_headers", ], ) @@ -88,9 +87,6 @@ cc_library( "toco_graphviz_dump_options.h", ], visibility = ["//visibility:public"], - deps = [ - "@com_google_absl//absl/strings", - ], ) cc_library( From 24a6162d2d5fad078157e2ec514f2fbb7ee0c676 Mon Sep 17 00:00:00 2001 From: Justin Lebar Date: Tue, 14 Nov 2017 20:12:00 -0800 Subject: [PATCH 048/104] [XLA:GPU] Mark loads as invariant where appropriate. If we read a value within an HLO that isn't modified by that HLO, mark it as invariant in LLVM IR. LLVM can perform more aggressive optimizations on invariant loads, but I don't expect this will help much in our case, because XLA already emits pretty aggressive noalias information on loads and stores. However, on nvidia GPUs, marking loads as invariant has the additional benefit of allowing LLVM to lower the load as ld.global.nc (equivalent to CUDA's __ldg). 
This instruction uses a special cache on the GPU, and it's usually faster than a regular load. PiperOrigin-RevId: 175774979 --- .../compiler/xla/service/buffer_assignment.cc | 36 +++++++++++++ .../compiler/xla/service/buffer_assignment.h | 6 +++ .../xla/service/gpu/hlo_to_ir_bindings.cc | 35 +++++++++++++ .../xla/service/gpu/hlo_to_ir_bindings.h | 9 +++- .../compiler/xla/service/gpu/ir_emitter.cc | 25 +++++---- .../compiler/xla/service/gpu/ir_emitter.h | 10 +++- .../xla/service/gpu/ir_emitter_nested.cc | 3 +- .../xla/service/gpu/ir_emitter_unnested.cc | 51 +++++++++++-------- .../xla/service/llvm_ir/alias_analysis.cc | 2 +- .../compiler/xla/service/llvm_ir/ir_array.cc | 4 +- .../compiler/xla/service/llvm_ir/ir_array.h | 32 ++++++++++-- 11 files changed, 171 insertions(+), 42 deletions(-) diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc index 3c5b360c8ef..033034b4210 100644 --- a/tensorflow/compiler/xla/service/buffer_assignment.cc +++ b/tensorflow/compiler/xla/service/buffer_assignment.cc @@ -265,6 +265,42 @@ bool BufferAssignment::SharesSliceAtIndex( GetUniqueSlice(hlo_b, shape_index_b).ConsumeValueOrDie(); } +bool BufferAssignment::HaveDisjointSlices(const HloInstruction* hlo_a, + const HloInstruction* hlo_b) const { + using SliceSet = + FlatSet; + // Gets the slices all of instr's subshapes. If any subshape doesn't have an + // assigned slice, returns the empty set. + auto collect_slices = [&](const HloInstruction* instr) -> SliceSet { + SliceSet slices; + Status status = ShapeUtil::ForEachSubshapeWithStatus( + instr->shape(), + [&](const Shape& /*subshape*/, const ShapeIndex& index) { + auto shape_slices = GetAllSlices(instr, index); + if (shape_slices.empty()) { + return InvalidArgument("No slices assigned to part of instr."); + } + slices.insert(shape_slices.begin(), shape_slices.end()); + return Status::OK(); + }); + if (!status.ok()) { + return {}; + } + return slices; + }; + + SliceSet slices_a = collect_slices(hlo_a); + SliceSet slices_b = collect_slices(hlo_b); + // hlo_a and hlo_b have disjoint slices if collect_slices succeeded (i.e. + // didn't return the empty set) for both HLOs, and the two resulting sets of + // slices are disjoint. + return !slices_a.empty() && !slices_b.empty() && + std::none_of(slices_a.begin(), slices_a.end(), + [&](const BufferAllocation::Slice& slice) { + return slices_b.count(slice) > 0; + }); +} + StatusOr BufferAssignment::GetUniqueTopLevelOutputSlice() const { return GetUniqueTopLevelSlice( diff --git a/tensorflow/compiler/xla/service/buffer_assignment.h b/tensorflow/compiler/xla/service/buffer_assignment.h index 08a53af8baa..08a40bfeb2a 100644 --- a/tensorflow/compiler/xla/service/buffer_assignment.h +++ b/tensorflow/compiler/xla/service/buffer_assignment.h @@ -327,6 +327,12 @@ class BufferAssignment { return SharesSliceAtIndex(hlo_a, {}, hlo_b, {}); } + // Returns true if hlo_a and hlo_b both have at least one buffer assigned for + // their top-level and each of their nested shape indices, and if hlo_a's + // buffers are all different from hlo_b's buffers. + bool HaveDisjointSlices(const HloInstruction* hlo_a, + const HloInstruction* hlo_b) const; + // Returns the underlying points-to analysis used for this assignment. 
const TuplePointsToAnalysis& points_to_analysis() const { return liveness_->points_to_analysis(); diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc index 163a161353f..c2115c49993 100644 --- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc +++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc @@ -166,11 +166,46 @@ void HloToIrBindings::BindHloToIrValue(const HloInstruction& hlo, *(base_ptrs_[&hlo].mutable_element(shape_index)) = typed_ir_value; } +// Determines whether hlo's buffers are never modified within the execution of +// consumer. +static bool BuffersInvariantWithinConsumer( + const HloInstruction& hlo, const HloInstruction& consumer, + const BufferAssignment* buffer_assignment) { + // Check if consumer is inside a fusion node -- if so, "dereference" it until + // we get to a non-fusion node. + const HloInstruction* c = &consumer; + while (c->IsFused()) { + c = c->parent()->FusionInstruction(); + } + + // If, after dereferencing c, we end up with a node that's not inside our + // module's top-level computation (say our node is inside a while loop), we + // give up on marking array as invariant, because this HLO may be run multiple + // times (e.g. multiple while loop iterations, or multiple invocations of a + // reducer's computation). TODO(jlebar): We could relax this constraint if we + // emitted an llvm.invariant.group.barrier at the end of the computation. + return c->parent() == c->GetModule()->entry_computation() && + buffer_assignment->HaveDisjointSlices(&hlo, &consumer); +} + llvm_ir::IrArray HloToIrBindings::GetIrArray(const HloInstruction& hlo, + const HloInstruction& consumer, const ShapeIndex& shape_index) { llvm_ir::IrArray ir_array(GetBasePointer(hlo, shape_index), ShapeUtil::GetSubshape(hlo.shape(), shape_index)); alias_analysis_.AddAliasingInformationToIrArray(hlo, &ir_array); + + // The GPU backend emits one kernel per top-level HLO, and LLVM views + // execution of one kernel as the "whole program" executed on the GPU. + // Therefore if hlo's output buffer is not modified within consumer, and if + // consumer runs hlo only once (so that it doesn't create two different + // outputs), then we can mark ir_array as invariant over the whole program. + if (BuffersInvariantWithinConsumer(hlo, consumer, buffer_assignment_)) { + VLOG(2) << "Marking " << hlo.name() << " as invariant within " + << consumer.name(); + ir_array.MarkInvariantOverWholeProgram(&module_->getContext()); + } + return ir_array; } diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h index a3120f15bcb..62ae1769a1f 100644 --- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h +++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h @@ -76,8 +76,15 @@ class HloToIrBindings { return it->second.element(shape_index); } - // Return the underlying IrArray of the output of the given instruction. + // Returns the IrArray which contains the output of hlo. + // + // consumer is the HLO in which this IrArray is used -- we use this to (try + // to) add metadata indicating that the array is invariant within consumer. + // + // To get the buffer into which hlo should write its own output, call + // GetIrArray(hlo, hlo). 
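+ // For illustration only (this call site is hypothetical, not part of the
+ // patch): an emitter handling a top-level Add whose operand is a Parameter
+ // might fetch
+ //   llvm_ir::IrArray lhs = GetIrArray(*add->operand(0), /*consumer=*/*add);
+ //   llvm_ir::IrArray out = GetIrArray(*add, /*consumer=*/*add);
+ // The operand's array can then be marked invariant within the Add's kernel,
+ // since its buffer is disjoint from the Add's output buffer, while the
+ // output array, which the kernel itself writes, is not marked.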
llvm_ir::IrArray GetIrArray(const HloInstruction& hlo, + const HloInstruction& consumer, const ShapeIndex& shape_index = {}); private: diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc index af2a92e11e5..6e2bd4e11d3 100644 --- a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc +++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc @@ -68,7 +68,8 @@ Status IrEmitter::DefaultAction(HloInstruction* hlo) { ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator; for (const HloInstruction* operand : hlo->operands()) { operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) { - return GetIrArray(*operand).EmitReadArrayElement(index, &ir_builder_); + return GetIrArray(*operand, *hlo) + .EmitReadArrayElement(index, &ir_builder_); }; } return EmitTargetElementLoop( @@ -145,7 +146,8 @@ Status IrEmitter::HandleTuple(HloInstruction* tuple) { for (const HloInstruction* operand : tuple->operands()) { base_ptrs.push_back(GetBasePointer(*operand)); } - llvm_ir::EmitTuple(GetIrArray(*tuple), base_ptrs, &ir_builder_, module_); + llvm_ir::EmitTuple(GetIrArray(*tuple, *tuple), base_ptrs, &ir_builder_, + module_); return Status::OK(); } @@ -334,7 +336,8 @@ Status IrEmitter::HandleSelect(HloInstruction* select) { TF_RET_CHECK(pred->shape().element_type() == PRED); if (ShapeUtil::IsTuple(select->shape())) { - llvm_ir::EmitTupleSelect(GetIrArray(*select), GetIrArray(*pred), + llvm_ir::EmitTupleSelect(GetIrArray(*select, *select), + GetIrArray(*pred, *select), GetBasePointer(*on_true), GetBasePointer(*on_false), &ir_builder_, module_); return Status::OK(); @@ -349,9 +352,9 @@ Status IrEmitter::HandleSelect(HloInstruction* select) { Status IrEmitter::HandleDot(HloInstruction* dot) { auto lhs_instruction = dot->operand(0); auto rhs_instruction = dot->operand(1); - const llvm_ir::IrArray& target_array = GetIrArray(*dot); - const llvm_ir::IrArray& lhs_array = GetIrArray(*lhs_instruction); - const llvm_ir::IrArray& rhs_array = GetIrArray(*rhs_instruction); + const llvm_ir::IrArray& target_array = GetIrArray(*dot, *dot); + const llvm_ir::IrArray& lhs_array = GetIrArray(*lhs_instruction, *dot); + const llvm_ir::IrArray& rhs_array = GetIrArray(*rhs_instruction, *dot); const Shape& lhs_shape = lhs_instruction->shape(); const Shape& rhs_shape = rhs_instruction->shape(); @@ -571,7 +574,8 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce) { // Apply the reduction function to the loaded value. 
llvm::Value* input_address = - GetIrArray(*arg).EmitArrayElementAddress(input_index, &ir_builder_); + GetIrArray(*arg, *reduce) + .EmitArrayElementAddress(input_index, &ir_builder_); TF_RETURN_IF_ERROR(EmitCallToNestedComputation( *function, {accumulator_addr, input_address}, accumulator_addr)); @@ -587,7 +591,7 @@ Status IrEmitter::HandleFusion(HloInstruction* fusion) { std::vector parameter_arrays; for (HloInstruction* operand : fusion->operands()) { - parameter_arrays.push_back(GetIrArray(*operand)); + parameter_arrays.push_back(GetIrArray(*operand, *fusion)); } GpuElementalIrEmitter elemental_emitter(hlo_module_config_, module_, &ir_builder_, GetNestedComputer()); @@ -622,7 +626,8 @@ Status IrEmitter::HandleRng(HloInstruction* random) { ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator; for (const HloInstruction* operand : random->operands()) { operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) { - return GetIrArray(*operand).EmitReadArrayElement(index, &ir_builder_); + return GetIrArray(*operand, *random) + .EmitReadArrayElement(index, &ir_builder_); }; } // Emits a single-threaded loop because the loop body generated by the element @@ -631,7 +636,7 @@ Status IrEmitter::HandleRng(HloInstruction* random) { GpuElementalIrEmitter(hlo_module_config_, module_, &ir_builder_, GetNestedComputer()) .MakeElementGenerator(random, operand_to_generator), - GetIrArray(*random), &ir_builder_) + GetIrArray(*random, *random), &ir_builder_) .EmitLoop(IrName(random)); } diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.h b/tensorflow/compiler/xla/service/gpu/ir_emitter.h index 61fdeaa0ee7..9c01f5b7c72 100644 --- a/tensorflow/compiler/xla/service/gpu/ir_emitter.h +++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.h @@ -105,10 +105,16 @@ class IrEmitter : public DfsHloVisitorWithDefault { explicit IrEmitter(const HloModuleConfig& hlo_module_config, IrEmitterContext* ir_emitter_context, bool is_nested); - // A convenient helper for calling HloToIrBindings::GetIrArray. + // Helper for calling HloToIrBindings::GetIrArray. + // + // Gets the IrArray which contains inst. This array has metadata that makes + // it valid only within the IR that implements consumer. If you are + // implementing an HLO and want to get its own output buffer, call + // GetIrArray(hlo, hlo). llvm_ir::IrArray GetIrArray(const HloInstruction& inst, + const HloInstruction& consumer, const ShapeIndex& shape_index = {}) { - return bindings_.GetIrArray(inst, shape_index); + return bindings_.GetIrArray(inst, consumer, shape_index); } // A convenient helper for calling HloToIrBindings::GetBasePointer. 
llvm::Value* GetBasePointer(const HloInstruction& inst) const { diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc index 5da1a130d56..5225ff36ff3 100644 --- a/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc +++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc @@ -115,7 +115,8 @@ Status IrEmitterNested::HandleParameter(HloInstruction* parameter) { Status IrEmitterNested::EmitTargetElementLoop( const HloInstruction& hlo, const llvm_ir::ElementGenerator& element_generator) { - return llvm_ir::LoopEmitter(element_generator, GetIrArray(hlo), &ir_builder_) + return llvm_ir::LoopEmitter(element_generator, GetIrArray(hlo, hlo), + &ir_builder_) .EmitLoop(); } diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc index db78f4b84dc..1b863c9e3c5 100644 --- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc +++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc @@ -282,7 +282,7 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) { MakeUnique(std::move(thunks), fusion)); std::vector parameter_arrays; for (HloInstruction* operand : fusion->operands()) { - parameter_arrays.push_back(GetIrArray(*operand)); + parameter_arrays.push_back(GetIrArray(*operand, *fusion)); } GpuElementalIrEmitter elemental_emitter( hlo_module_config_, ir_emitter_context_->llvm_module(), @@ -344,7 +344,7 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) { thunk_sequence_->emplace_back(BuildKernelThunk(fusion)); std::vector operand_arrays; for (HloInstruction* operand : fusion->operands()) { - operand_arrays.push_back(GetIrArray(*operand)); + operand_arrays.push_back(GetIrArray(*operand, *fusion)); } GpuElementalIrEmitter elemental_emitter(hlo_module_config_, ir_emitter_context_->llvm_module(), @@ -355,7 +355,7 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) { // Array to write into. Because this is an in-place operation, this is the // same as operand 0's array. - llvm_ir::IrArray output_array = GetIrArray(*fusion); + llvm_ir::IrArray output_array = GetIrArray(*fusion, *fusion); LaunchDimensions launch_dimensions = CalculateLaunchDimensions( update_shape, ir_emitter_context_->device_description()); @@ -693,9 +693,10 @@ Status IrEmitterUnnested::HandleCopy(HloInstruction* copy) { constexpr int64 tile_size = 32; constexpr int64 num_rows = 8; int64 num_tiles = EmitTranspose021Tiled( - GetIrArray(*(copy->operand(0))) + GetIrArray(*copy->operand(0), *copy) .CastToShape(reduced_input_shape, &ir_builder_), - GetIrArray(*copy).CastToShape(reduced_output_shape, &ir_builder_), + GetIrArray(*copy, *copy) + .CastToShape(reduced_output_shape, &ir_builder_), tile_size, num_rows, &ir_builder_); UpdateLaunchDimensions(LaunchDimensions(num_tiles, num_rows * tile_size), LastThunk(), ir_emitter_context_->llvm_module()); @@ -850,9 +851,11 @@ Status IrEmitterUnnested::EmitColumnReduction( &ir_builder_); const HloInstruction* output = reduce->IsFused() ? 
reduce->parent()->FusionInstruction() : reduce; - llvm::Value* output_address = GetIrArray(*output).EmitArrayElementAddress( - llvm_ir::IrArray::Index(x, output->shape(), &ir_builder_), &ir_builder_, - "output_element_address"); + llvm::Value* output_address = + GetIrArray(*output, *output) + .EmitArrayElementAddress( + llvm_ir::IrArray::Index(x, output->shape(), &ir_builder_), + &ir_builder_, "output_element_address"); return EmitAtomicOperationForNestedComputation( *reducer, output_address, partial_reduction_result_address); }; @@ -1116,9 +1119,11 @@ Status IrEmitterUnnested::EmitRowReduction( "lane_id_is_zero", &ir_builder_); llvm_ir::SetToFirstInsertPoint(if_lane_id_is_zero_data.true_block, &ir_builder_); - llvm::Value* output_address = GetIrArray(*output).EmitArrayElementAddress( - llvm_ir::IrArray::Index(y, output->shape(), &ir_builder_), &ir_builder_, - "output_element_address"); + llvm::Value* output_address = + GetIrArray(*output, *output) + .EmitArrayElementAddress( + llvm_ir::IrArray::Index(y, output->shape(), &ir_builder_), + &ir_builder_, "output_element_address"); return EmitAtomicOperationForNestedComputation( *reducer, output_address, partial_reduction_result_address); }; @@ -1258,11 +1263,12 @@ Status IrEmitterUnnested::HandleReduce(HloInstruction* reduce) { MakeUnique(std::move(thunks), reduce)); return EmitReductionToVector( reduce, input->shape(), - [this, input](const llvm_ir::IrArray::Index& index) { - return GetIrArray(*input).EmitReadArrayElement(index, &ir_builder_); + [&](const llvm_ir::IrArray::Index& index) { + return GetIrArray(*input, *reduce) + .EmitReadArrayElement(index, &ir_builder_); }, - [this, init_value](const llvm_ir::IrArray::Index& index) { - return GetIrArray(*init_value) + [&](const llvm_ir::IrArray::Index& index) { + return GetIrArray(*init_value, *reduce) .EmitReadArrayElement(index, &ir_builder_); }, dimensions_to_reduce, reducer); @@ -1426,7 +1432,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter( ir_builder_.CreateStore(operand_index[i], selected_index_address_slot); } }; - llvm_ir::IrArray operand_array(GetIrArray(*operand)); + llvm_ir::IrArray operand_array = GetIrArray(*operand, *select_and_scatter); llvm::Value* operand_data = operand_array.EmitReadArrayElement(operand_index, &ir_builder_); ir_builder_.CreateStore(operand_data, selected_value_address); @@ -1479,9 +1485,10 @@ Status IrEmitterUnnested::HandleSelectAndScatter( ir_builder_.CreateLoad(selected_index_address_slot)); } llvm::Value* source_value_address = - GetIrArray(*source).EmitArrayElementAddress(source_index, &ir_builder_); + GetIrArray(*source, *select_and_scatter) + .EmitArrayElementAddress(source_index, &ir_builder_); llvm::Value* output_value_address = - GetIrArray(*select_and_scatter) + GetIrArray(*select_and_scatter, *select_and_scatter) .EmitArrayElementAddress(selected_index, &ir_builder_); return EmitAtomicOperationForNestedComputation( *select_and_scatter->scatter(), output_value_address, @@ -1758,7 +1765,7 @@ Status IrEmitterUnnested::EmitInitializer(const HloInstruction* hlo, return EmitTargetElementLoopInThunk( *hlo, [=](const llvm_ir::IrArray::Index& index) { - return GetIrArray(*init_value) + return GetIrArray(*init_value, *hlo) .EmitReadArrayElement(index, &ir_builder_); }, thunk); @@ -1859,7 +1866,7 @@ Status IrEmitterUnnested::EmitTargetElementLoopInThunk( UpdateLaunchDimensions(launch_dimensions, thunk, ir_emitter_context_->llvm_module()); if (!hlo.IsMultiOutputFusion()) { - return ParallelLoopEmitter(element_generator, GetIrArray(hlo), + return 
ParallelLoopEmitter(element_generator, GetIrArray(hlo, hlo), launch_dimensions, &ir_builder_) .EmitLoop(IrName(&hlo)); } @@ -1867,7 +1874,7 @@ Status IrEmitterUnnested::EmitTargetElementLoopInThunk( // For multiple outputs fusion, we need to emit each operand and the root. std::vector output_arrays; for (int64 i = 0; i < ShapeUtil::TupleElementCount(hlo.shape()); ++i) { - output_arrays.push_back(GetIrArray(hlo, {i})); + output_arrays.push_back(GetIrArray(hlo, hlo, {i})); } TF_RETURN_IF_ERROR(ParallelLoopEmitter(element_generator, output_arrays, launch_dimensions, &ir_builder_) @@ -1878,7 +1885,7 @@ Status IrEmitterUnnested::EmitTargetElementLoopInThunk( tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer()); } ir_builder_.SetInsertPoint(ir_builder_.GetInsertBlock()->getTerminator()); - llvm_ir::EmitTuple(GetIrArray(hlo), tuple_operand_ptrs, &ir_builder_, + llvm_ir::EmitTuple(GetIrArray(hlo, hlo), tuple_operand_ptrs, &ir_builder_, module_); return Status::OK(); } diff --git a/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc index bdddc232ef7..21bca1d6bef 100644 --- a/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc +++ b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc @@ -83,7 +83,7 @@ void AliasAnalysis::AddAliasingInformationToIrArray(const HloInstruction& hlo, if (std::find(parameter_instructions.begin(), parameter_instructions.end(), &hlo) != parameter_instructions.end()) { - array->AddInvariantLoad(llvm::MDNode::get(*context_, /*MDs=*/{})); + array->MarkInvariantOverWholeProgram(context_); } } } diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc index e3f98ac13e7..7224bd68984 100644 --- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc +++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc @@ -256,10 +256,10 @@ void IrArray::AnnotateLoadStoreInstructionWithMetadata( llvm::Instruction* instruction) const { CHECK(llvm::isa(instruction) || llvm::isa(instruction)); + CHECK(!llvm::isa(instruction) || !is_invariant_) + << "Trying to create a store to an invariant IRArray."; for (const auto& kind_md_pair : metadata_) { - CHECK(kind_md_pair.first != llvm::LLVMContext::MD_invariant_load || - llvm::isa(instruction)); instruction->setMetadata(kind_md_pair.first, kind_md_pair.second); } } diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h index 1ed7e99a829..387d4629125 100644 --- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h +++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h @@ -229,9 +229,33 @@ class IrArray { AddMetadata(llvm::LLVMContext::MD_noalias, noalias); } - void AddInvariantLoad(llvm::MDNode* invariant_load) { - CHECK_NE(invariant_load, nullptr); - AddMetadata(llvm::LLVMContext::MD_invariant_load, invariant_load); + // Promises LLVM that the data pointed to by this IrArray never changes after + // it's first loaded. + // + // The temporal scope of this promise is the "whole program" from LLVM's point + // of view, but how this translates to HLOs differs between backends. + // + // In the single-threaded CPU backend, we emit one function that + // runs all the HLOs in sequence, so the whole program is the whole HLO + // module. + // + // In the GPU backend, we emit one GPU kernel per top-level HLO (i.e. per HLO + // in the entry computation). 
From LLVM's perspective, launching a new kernel + // is like launching a new program, and so the whole program is one top-level + // HLO. Since the scope of the promise is smaller than in the CPU backend, we + // can mark more things as invariant in the GPU backend. + // + // Marking loads as invariant is particularly helpful on GPUs because + // invariant loads can be lowered to PTX ld.global.nc (equivalent to CUDA's + // __ldg intrinsic). These loads use a special cache, and can be + // significantly faster than regular loads. + void MarkInvariantOverWholeProgram(llvm::LLVMContext* context) { + if (is_invariant_) { + return; + } + is_invariant_ = true; + AddMetadata(llvm::LLVMContext::MD_invariant_load, + llvm::MDNode::get(*context, {})); } const std::map& metadata() const { return metadata_; } @@ -261,6 +285,8 @@ class IrArray { // loads/stores for this array. They keys are the metadata kinds and the // values are the metadata nodes. std::map metadata_; + + bool is_invariant_ = false; }; } // namespace llvm_ir From 0c19b61a4073aaf191340a02a766ebe238bc7e56 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 14 Nov 2017 21:36:38 -0800 Subject: [PATCH 049/104] Add a control dependency optimizer to Grappler. The first two rewrites implemented are: 1. Turn nodes with only control outputs into NoOps, if we know that they are safe to remove. Such nodes can be produced, e.g., by rewrite rules in the arithmetic optimizer. 2. Completely disconnect NoOp nodes with at most 1 input or at most 1 output by rerouting their inputs to their outputs. The restriction on fan-in/fan-out guarantees that we reduce the number of control dependencies in the graph. The two (slightly) non-trivial cases are: // Case a) // x --^> +------+ x --^> +---+ // y --^> | NoOp | --^> a ==> y --^> | a | // ... | | ... | | // z --^> +------+ z --^> +---+ // // Case b) // +------+ --^> a +---+ --^> a // x --^> | NoOp | --^> b ==> | x | --^> b // | | ... | | ... 
// +------+ --^> c +---+ --^> c PiperOrigin-RevId: 175780178 --- tensorflow/core/grappler/optimizers/BUILD | 42 +++ .../optimizers/arithmetic_optimizer.cc | 8 +- .../optimizers/arithmetic_optimizer.h | 6 +- .../grappler/optimizers/constant_folding.cc | 1 - .../optimizers/dependency_optimizer.cc | 278 ++++++++++++++++++ .../optimizers/dependency_optimizer.h | 68 +++++ .../optimizers/dependency_optimizer_test.cc | 201 +++++++++++++ .../grappler/optimizers/meta_optimizer.cc | 15 +- tensorflow/core/grappler/utils.cc | 7 +- tensorflow/core/grappler/utils_test.cc | 8 + .../core/protobuf/rewriter_config.proto | 2 + 11 files changed, 626 insertions(+), 10 deletions(-) create mode 100644 tensorflow/core/grappler/optimizers/dependency_optimizer.cc create mode 100644 tensorflow/core/grappler/optimizers/dependency_optimizer.h create mode 100644 tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc diff --git a/tensorflow/core/grappler/optimizers/BUILD b/tensorflow/core/grappler/optimizers/BUILD index 54004a5e07f..dbfa8ae503f 100644 --- a/tensorflow/core/grappler/optimizers/BUILD +++ b/tensorflow/core/grappler/optimizers/BUILD @@ -194,6 +194,47 @@ tf_cc_test( ], ) +cc_library( + name = "dependency_optimizer", + srcs = ["dependency_optimizer.cc"], + hdrs = [ + "dependency_optimizer.h", + ], + visibility = ["//visibility:public"], + deps = [ + ":arithmetic_optimizer", + ":constant_folding", + ":graph_optimizer", + "//tensorflow/core:framework", + "//tensorflow/core:lib", + "//tensorflow/core:lib_internal", + "//tensorflow/core:protos_all_cc", + "//tensorflow/core/grappler:grappler_item", + "//tensorflow/core/grappler:op_types", + "//tensorflow/core/grappler:utils", + "//tensorflow/core/grappler/costs:graph_properties", + "//tensorflow/core/grappler/utils:frame", + ], +) + +tf_cc_test( + name = "dependency_optimizer_test", + size = "small", + srcs = ["dependency_optimizer_test.cc"], + deps = [ + ":constant_folding", + ":dependency_optimizer", + ":model_pruner", + "//tensorflow/cc:cc_ops", + "//tensorflow/core:protos_all_cc", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + "//tensorflow/core/grappler:grappler_item", + "//tensorflow/core/grappler:utils", + "//tensorflow/core/grappler/inputs:trivial_test_graph_input_yielder", + ], +) + cc_library( name = "model_pruner", srcs = ["model_pruner.cc"], @@ -311,6 +352,7 @@ cc_library( ":arithmetic_optimizer", ":auto_parallel", ":constant_folding", + ":dependency_optimizer", ":graph_optimizer", ":layout_optimizer", ":memory_optimizer", diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc index 5cce34e2a61..0cd0d4351e9 100644 --- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.cc @@ -512,8 +512,10 @@ bool UniqueNodes::SameNode(const NodeDef& node1, const NodeDef& node2) const { return true; } -bool ArithmeticOptimizer::CanDedup(const NodeDef& node) const { - if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) { +// static +bool ArithmeticOptimizer::CanDedup( + const NodeDef& node, const std::unordered_set& nodes_to_preserve) { + if (nodes_to_preserve.find(node.name()) != nodes_to_preserve.end()) { return false; } if (IsEnter(node) || IsExit(node) || IsPlaceholder(node)) { @@ -551,7 +553,7 @@ void ArithmeticOptimizer::DedupComputations(GraphDef* optimized_graph) const { continue; } NodeDef* node = optimized_graph->mutable_node(i); - if (!CanDedup(*node)) { + if 
(!CanDedup(*node, nodes_to_preserve_)) { continue; } NodeDef* rep = nodes.FindOrAddRepresentative(node); diff --git a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h index 4d2e160ff48..c8cc292295c 100644 --- a/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h +++ b/tensorflow/core/grappler/optimizers/arithmetic_optimizer.h @@ -28,6 +28,11 @@ namespace grappler { // run a model. class ArithmeticOptimizer : public GraphOptimizer { public: + // Returns true if it is safe to dedup node from the graph. + // TODO(rmlarsen): Refactor to op_types.{h,cc}. + static bool CanDedup(const NodeDef& node, + const std::unordered_set& nodes_to_preserve); + ArithmeticOptimizer() : opt_level_(RewriterConfig::ON) {} explicit ArithmeticOptimizer(RewriterConfig::Toggle opt_level) : opt_level_(opt_level) {} @@ -42,7 +47,6 @@ class ArithmeticOptimizer : public GraphOptimizer { const GraphDef& optimized_graph, double result) override; private: - bool CanDedup(const NodeDef& node) const; void DedupComputations(GraphDef* optimized_graph) const; // Runs peep-hole optimizations on `optimized_graph`, e.g., removing inverse // transposes. diff --git a/tensorflow/core/grappler/optimizers/constant_folding.cc b/tensorflow/core/grappler/optimizers/constant_folding.cc index 02a732b0923..993831c4121 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding.cc +++ b/tensorflow/core/grappler/optimizers/constant_folding.cc @@ -122,7 +122,6 @@ string ConstantFolding::AddControlDependency(const string& input_name, auto outputs = node_map->GetOutputs(node->name()); for (const NodeDef* node : outputs) { if (IsIdentity(*node)) { - CHECK_EQ(1, node->input_size()); if (IsSameInput(node->input(0), input_name)) { return AsControlDependency(*node); } diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc new file mode 100644 index 00000000000..49eb29d0371 --- /dev/null +++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.cc @@ -0,0 +1,278 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/core/grappler/optimizers/dependency_optimizer.h" + +#include + +#include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/grappler/costs/graph_properties.h" +#include "tensorflow/core/grappler/grappler_item.h" +#include "tensorflow/core/grappler/op_types.h" +#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h" +#include "tensorflow/core/grappler/optimizers/constant_folding.h" +#include "tensorflow/core/grappler/utils/frame.h" +#include "tensorflow/core/lib/core/errors.h" +#include "tensorflow/core/lib/core/stringpiece.h" +#include "tensorflow/core/lib/strings/strcat.h" +#include "tensorflow/core/util/device_name_utils.h" + +namespace tensorflow { +namespace grappler { + +namespace { +// A vector with a set. The set stores the same elements as the vector, and +// quickly answers whether a value is in the vector. Duplicated elements are not +// allowed for now. +template +class SetVector { + public: + // Returns false if value already existed in the set, true otherwise. + bool PushBack(const T& value) { + if (!set_.insert(value).second) { + return false; + } + vector_.push_back(value); + return true; + } + + T PopBack() { + T back = vector_.back(); + set_.erase(back); + vector_.pop_back(); + return back; + } + + bool Exists(const T& value) const { return set_.count(value); } + + bool Empty() const { return vector_.empty(); } + + void Reserve(int64 size) { vector_.reserve(size); } + + private: + std::unordered_set set_; + std::vector vector_; +}; + +bool HasRegularOutputs(const NodeDef& node, const NodeMap& node_map) { + for (const NodeDef* output : node_map.GetOutputs(node.name())) { + for (const string& input : output->input()) { + if (input == node.name()) { + return true; + } + } + } + return false; +} + +int FindInputSlot(const NodeDef& node, const string& input) { + for (int i = 0; i < node.input_size(); ++i) { + if (node.input(i) == input) { + return i; + } + } + return -1; +} + +} // namespace + +bool DependencyOptimizer::SafeToConvertToNoOp(const NodeDef& node) { + if (!has_fetch_ || HasRegularOutputs(node, *node_map_)) { + return false; + } + + if (IsMerge(node)) { + return false; + } + if (!ArithmeticOptimizer::CanDedup(node, nodes_to_preserve_)) { + return false; + } + + const OpDef* op_def = nullptr; + Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def); + if (!status.ok() || op_def->output_arg_size() == 0) { + return false; + } + + // TODO(rmlarsen): We have to skip Const nodes to make + // core/debug/debug_gateway_test pass. See if we can fix that test. + // TODO(rmlarsen): We have to skip Identity nodes to make an obsolete test in + // python/training/session_manager_test.py pass. See if we can fix or get rid + // of that test. + const std::unordered_set do_not_rewrite_ops = { + "Assert", "CheckNumerics", "Const", "Identity", "_Retval", + "_Arg", "_ParallelConcatUpdate", "_TPUExecute"}; + return do_not_rewrite_ops.find(node.op()) == do_not_rewrite_ops.end(); +} + +string DependencyOptimizer::TryOptimizeDependencies( + NodeDef* node, GraphDef* graph, std::vector* new_nodes) { + // Change ops that only have control dependencies as outputs to NoOps. + if (node->op() != "NoOp" && SafeToConvertToNoOp(*node)) { + VLOG(2) << "***** Replacing " << node->name() << " (" << node->op() + << ") with NoOp."; + // The outputs of this node are not consumed. 
Replace its inputs with + // control dependencies and replace the op itself with the NoOp op. + for (int i = 0; i < node->input_size(); ++i) { + const string& old_input = node->input(i); + if (IsControlInput(old_input)) { + continue; + } + const string ctrl_input = ConstantFolding::AddControlDependency( + old_input, graph, node_map_.get()); + node->set_input(i, ctrl_input); + node_map_->UpdateInput(node->name(), old_input, ctrl_input); + new_nodes->push_back(node_map_->GetNode(old_input)); + } + node->set_op("NoOp"); + node->clear_attr(); + new_nodes->push_back(node); + return ""; + } + + // Remove NoOp nodes if their fan-in or fan-out is less than 2. + // The non-trivial rewrites take the following form: + // + // Case a) + // x --^> +------+ x --^> +---+ + // y --^> | NoOp | --^> a ==> y --^> | a | + // ... | | ... | | + // z --^> +------+ z --^> +---+ + // + // Case b) + // +------+ --^> a +---+ --^> a + // x --^> | NoOp | --^> b ==> | x | --^> b + // | | ... | | ... + // +------+ --^> c +---+ --^> c + if (node->op() == "NoOp" && + nodes_to_preserve_.find(node->name()) == nodes_to_preserve_.end()) { + auto outputs = node_map_->GetOutputs(node->name()); + const int num_outputs = outputs.size(); + const int num_inputs = node->input_size(); + if (num_inputs > 1 && num_outputs > 1) { + return ""; + } + + for (auto consumer : outputs) { + for (int i = 0; i < num_inputs; ++i) { + const string& input = node->input(i); + // Forward dependencies from inputs to consumer if it doesn't already + // depend on it. + if (node_map_->GetOutputs(input).count(consumer) == 0) { + consumer->add_input(ConstantFolding::AddControlDependency( + input, graph, node_map_.get())); + node_map_->AddOutput(NodeName(input), consumer->name()); + } + new_nodes->push_back(node_map_->GetNode(input)); + } + // Remove dependency on node from consumer. + int pos = FindInputSlot(*consumer, AsControlDependency(node->name())); + if (pos >= 0) { + consumer->mutable_input()->SwapElements(pos, + consumer->input_size() - 1); + consumer->mutable_input()->RemoveLast(); + node_map_->RemoveOutput(node->name(), consumer->name()); + new_nodes->push_back(consumer); + } + } + + // Clear all control inputs to node. + node_map_->RemoveInputs(node->name()); + node->clear_input(); + return ""; + } + + return ""; +} + +Status DependencyOptimizer::OptimizeDependencies(GraphDef* optimized_graph) { + // TODO(rmlarsen,bsteiner): The folloing code is similar to the control loop + // in the ArithmeticOptimizer. Dedup this. + SetVector nodes_to_simplify; + for (int i = 0; i < optimized_graph->node_size(); ++i) { + const NodeDef& node = optimized_graph->node(i); + if (node.op() == "NoOp" || SafeToConvertToNoOp(node)) { + nodes_to_simplify.PushBack(optimized_graph->mutable_node()->Mutable(i)); + } + } + while (!nodes_to_simplify.Empty()) { + NodeDef* node = nodes_to_simplify.PopBack(); + std::vector new_nodes; + const string simplified_tensor = + TryOptimizeDependencies(node, optimized_graph, &new_nodes); + if (simplified_tensor.empty()) { + continue; + } + if (NodeName(simplified_tensor) != node->name()) { + // Always consider simplified_tensor for further optimizations. + NodeDef* simplified_node = node_map_->GetNode(simplified_tensor); + if (simplified_node != nullptr) { + nodes_to_simplify.PushBack(simplified_node); + } + // When `node` is simplifed to another node rather than in-place, the + // consumers of `node` are already redirected to `simplified_tensor`. + // Re-push the consumers into `nodes_to_simplify` for further + // optimizations. 
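A minimal sketch of the de-duplicating worklist that the SetVector helper at the top of this new file provides (this snippet is not part of the patch; it restates the helper in self-contained form, assumes only the C++ standard library, and the "add"/"mul" node names are illustrative):

#include <cassert>
#include <string>
#include <unordered_set>
#include <vector>

// A vector paired with a set: the vector gives LIFO worklist order, the set
// rejects duplicate entries in O(1).
template <typename T>
class SetVector {
 public:
  // Returns false if the value was already present, true otherwise.
  bool PushBack(const T& value) {
    if (!set_.insert(value).second) return false;
    vector_.push_back(value);
    return true;
  }
  T PopBack() {
    T back = vector_.back();
    set_.erase(back);
    vector_.pop_back();
    return back;
  }
  bool Exists(const T& value) const { return set_.count(value) > 0; }
  bool Empty() const { return vector_.empty(); }

 private:
  std::unordered_set<T> set_;
  std::vector<T> vector_;
};

int main() {
  SetVector<std::string> nodes_to_simplify;
  nodes_to_simplify.PushBack("add");
  assert(!nodes_to_simplify.PushBack("add"));    // duplicate push is a no-op
  nodes_to_simplify.PushBack("mul");
  assert(nodes_to_simplify.PopBack() == "mul");  // most recently pushed first
  assert(nodes_to_simplify.Exists("add"));
  assert(!nodes_to_simplify.Empty());
  return 0;
}

The re-pushing of consumers below relies on exactly this behavior: a node that is already queued in `nodes_to_simplify` is not enqueued a second time.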
+ std::set consumers = node_map_->GetOutputs(node->name()); + for (NodeDef* consumer : consumers) { + // Update `consumer`'s use of `node` to `input`'s operand. + for (int i = 0; i < consumer->input_size(); ++i) { + int operand_pos; + string operand_node_name = + ParseNodeName(consumer->input(i), &operand_pos); + if (operand_node_name == node->name()) { + *consumer->mutable_input(i) = + (operand_pos < 0 + ? AsControlDependency(NodeName(simplified_tensor)) + : simplified_tensor); + } + VLOG(2) << "Update input " << consumer->input(i) << " of " + << consumer->name() << " to " << simplified_tensor; + } + node_map_->UpdateInput(consumer->name(), node->name(), + simplified_tensor); + nodes_to_simplify.PushBack(consumer); + } + } + for (auto new_node : new_nodes) { + nodes_to_simplify.PushBack(new_node); + } + } + return Status::OK(); +} + +Status DependencyOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item, + GraphDef* optimized_graph) { + *optimized_graph = item.graph; + nodes_to_preserve_ = item.NodesToPreserve(); + node_map_.reset(new NodeMap(optimized_graph)); + has_fetch_ = !item.fetch.empty(); + VLOG(2) << "Graph before optimization:\n" << optimized_graph->DebugString(); + TF_RETURN_IF_ERROR(OptimizeDependencies(optimized_graph)); + VLOG(2) << "Graph after optimization:\n" << optimized_graph->DebugString(); + + return Status::OK(); +} + +void DependencyOptimizer::Feedback(Cluster* /*cluster*/, + const GrapplerItem& /*item*/, + const GraphDef& /*optimized_graph*/, + double /*result*/) { + // Nothing to do for DependencyOptimizer. +} + +} // end namespace grappler +} // end namespace tensorflow diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer.h b/tensorflow/core/grappler/optimizers/dependency_optimizer.h new file mode 100644 index 00000000000..13ece87aff3 --- /dev/null +++ b/tensorflow/core/grappler/optimizers/dependency_optimizer.h @@ -0,0 +1,68 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef THIRD_PARTY_TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DEPENDENCY_OPTIMIZER_H_ +#define THIRD_PARTY_TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DEPENDENCY_OPTIMIZER_H_ + +#include +#include "tensorflow/core/grappler/optimizers/graph_optimizer.h" +#include "tensorflow/core/grappler/utils.h" +#include "tensorflow/core/protobuf/rewriter_config.pb.h" + +namespace tensorflow { +namespace grappler { + +// Optimize TF computations by removing control dependencies or re-arranging +// them to shorten the critical path for a model step or enable other +// optimizations, such as removing nodes that are effectively noops. 
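A hedged usage sketch (not part of the patch) of the class declared just below, mirroring how the unit tests added in dependency_optimizer_test.cc drive it; the wrapper name OptimizeControlDependencies is illustrative:

#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/dependency_optimizer.h"

namespace tensorflow {
namespace grappler {

// Rewrites `item.graph` and stores the result in `*output`. As in the tests,
// no Cluster is required for this pass, so nullptr is passed.
Status OptimizeControlDependencies(const GrapplerItem& item, GraphDef* output) {
  DependencyOptimizer optimizer;  // The MetaOptimizer instead constructs it
                                  // with cfg.dependency_optimization().
  return optimizer.Optimize(/*cluster=*/nullptr, item, output);
}

}  // namespace grappler
}  // namespace tensorflow

Note that the conversion of nodes to NoOps only fires when `item.fetch` is non-empty (see `has_fetch_`), which the ChangeToNoop_NoFetch test below exercises.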
+class DependencyOptimizer : public GraphOptimizer { + public: + DependencyOptimizer() : opt_level_(RewriterConfig::ON) {} + explicit DependencyOptimizer(RewriterConfig::Toggle opt_level) + : opt_level_(opt_level) {} + ~DependencyOptimizer() override {} + + string name() const override { return "dependency_optimizer"; }; + + Status Optimize(Cluster* cluster, const GrapplerItem& item, + GraphDef* optimized_graph) override; + + void Feedback(Cluster* cluster, const GrapplerItem& item, + const GraphDef& optimized_graph, double result) override; + + private: + // Returns true if it is safe to convert node to NoOp. + bool SafeToConvertToNoOp(const NodeDef& node); + + Status OptimizeDependencies(GraphDef* optimized_graph); + // Tries to simplify the expression that roots at `node` and replaces the uses + // of `node` to the simplified expression. Returns the name of the simplified + // tensor (e.g. "split:1") or an empty string if no simplification is + // performed. + string TryOptimizeDependencies(NodeDef* node, GraphDef* graph, + std::vector* new_nodes); + + bool HasOnlyControlOutputs(const NodeDef* node); + + bool has_fetch_; + RewriterConfig::Toggle opt_level_; + std::unordered_set nodes_to_preserve_; + std::unique_ptr node_map_; +}; + +} // end namespace grappler +} // end namespace tensorflow + +#endif // THIRD_PARTY_TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DEPENDENCY_OPTIMIZER_H_ diff --git a/tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc b/tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc new file mode 100644 index 00000000000..d54d7b2093e --- /dev/null +++ b/tensorflow/core/grappler/optimizers/dependency_optimizer_test.cc @@ -0,0 +1,201 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/core/grappler/optimizers/dependency_optimizer.h" +#include "tensorflow/cc/ops/standard_ops.h" +#include "tensorflow/core/framework/node_def.pb.h" +#include "tensorflow/core/grappler/grappler_item.h" +#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h" +#include "tensorflow/core/grappler/optimizers/constant_folding.h" +#include "tensorflow/core/grappler/optimizers/model_pruner.h" +#include "tensorflow/core/grappler/utils.h" +#include "tensorflow/core/lib/core/status_test_util.h" +#include "tensorflow/core/platform/test.h" + +namespace tensorflow { +namespace grappler { +namespace { + +class DependencyOptimizerTest : public ::testing::Test {}; + +void VerifyGraphsEqual(const GraphDef& original_graph, + const GraphDef& optimized_graph, const string& func) { + EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func; + for (int i = 0; i < original_graph.node_size(); ++i) { + const NodeDef& original = original_graph.node(i); + const NodeDef& optimized = optimized_graph.node(i); + EXPECT_EQ(original.name(), optimized.name()) << func; + EXPECT_EQ(original.op(), optimized.op()) << func; + EXPECT_EQ(original.input_size(), optimized.input_size()) << func; + for (int j = 0; j < original.input_size(); ++j) { + EXPECT_EQ(original.input(j), optimized.input(j)) << func; + } + } +} + +TEST_F(DependencyOptimizerTest, NoOp) { + // This trivial graph is so basic there's nothing to optimize. + TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"}); + GrapplerItem item; + CHECK(fake_input.NextItem(&item)); + + DependencyOptimizer optimizer; + GraphDef output; + Status status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + VerifyGraphsEqual(item.graph, output, __FUNCTION__); +} + +TEST_F(DependencyOptimizerTest, ChangeToNoop) { + tensorflow::Scope s = tensorflow::Scope::NewRootScope(); + Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); + Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2}); + Output add = ops::Add(s.WithOpName("add"), x, y); + Output id1 = + ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x); + Output id2 = + ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y); + + GrapplerItem item; + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + item.fetch.push_back("id1"); + item.fetch.push_back("id2"); + + DependencyOptimizer optimizer; + GraphDef output; + Status status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + // Run the optimizer twice to make sure the rewrite is idempotent. 
+ item.graph.Swap(&output); + status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + EXPECT_EQ(item.graph.node_size(), output.node_size()); + for (int i = 0; i < item.graph.node_size(); ++i) { + const NodeDef& original = item.graph.node(i); + const NodeDef& optimized = output.node(i); + EXPECT_EQ(original.name(), optimized.name()); + if (original.name() == "add") { + EXPECT_EQ("NoOp", optimized.op()); + } else { + EXPECT_EQ(original.op(), optimized.op()); + } + EXPECT_EQ(original.input_size(), optimized.input_size()); + for (int j = 0; j < original.input_size(); ++j) { + if (original.name() == "add") { + EXPECT_EQ(AsControlDependency(original.input(j)), optimized.input(j)); + } else { + EXPECT_EQ(original.input(j), optimized.input(j)); + } + } + } +} + +TEST_F(DependencyOptimizerTest, ChangeToNoop_NoFetch) { + tensorflow::Scope s = tensorflow::Scope::NewRootScope(); + Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); + Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2}); + Output add = ops::Add(s.WithOpName("add"), x, y); + Output id1 = + ops::Identity(s.WithOpName("id1").WithControlDependencies(add), x); + Output id2 = + ops::Identity(s.WithOpName("id2").WithControlDependencies(add), y); + + GrapplerItem item; + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + + DependencyOptimizer optimizer; + GraphDef output; + Status status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + VerifyGraphsEqual(item.graph, output, __FUNCTION__); +} + +TEST_F(DependencyOptimizerTest, RemoveNoOps_EmptyInputOrOutput) { + tensorflow::Scope s = tensorflow::Scope::NewRootScope(); + Output x = ops::Const(s, {1.0f, 2.0f}, {1, 2}); + auto noop1 = ops::NoOp(s); + auto noop2 = ops::NoOp(s.WithControlDependencies(x)); + Output id = ops::Identity(s.WithControlDependencies({noop1.operation}), x); + + GrapplerItem item; + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + item.fetch.push_back("Identity"); + + DependencyOptimizer optimizer; + GraphDef output; + Status status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + // Run the optimizer twice to make sure the rewrite is idempotent. + item.graph.Swap(&output); + status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + EXPECT_EQ(item.graph.node_size(), output.node_size()); + for (const NodeDef& node : output.node()) { + if (node.name() == "NoOp" || node.name() == "NoOp_1") { + EXPECT_EQ(0, node.input_size()); + } else if (node.name() == "Identity") { + EXPECT_EQ(1, node.input_size()); + EXPECT_EQ("Const", node.input(0)); + } + } +} + +TEST_F(DependencyOptimizerTest, RemoveNoOps_SingleInputOrOutput) { + tensorflow::Scope s = tensorflow::Scope::NewRootScope(); + Output x = ops::Const(s.WithOpName("x"), {1.0f, 2.0f}, {1, 2}); + Output y = ops::Const(s.WithOpName("y"), {1.0f, 2.0f}, {1, 2}); + // NoOp with a single input- and two output dependencies. + auto noop = ops::NoOp(s.WithControlDependencies(x)); + // NoOp with a two input- and a single output dependency. 
+ auto noop_1 = + ops::NoOp(s.WithControlDependencies(x).WithControlDependencies(y)); + Output id = ops::Identity(s.WithControlDependencies({noop.operation}), x); + Output id_1 = ops::Identity( + s.WithControlDependencies({noop.operation, noop_1.operation}), y); + + GrapplerItem item; + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + item.fetch.push_back("Identity"); + item.fetch.push_back("Identity_1"); + + DependencyOptimizer optimizer; + GraphDef output; + Status status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + // Run the optimizer twice to make sure the rewrite is idempotent. + item.graph.Swap(&output); + status = optimizer.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + EXPECT_EQ(item.graph.node_size(), output.node_size()); + for (const NodeDef& node : output.node()) { + if (node.name() == "NoOp" || node.name() == "NoOp_1") { + EXPECT_EQ(0, node.input_size()); + } else if (node.name() == "Identity") { + EXPECT_EQ("x", node.input(0)); + } else if (node.name() == "Identity_1") { + EXPECT_EQ("y", node.input(0)); + EXPECT_EQ("^x", node.input(1)); + } + } +} + +} // namespace +} // namespace grappler +} // namespace tensorflow diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.cc b/tensorflow/core/grappler/optimizers/meta_optimizer.cc index eb04bc6e9a9..1e93900e6a6 100644 --- a/tensorflow/core/grappler/optimizers/meta_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/meta_optimizer.cc @@ -19,6 +19,7 @@ limitations under the License. #include "tensorflow/core/grappler/optimizers/arithmetic_optimizer.h" #include "tensorflow/core/grappler/optimizers/auto_parallel.h" #include "tensorflow/core/grappler/optimizers/constant_folding.h" +#include "tensorflow/core/grappler/optimizers/dependency_optimizer.h" #include "tensorflow/core/grappler/optimizers/graph_optimizer.h" #include "tensorflow/core/grappler/optimizers/layout_optimizer.h" #include "tensorflow/core/grappler/optimizers/memory_optimizer.h" @@ -53,6 +54,10 @@ std::unique_ptr MetaOptimizer::NewOptimizer( graph_optimizer.reset( new AutoParallel(cfg_.auto_parallel().num_replicas())); } + if (optimizer == "dependency") { + graph_optimizer.reset( + new DependencyOptimizer(cfg_.dependency_optimization())); + } return graph_optimizer; } @@ -71,6 +76,10 @@ Status MetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item, optimizers.push_back(std::unique_ptr( new ArithmeticOptimizer(cfg_.arithmetic_optimization()))); } + if (cfg_.dependency_optimization() != RewriterConfig::OFF) { + optimizers.push_back(std::unique_ptr( + new DependencyOptimizer(cfg_.dependency_optimization()))); + } if (cfg_.layout_optimizer() == RewriterConfig::ON) { optimizers.push_back( std::unique_ptr(new LayoutOptimizer())); @@ -92,9 +101,9 @@ Status MetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item, new AutoParallel(cfg_.auto_parallel().num_replicas()))); } } else { - std::set available_optimizers = {"pruning", "constfold", - "layout", "memory", - "autoparallel", "arithmetic"}; + std::set available_optimizers = { + "pruning", "constfold", "layout", "memory", + "autoparallel", "arithmetic", "dependency"}; for (const auto& optimizer : cfg_.optimizers()) { if (available_optimizers.find(optimizer) != available_optimizers.end()) { optimizers.push_back(NewOptimizer(optimizer)); diff --git a/tensorflow/core/grappler/utils.cc b/tensorflow/core/grappler/utils.cc index 11bd8fa5cb3..66f8c537ede 100644 --- a/tensorflow/core/grappler/utils.cc +++ b/tensorflow/core/grappler/utils.cc @@ -221,8 +221,11 @@ 
string AsControlDependency(const NodeDef& node) { return strings::StrCat("^", node.name()); } -string AsControlDependency(const string& node) { - return strings::StrCat("^", node); +string AsControlDependency(const string& node_name) { + CHECK(!node_name.empty()); + return (!node_name.empty() && node_name[0] == '^') + ? node_name + : strings::StrCat("^", node_name); } int NumOutputs(const NodeDef& node) { diff --git a/tensorflow/core/grappler/utils_test.cc b/tensorflow/core/grappler/utils_test.cc index 3193b3ec4a6..9d747fe7dc4 100644 --- a/tensorflow/core/grappler/utils_test.cc +++ b/tensorflow/core/grappler/utils_test.cc @@ -181,6 +181,14 @@ TEST_F(UtilsTest, NumOutputs) { EXPECT_EQ(1, NumOutputs(CreateDequeueNode())); } +TEST(AsControlDependency, BasicTest) { + NodeDef node; + node.set_name("foo"); + EXPECT_EQ("^foo", AsControlDependency(node)); + EXPECT_EQ("^foo", AsControlDependency(node.name())); + EXPECT_EQ("^foo", AsControlDependency("^foo")); +} + } // namespace } // namespace grappler } // namespace tensorflow diff --git a/tensorflow/core/protobuf/rewriter_config.proto b/tensorflow/core/protobuf/rewriter_config.proto index eb74d4b1c56..96b55ce04ba 100644 --- a/tensorflow/core/protobuf/rewriter_config.proto +++ b/tensorflow/core/protobuf/rewriter_config.proto @@ -35,6 +35,8 @@ message RewriterConfig { Toggle constant_folding = 3; // Arithmetic optimizations (default is ON) Toggle arithmetic_optimization = 7; + // Control dependency optimizations (default is ON). + Toggle dependency_optimization = 8; // If true, don't remove unnecessary ops from the graph bool disable_model_pruning = 2; From c154d4719eea88e694f4c06bcb1249dbac0f7877 Mon Sep 17 00:00:00 2001 From: Derek Murray Date: Wed, 15 Nov 2017 08:56:17 -0800 Subject: [PATCH 050/104] Do not log an error when `tf.py_func()` raises `StopIteration`. This reduces log spam when using `tf.py_func()` to wrap a Python generator, such as when using `tf.data.Dataset.from_generator()`. PiperOrigin-RevId: 175831781 --- tensorflow/python/lib/core/py_func.cc | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/tensorflow/python/lib/core/py_func.cc b/tensorflow/python/lib/core/py_func.cc index a62847614c6..b30125761fc 100644 --- a/tensorflow/python/lib/core/py_func.cc +++ b/tensorflow/python/lib/core/py_func.cc @@ -176,7 +176,8 @@ string PyExcFetch() { } // Calls the registered py function through the trampoline. -Status DoCallPyFunc(PyCall* call) { +Status DoCallPyFunc(PyCall* call, bool* out_log_on_error) { + *out_log_on_error = true; PyObject* trampoline = GetPyTrampoline(); if (trampoline == nullptr) { return errors::InvalidArgument( @@ -196,6 +197,7 @@ Status DoCallPyFunc(PyCall* call) { PyErr_ExceptionMatches(PyExc_TypeError)) { return errors::InvalidArgument(PyExcFetch()); } else if (PyErr_ExceptionMatches(PyExc_StopIteration)) { + *out_log_on_error = false; return errors::OutOfRange(PyExcFetch()); } else if (PyErr_ExceptionMatches(PyExc_MemoryError)) { return errors::ResourceExhausted(PyExcFetch()); @@ -426,11 +428,19 @@ class PyFuncOp : public OpKernel { PyGILState_STATE py_threadstate; py_threadstate = PyGILState_Ensure(); - Status s = DoCallPyFunc(&call); + bool log_on_error; + Status s = DoCallPyFunc(&call, &log_on_error); PyGILState_Release(py_threadstate); // Ensures that GIL is released even when !s.ok(). 
- OP_REQUIRES_OK(ctx, s); + if (!s.ok()) { + if (log_on_error) { + ctx->CtxFailureWithWarning(s); + } else { + ctx->CtxFailure(s); + } + return; + } OP_REQUIRES(ctx, static_cast(call.out.size()) == ctx->num_outputs(), errors::InvalidArgument(token_, " returns ", call.out.size(), From ab815ecae8cad8f6776d4e0f38a0b6a24cee23ba Mon Sep 17 00:00:00 2001 From: Derek Murray Date: Wed, 15 Nov 2017 09:41:46 -0800 Subject: [PATCH 051/104] Remove `FunctionDefLibrary` argument from `Device::MaybeRewriteGraph()`. No existing subclass of `Device` reads that argument, and it incurs a protobuf serialization of the function library each time a new subgraph is created in `DirectSession::Run()`. PiperOrigin-RevId: 175837162 --- tensorflow/core/common_runtime/device.h | 5 +---- tensorflow/core/common_runtime/direct_session.cc | 6 +----- tensorflow/core/common_runtime/renamed_device.h | 5 ++--- tensorflow/core/distributed_runtime/graph_mgr.cc | 3 +-- 4 files changed, 5 insertions(+), 14 deletions(-) diff --git a/tensorflow/core/common_runtime/device.h b/tensorflow/core/common_runtime/device.h index 674111dbe69..3912cd177b6 100644 --- a/tensorflow/core/common_runtime/device.h +++ b/tensorflow/core/common_runtime/device.h @@ -110,12 +110,9 @@ class Device : public DeviceBase { // prototyping of TensorFlow device implementations that need to modify // the GraphDef before execution. // - // 'library' provides access to the function library which is shared - // between all device partitions. // 'graph' supplies the partition of the graph assigned to this // device. - virtual Status MaybeRewriteGraph(const FunctionDefLibrary& /*library*/, - std::unique_ptr* /*graph*/) { + virtual Status MaybeRewriteGraph(std::unique_ptr* /*graph*/) { return Status::OK(); } diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc index d652b1004ff..2f57164dcd8 100644 --- a/tensorflow/core/common_runtime/direct_session.cc +++ b/tensorflow/core/common_runtime/direct_session.cc @@ -1419,11 +1419,7 @@ Status DirectSession::CreateGraphs( Device* d; s = device_mgr_->LookupDevice(partition_name, &d); if (!s.ok()) break; - // TODO(pbar) The library is currently shared and immutable. There - // may be possible use cases where a device may want to modify - // function definitions - in which case the library would need to be - // replicated per device. 
- s = d->MaybeRewriteGraph(client_graph->flib_def->ToProto(), graph); + s = d->MaybeRewriteGraph(graph); if (!s.ok()) { break; } diff --git a/tensorflow/core/common_runtime/renamed_device.h b/tensorflow/core/common_runtime/renamed_device.h index 22a70fbdfae..3103ca07512 100644 --- a/tensorflow/core/common_runtime/renamed_device.h +++ b/tensorflow/core/common_runtime/renamed_device.h @@ -104,9 +104,8 @@ class RenamedDevice : public Device { Status Sync() override { return underlying_->Sync(); } - Status MaybeRewriteGraph(const FunctionDefLibrary& library, - std::unique_ptr* graph) override { - return underlying_->MaybeRewriteGraph(library, graph); + Status MaybeRewriteGraph(std::unique_ptr* graph) override { + return underlying_->MaybeRewriteGraph(graph); } Status FillContextMap(const Graph* graph, diff --git a/tensorflow/core/distributed_runtime/graph_mgr.cc b/tensorflow/core/distributed_runtime/graph_mgr.cc index 391ffda25c0..60d58af61da 100644 --- a/tensorflow/core/distributed_runtime/graph_mgr.cc +++ b/tensorflow/core/distributed_runtime/graph_mgr.cc @@ -208,8 +208,7 @@ Status GraphMgr::InitItem(const string& session, const GraphDef& gdef, } // Give the device an opportunity to rewrite its subgraph. - TF_RETURN_IF_ERROR( - unit->device->MaybeRewriteGraph(gdef.library(), &subgraph)); + TF_RETURN_IF_ERROR(unit->device->MaybeRewriteGraph(&subgraph)); // Top-level nodes in the graph uses the op segment to cache // kernels. Therefore, as long as the executor is alive, we need From b29cf93407acaa8a2f32d118e88d5d9b440a1d5c Mon Sep 17 00:00:00 2001 From: Jianwei Xie Date: Wed, 15 Nov 2017 10:55:12 -0800 Subject: [PATCH 052/104] Correct the logging message as (very likely) we will support queue-based input pipeline. PiperOrigin-RevId: 175848995 --- tensorflow/contrib/tpu/python/tpu/tpu_estimator.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py index 07877fcc761..97b2d25e0cf 100644 --- a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py +++ b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py @@ -946,23 +946,14 @@ class _InputPipeline(object): # user code, so, log a warning. if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS): err_msg = ('Input pipeline contains one or more QueueRunners. ' - 'These are not supported via TPUEstimator. You must convert ' - 'your input pipeline to use `tf.data` instead (see ' + 'It could be slow and not scalable. Please consider ' + 'converting your input pipeline to use `tf.data` instead (see ' 'https://www.tensorflow.org/programmers_guide/datasets for ' 'instructions.') if _WRAP_INPUT_FN_INTO_WHILE_LOOP: raise RuntimeError(err_msg) else: logging.warn(err_msg) - elif ops.get_default_graph().get_collection(ops.GraphKeys.SUMMARIES): - # Queue Runner has summary Ops by default. So here we use elif to do - # necessary checks for Dataset input pipeline only. - err_msg = ('Input pipeline contains `tf.summary` operations. ' - 'These are not currently supported.') - if _WRAP_INPUT_FN_INTO_WHILE_LOOP: - raise RuntimeError(err_msg) - else: - logging.warn(err_msg) class _ModelFnWrapper(object): From 0c9b03ca3d6017734fc6ea7517556c65d7bb9f90 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 11:12:03 -0800 Subject: [PATCH 053/104] Update layout_optimizer gradient test to use a variable for the input instead of a constant. Otherwise the dependency optimizer eliminates the Conv2DBackpropInput. 
PiperOrigin-RevId: 175851832 --- tensorflow/python/grappler/layout_optimizer_test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow/python/grappler/layout_optimizer_test.py b/tensorflow/python/grappler/layout_optimizer_test.py index 9ac33fbb4ad..99a4d23b6aa 100644 --- a/tensorflow/python/grappler/layout_optimizer_test.py +++ b/tensorflow/python/grappler/layout_optimizer_test.py @@ -187,7 +187,8 @@ class LayoutOptimizerTest(test.TestCase): self.skipTest('GPU required') random_seed.set_random_seed(0) - x = random_ops.truncated_normal([1, 200, 200, 3], seed=0) + x = variables.Variable( + random_ops.truncated_normal([1, 200, 200, 3], seed=0)) y = conv_layers.conv2d(x, 32, [3, 3]) z = conv_layers.conv2d(y, 32, [3, 3]) optimizer = gradient_descent.GradientDescentOptimizer(1e-4) From 289af8e7460e69edc106e834b7fbeee17811f1ea Mon Sep 17 00:00:00 2001 From: Sanjoy Das Date: Wed, 15 Nov 2017 11:16:39 -0800 Subject: [PATCH 054/104] Move dot-related helpers into dot_op_emitter.cc This keeps related logic together in a single file. PiperOrigin-RevId: 175852532 --- tensorflow/compiler/xla/service/cpu/BUILD | 4 +- .../compiler/xla/service/cpu/cpu_compiler.cc | 1 + .../xla/service/cpu/dot_op_emitter.cc | 115 ++++++++++++++++- .../compiler/xla/service/cpu/dot_op_emitter.h | 20 +++ .../xla/service/cpu/ir_emission_utils.cc | 117 ------------------ .../xla/service/cpu/ir_emission_utils.h | 21 ---- .../xla/service/cpu/layout_assignment.cc | 1 + .../service/cpu/parallel_task_assignment.cc | 1 + 8 files changed, 140 insertions(+), 140 deletions(-) diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD index 4f6e69ebd4e..89e8d07200f 100644 --- a/tensorflow/compiler/xla/service/cpu/BUILD +++ b/tensorflow/compiler/xla/service/cpu/BUILD @@ -83,6 +83,7 @@ cc_library( ":cpu_options", ":cpu_parallelization_preparation", ":disassembler", + ":dot_op_emitter", ":ir_emission_utils", ":ir_emitter", ":layout_assignment", @@ -282,7 +283,6 @@ cc_library( deps = [ ":cpu_options", ":cpu_runtime", - ":ir_emission_utils", "//tensorflow/compiler/xla:shape_util", "//tensorflow/compiler/xla:status_macros", "//tensorflow/compiler/xla:types", @@ -619,6 +619,7 @@ cc_library( srcs = ["layout_assignment.cc"], hdrs = ["layout_assignment.h"], deps = [ + ":dot_op_emitter", ":ir_emission_utils", "//tensorflow/compiler/xla:util", "//tensorflow/compiler/xla/service:computation_layout", @@ -706,6 +707,7 @@ cc_library( srcs = ["parallel_task_assignment.cc"], hdrs = ["parallel_task_assignment.h"], deps = [ + ":dot_op_emitter", ":ir_emission_utils", ":shape_partition", "//tensorflow/compiler/xla/service:hlo", diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc index d2202252d95..def801d9d69 100644 --- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc +++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc @@ -54,6 +54,7 @@ limitations under the License. 
#include "tensorflow/compiler/xla/service/cpu/cpu_options.h" #include "tensorflow/compiler/xla/service/cpu/cpu_parallelization_preparation.h" #include "tensorflow/compiler/xla/service/cpu/disassembler.h" +#include "tensorflow/compiler/xla/service/cpu/dot_op_emitter.h" #include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h" #include "tensorflow/compiler/xla/service/cpu/ir_emitter.h" #include "tensorflow/compiler/xla/service/cpu/layout_assignment.h" diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc index 2a447a54b01..4c40dae5122 100644 --- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc +++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc @@ -23,7 +23,6 @@ limitations under the License. #include "llvm/IR/Module.h" #include "llvm/IR/Value.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h" -#include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h" #include "tensorflow/compiler/xla/service/hlo_module.h" #include "tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h" #include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h" @@ -950,5 +949,119 @@ llvm_ir::IrArray::Index DotOpEmitter::EmitOperandArrayLoopNest( return index; } +// Return whether the given shape is a matrix with no padding. +static bool IsRank2WithNoPadding(const Shape& shape) { + return ShapeUtil::Rank(shape) == 2 && !LayoutUtil::IsPadded(shape); +} + +// In a gemm operation where output = lhs * rhs, check whether the given shapes +// are valid for the operation. +static bool AreValidGemmShapes(const Shape& lhs_shape, const Shape& rhs_shape, + const Shape& output_shape) { + // The inputs and the output must + // 1) be matrices with no padding, and + // 2) have an allowed element type. + return output_shape.element_type() == F32 && + IsRank2WithNoPadding(lhs_shape) && IsRank2WithNoPadding(rhs_shape) && + IsRank2WithNoPadding(output_shape); +} + +bool PotentiallyImplementedAsEigenDot(const HloInstruction& hlo) { + // For certain types of Dot, we can call Eigen + if (hlo.opcode() == HloOpcode::kDot) { + const Shape& lhs_shape = hlo.operand(0)->shape(); + const Shape& rhs_shape = hlo.operand(1)->shape(); + + if (ShapeUtil::HasZeroElements(lhs_shape) || + ShapeUtil::HasZeroElements(rhs_shape)) { + return false; + } + + if (ProfitableToImplementDotInUntiledLlvmIr(hlo) == + DotInLlvmIrProfitable::kYes || + ProfitableToImplementDotInTiledLlvmIr(hlo)) { + return false; + } + + // If gemm can accept the operand shapes, use it rather than a custom + // kernel. + if (AreValidGemmShapes(lhs_shape, rhs_shape, hlo.shape())) { + // The size of the reduction dimension should match. The shape inference + // guarantees this invariant, so the check here is for programming + // errors. 
+ CHECK_EQ(lhs_shape.dimensions(1), rhs_shape.dimensions(0)); + return true; + } + } + + if (hlo.opcode() == HloOpcode::kFusion && + hlo.fusion_kind() == HloInstruction::FusionKind::kTransposeDot && + hlo.fused_expression_root()->opcode() == HloOpcode::kDot) { + auto* dot = hlo.fused_expression_root(); + const Shape& lhs_shape = dot->operand(0)->shape(); + const Shape& rhs_shape = dot->operand(1)->shape(); + if (ShapeUtil::HasZeroElements(lhs_shape) || + ShapeUtil::HasZeroElements(rhs_shape)) { + return false; + } + return true; + } + + return false; +} + +DotInLlvmIrProfitable ProfitableToImplementDotInUntiledLlvmIr( + const HloInstruction& dot) { + if (dot.opcode() == HloOpcode::kDot && dot.shape().dimensions_size() == 2) { + const Shape& result_shape = dot.shape(); + // kReductionDimensionThresholdBytes was chosen to be 1/4 of a typical L1 + // cache line size, so that we can have the reduction dimension of both the + // LHS and RHS matrices and still have some space "left over". This needs + // to be tuned further. + const int64 kReductionDimensionThresholdBytes = 8 * 1024; + const bool single_threaded_eigen = + !dot.GetModule()->config().debug_options().xla_cpu_multi_thread_eigen(); + + // This is the point at which it is better to call into Eigen and shard the + // dot across multiple worker threads. This is a rough estimate by running + // a matmult benchmark on my local machine, and it can be tuned further. + const int64 kMaxSingleThreadedFlops = 16 * 1024; + + const int64 M = result_shape.dimensions(0); + const int64 N = result_shape.dimensions(1); + const int64 K = dot.operand(1)->shape().dimensions(0); + const int64 primitive_type_size = + ShapeUtil::ByteSizeOfPrimitiveType(result_shape.element_type()); + if (M == 1 && + K * primitive_type_size <= kReductionDimensionThresholdBytes && + (single_threaded_eigen || M * K * N <= kMaxSingleThreadedFlops)) { + // Heuristics: + // + // - Look for a configuration where we will likely be able to keep LHS in + // L1 and do a cache-optimal traversal of RHS. + // + // - Bail out on matrices that are large enough that Eigen can profitably + // shard the computation across multiple cores. This only applies when + // multi-threading is enabled. + return LayoutUtil::IsMonotonicWithDim0Major( + dot.operand(1)->shape().layout()) + ? DotInLlvmIrProfitable::kWithColumnMajorRhs + : DotInLlvmIrProfitable::kYes; + } + } + return DotInLlvmIrProfitable::kNo; +} + +bool ProfitableToImplementDotInTiledLlvmIr(const HloInstruction& dot) { + // Any Matrix-Vector product of floating point or integral type, or + // a transpose-dot fusion of the same can be lowered to a tiled LLVM + // IR implementation. + const Shape& shape = dot.shape(); + return shape.dimensions_size() == 2 && + (shape.dimensions(0) == 1 || shape.dimensions(1) == 1) && + (primitive_util::IsFloatingPointType(shape.element_type()) || + primitive_util::IsIntegralType(shape.element_type())); +} + } // namespace cpu } // namespace xla diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h index 470bf6ffb4c..c9168ccc0f6 100644 --- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h +++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h @@ -30,6 +30,26 @@ limitations under the License. 
namespace xla { namespace cpu { +bool PotentiallyImplementedAsEigenDot(const HloInstruction& hlo); + +enum class DotInLlvmIrProfitable { kYes, kNo, kWithColumnMajorRhs }; + +// Returns a value to indicate if (and under what conditions) will lowering +// |dot| as a untiled LLVM IR dot operation be profitable over calling into +// Eigen or emitting a tiled LLVM IR implementation. Possible return values +// are: +// +// * DotInLlvmIrProfitable::kYes - always profitable. +// * DotInLlvmIrProfitable::kNo - never profitable. +// * DotInLlvmIrProfitable::kWithColumnMajorRhs - only if we can manage to make +// the Rhs layout column major. +DotInLlvmIrProfitable ProfitableToImplementDotInUntiledLlvmIr( + const HloInstruction& dot); + +// Returns true to indicate that we can generate a tiled LLVM IR implementation +// for |dot|. +bool ProfitableToImplementDotInTiledLlvmIr(const HloInstruction& dot); + // Helper class for emitting LLVM IR to perform the dot operation. class DotOpEmitter { public: diff --git a/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc b/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc index 7149a193107..cb5cb8a6dd6 100644 --- a/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc +++ b/tensorflow/compiler/xla/service/cpu/ir_emission_utils.cc @@ -74,122 +74,5 @@ bool PotentiallyImplementedAsEigenConvolution( kernel_shape.dimensions_size() - 1; } -namespace { - -// Return whether the given shape is a matrix with no padding. -bool IsRank2WithNoPadding(const Shape& shape) { - return ShapeUtil::Rank(shape) == 2 && !LayoutUtil::IsPadded(shape); -} - -// In a gemm operation where output = lhs * rhs, check whether the given shapes -// are valid for the operation. -bool AreValidGemmShapes(const Shape& lhs_shape, const Shape& rhs_shape, - const Shape& output_shape) { - // The inputs and the output must - // 1) be matrices with no padding, and - // 2) have an allowed element type. - return output_shape.element_type() == F32 && - IsRank2WithNoPadding(lhs_shape) && IsRank2WithNoPadding(rhs_shape) && - IsRank2WithNoPadding(output_shape); -} -} // namespace - -bool PotentiallyImplementedAsEigenDot(const HloInstruction& hlo) { - // For certain types of Dot, we can call Eigen - if (hlo.opcode() == HloOpcode::kDot) { - const Shape& lhs_shape = hlo.operand(0)->shape(); - const Shape& rhs_shape = hlo.operand(1)->shape(); - - if (ShapeUtil::HasZeroElements(lhs_shape) || - ShapeUtil::HasZeroElements(rhs_shape)) { - return false; - } - - if (ProfitableToImplementDotInUntiledLlvmIr(hlo) == - DotInLlvmIrProfitable::kYes || - ProfitableToImplementDotInTiledLlvmIr(hlo)) { - return false; - } - - // If gemm can accept the operand shapes, use it rather than a custom - // kernel. - if (AreValidGemmShapes(lhs_shape, rhs_shape, hlo.shape())) { - // The size of the reduction dimension should match. The shape inference - // guarantees this invariant, so the check here is for programming - // errors. 
- CHECK_EQ(lhs_shape.dimensions(1), rhs_shape.dimensions(0)); - return true; - } - } - - if (hlo.opcode() == HloOpcode::kFusion && - hlo.fusion_kind() == HloInstruction::FusionKind::kTransposeDot && - hlo.fused_expression_root()->opcode() == HloOpcode::kDot) { - auto* dot = hlo.fused_expression_root(); - const Shape& lhs_shape = dot->operand(0)->shape(); - const Shape& rhs_shape = dot->operand(1)->shape(); - if (ShapeUtil::HasZeroElements(lhs_shape) || - ShapeUtil::HasZeroElements(rhs_shape)) { - return false; - } - return true; - } - - return false; -} - -DotInLlvmIrProfitable ProfitableToImplementDotInUntiledLlvmIr( - const HloInstruction& dot) { - if (dot.opcode() == HloOpcode::kDot && dot.shape().dimensions_size() == 2) { - const Shape& result_shape = dot.shape(); - // kReductionDimensionThresholdBytes was chosen to be 1/4 of a typical L1 - // cache line size, so that we can have the reduction dimension of both the - // LHS and RHS matrices and still have some space "left over". This needs - // to be tuned further. - const int64 kReductionDimensionThresholdBytes = 8 * 1024; - const bool single_threaded_eigen = - !dot.GetModule()->config().debug_options().xla_cpu_multi_thread_eigen(); - - // This is the point at which it is better to call into Eigen and shard the - // dot across multiple worker threads. This is a rough estimate by running - // a matmult benchmark on my local machine, and it can be tuned further. - const int64 kMaxSingleThreadedFlops = 16 * 1024; - - const int64 M = result_shape.dimensions(0); - const int64 N = result_shape.dimensions(1); - const int64 K = dot.operand(1)->shape().dimensions(0); - const int64 primitive_type_size = - ShapeUtil::ByteSizeOfPrimitiveType(result_shape.element_type()); - if (M == 1 && - K * primitive_type_size <= kReductionDimensionThresholdBytes && - (single_threaded_eigen || M * K * N <= kMaxSingleThreadedFlops)) { - // Heuristics: - // - // - Look for a configuration where we will likely be able to keep LHS in - // L1 and do a cache-optimal traversal of RHS. - // - // - Bail out on matrices that are large enough that Eigen can profitably - // shard the computation across multiple cores. This only applies when - // multi-threading is enabled. - return LayoutUtil::IsMonotonicWithDim0Major( - dot.operand(1)->shape().layout()) - ? DotInLlvmIrProfitable::kWithColumnMajorRhs - : DotInLlvmIrProfitable::kYes; - } - } - return DotInLlvmIrProfitable::kNo; -} - -bool ProfitableToImplementDotInTiledLlvmIr(const HloInstruction& dot) { - // Any Matrix-Vector product of floating point or integral type, or - // a transpose-dot fusion of the same can be lowered to a tiled LLVM - // IR implementation. 
- const Shape& shape = dot.shape(); - return shape.dimensions_size() == 2 && - (shape.dimensions(0) == 1 || shape.dimensions(1) == 1) && - (primitive_util::IsFloatingPointType(shape.element_type()) || - primitive_util::IsIntegralType(shape.element_type())); -} - } // namespace cpu } // namespace xla diff --git a/tensorflow/compiler/xla/service/cpu/ir_emission_utils.h b/tensorflow/compiler/xla/service/cpu/ir_emission_utils.h index cbe07a7c2b9..ac361ddfb4c 100644 --- a/tensorflow/compiler/xla/service/cpu/ir_emission_utils.h +++ b/tensorflow/compiler/xla/service/cpu/ir_emission_utils.h @@ -23,27 +23,6 @@ namespace cpu { bool PotentiallyImplementedAsEigenConvolution( const HloInstruction& convolution); - -bool PotentiallyImplementedAsEigenDot(const HloInstruction& dot); - -enum class DotInLlvmIrProfitable { kYes, kNo, kWithColumnMajorRhs }; - -// Returns a value to indicate if (and under what conditions) will lowering -// |dot| as a untiled LLVM IR dot operation be profitable over calling into -// Eigen or emitting a tiled LLVM IR implementation. Possible return values -// are: -// -// * DotInLlvmIrProfitable::kYes - always profitable. -// * DotInLlvmIrProfitable::kNo - never profitable. -// * DotInLlvmIrProfitable::kWithColumnMajorRhs - only if we can manage to make -// the Rhs layout column major. -DotInLlvmIrProfitable ProfitableToImplementDotInUntiledLlvmIr( - const HloInstruction& dot); - -// Returns true to indicate that we can generate a tiled LLVM IR implementation -// for |dot|. -bool ProfitableToImplementDotInTiledLlvmIr(const HloInstruction& dot); - } // namespace cpu } // namespace xla diff --git a/tensorflow/compiler/xla/service/cpu/layout_assignment.cc b/tensorflow/compiler/xla/service/cpu/layout_assignment.cc index b75ca34e0a8..3f2d101959d 100644 --- a/tensorflow/compiler/xla/service/cpu/layout_assignment.cc +++ b/tensorflow/compiler/xla/service/cpu/layout_assignment.cc @@ -18,6 +18,7 @@ limitations under the License. #include #include "tensorflow/compiler/xla/map_util.h" +#include "tensorflow/compiler/xla/service/cpu/dot_op_emitter.h" #include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h" #include "tensorflow/core/lib/core/errors.h" diff --git a/tensorflow/compiler/xla/service/cpu/parallel_task_assignment.cc b/tensorflow/compiler/xla/service/cpu/parallel_task_assignment.cc index 4a62a80fac0..4b44ac8941e 100644 --- a/tensorflow/compiler/xla/service/cpu/parallel_task_assignment.cc +++ b/tensorflow/compiler/xla/service/cpu/parallel_task_assignment.cc @@ -15,6 +15,7 @@ limitations under the License. #include "tensorflow/compiler/xla/service/cpu/parallel_task_assignment.h" +#include "tensorflow/compiler/xla/service/cpu/dot_op_emitter.h" #include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h" #include "tensorflow/compiler/xla/service/cpu/shape_partition.h" #include "tensorflow/compiler/xla/service/hlo_computation.h" From b7b183b90aee8a4f4808f7d90a2c7a54a942e640 Mon Sep 17 00:00:00 2001 From: "A. 
Unique TensorFlower" Date: Wed, 15 Nov 2017 11:23:55 -0800 Subject: [PATCH 055/104] Add support for Squeeze Op quantization PiperOrigin-RevId: 175853708 --- .../lite/toco/graph_transformations/hardcode_min_max.cc | 5 +++-- .../contrib/lite/toco/graph_transformations/quantize.cc | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc b/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc index d44b5dc7b02..9cb26c8752c 100644 --- a/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc +++ b/tensorflow/contrib/lite/toco/graph_transformations/hardcode_min_max.cc @@ -143,7 +143,7 @@ bool HardcodeMinMaxForAverageOrMaxPool(Model* model, Operator* op) { return true; } -bool HardcodeMinMaxForReshape(Model* model, Operator* op) { +bool HardcodeMinMaxForReshapeOrSqueeze(Model* model, Operator* op) { auto& output_array = model->GetArray(op->outputs[0]); if (output_array.minmax) { return false; @@ -201,8 +201,9 @@ bool HardcodeMinMax::Run(Model* model, std::size_t op_index) { changed = HardcodeMinMaxForAverageOrMaxPool(model, op); break; + case OperatorType::kSqueeze: case OperatorType::kTensorFlowReshape: - changed = HardcodeMinMaxForReshape(model, op); + changed = HardcodeMinMaxForReshapeOrSqueeze(model, op); break; case OperatorType::kLogistic: diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc index 5551755ea7f..d33597d3814 100644 --- a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc +++ b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc @@ -42,6 +42,7 @@ bool SupportsQuantization(const Operator& op) { type == OperatorType::kL2Normalization || type == OperatorType::kAdd || type == OperatorType::kAveragePool || type == OperatorType::kMaxPool || type == OperatorType::kLogistic || type == OperatorType::kSoftmax || + type == OperatorType::kSqueeze || type == OperatorType::kTensorFlowReshape || type == OperatorType::kMul || type == OperatorType::kSpaceToDepth || type == OperatorType::kDepthToSpace; From 6fb721d608c4cd3855fe8793099a629428b9853c Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Wed, 15 Nov 2017 11:31:43 -0800 Subject: [PATCH 056/104] Add graph writer op to contrib/summary This change also defines a simple SQL data model for tf.GraphDef, which should move us closer to a world where TensorBoard can render the graph explorer without having to download the entire thing to the browser, as that could potentially be hundreds of megabytes. 
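A sketch (not part of the patch) of the intended graph-mode flow, assembled from the docstrings added to summary_ops.py below; the log directory path is illustrative:

import tensorflow as tf

# Build the model graph first, then create a writer and make it the default
# for this thread so that initialize() can find it.
writer = tf.contrib.summary.create_summary_file_writer('/tmp/summaries')
writer.set_as_default()
tf.train.create_global_step()  # step counter consumed by the summary ops

with tf.Session() as sess:
  # Runs the writer's initializer op and records the graph once, at step 0.
  tf.contrib.summary.initialize(graph=tf.get_default_graph(), session=sess)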
PiperOrigin-RevId: 175854921 --- tensorflow/contrib/summary/BUILD | 29 +- tensorflow/contrib/summary/summary.py | 3 + tensorflow/contrib/summary/summary_ops.py | 153 ++++++++-- .../contrib/summary/summary_ops_graph_test.py | 52 ++++ .../contrib/summary/summary_ops_test.py | 47 ++- .../contrib/summary/summary_test_internal.py | 59 ++++ tensorflow/contrib/tensorboard/db/schema.cc | 143 +++++---- .../tensorboard/db/summary_db_writer.cc | 272 +++++++++++++++--- .../tensorboard/db/summary_db_writer_test.cc | 78 +++++ tensorflow/core/kernels/BUILD | 1 + tensorflow/core/kernels/summary_interface.cc | 10 + tensorflow/core/kernels/summary_interface.h | 4 + tensorflow/core/kernels/summary_kernels.cc | 25 ++ tensorflow/core/ops/summary_ops.cc | 13 + .../tools/pip_package/pip_smoke_test.py | 3 + 15 files changed, 754 insertions(+), 138 deletions(-) create mode 100644 tensorflow/contrib/summary/summary_ops_graph_test.py create mode 100644 tensorflow/contrib/summary/summary_test_internal.py diff --git a/tensorflow/contrib/summary/BUILD b/tensorflow/contrib/summary/BUILD index d1beafcb28d..3892654f257 100644 --- a/tensorflow/contrib/summary/BUILD +++ b/tensorflow/contrib/summary/BUILD @@ -25,13 +25,12 @@ py_test( srcs_version = "PY2AND3", deps = [ ":summary_ops", + ":summary_test_internal", ":summary_test_util", "//tensorflow/python:array_ops", "//tensorflow/python:errors", "//tensorflow/python:framework", "//tensorflow/python:framework_test_lib", - "//tensorflow/python:math_ops", - "//tensorflow/python:ops", "//tensorflow/python:platform", "//tensorflow/python:state_ops", "//tensorflow/python:training", @@ -41,6 +40,20 @@ py_test( ], ) +py_test( + name = "summary_ops_graph_test", + srcs = ["summary_ops_graph_test.py"], + srcs_version = "PY2AND3", + deps = [ + ":summary_ops", + ":summary_test_internal", + "//tensorflow/python:client_testlib", + "//tensorflow/python:ops", + "//tensorflow/python:platform", + "//tensorflow/python:training", + ], +) + py_library( name = "summary_ops", srcs = ["summary_ops.py"], @@ -98,3 +111,15 @@ py_library( "//tensorflow/python:platform", ], ) + +py_library( + name = "summary_test_internal", + testonly = 1, + srcs = ["summary_test_internal.py"], + srcs_version = "PY2AND3", + visibility = ["//visibility:private"], + deps = [ + "//tensorflow/python:lib", + "//tensorflow/python:platform", + ], +) diff --git a/tensorflow/contrib/summary/summary.py b/tensorflow/contrib/summary/summary.py index 813e8b2b09d..a73193f4608 100644 --- a/tensorflow/contrib/summary/summary.py +++ b/tensorflow/contrib/summary/summary.py @@ -32,11 +32,14 @@ from tensorflow.contrib.summary.summary_ops import create_summary_db_writer from tensorflow.contrib.summary.summary_ops import create_summary_file_writer from tensorflow.contrib.summary.summary_ops import eval_dir from tensorflow.contrib.summary.summary_ops import generic +from tensorflow.contrib.summary.summary_ops import graph from tensorflow.contrib.summary.summary_ops import histogram from tensorflow.contrib.summary.summary_ops import image from tensorflow.contrib.summary.summary_ops import import_event +from tensorflow.contrib.summary.summary_ops import initialize from tensorflow.contrib.summary.summary_ops import never_record_summaries from tensorflow.contrib.summary.summary_ops import record_summaries_every_n_global_steps from tensorflow.contrib.summary.summary_ops import scalar from tensorflow.contrib.summary.summary_ops import should_record_summaries from tensorflow.contrib.summary.summary_ops import summary_writer_initializer_op +from 
tensorflow.contrib.summary.summary_ops import SummaryWriter diff --git a/tensorflow/contrib/summary/summary_ops.py b/tensorflow/contrib/summary/summary_ops.py index f6be99f6ae8..a72c0c80aab 100644 --- a/tensorflow/contrib/summary/summary_ops.py +++ b/tensorflow/contrib/summary/summary_ops.py @@ -27,6 +27,7 @@ import time import six from tensorflow.contrib.summary import gen_summary_ops +from tensorflow.core.framework import graph_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes @@ -99,25 +100,32 @@ def never_record_summaries(): class SummaryWriter(object): - """Encapsulates a summary writer.""" + """Encapsulates a stateful summary writer resource. - def __init__(self, resource): + See also: + - @{tf.contrib.summary.create_summary_file_writer} + - @{tf.contrib.summary.create_summary_db_writer} + """ + + def __init__(self, resource): self._resource = resource if context.in_eager_mode(): self._resource_deleter = resource_variable_ops.EagerResourceDeleter( handle=self._resource, handle_device="cpu:0") def set_as_default(self): + """Enables this summary writer for the current thread.""" context.context().summary_writer_resource = self._resource @tf_contextlib.contextmanager def as_default(self): + """Enables summary writing within a `with` block.""" if self._resource is None: - yield + yield self else: old = context.context().summary_writer_resource context.context().summary_writer_resource = self._resource - yield + yield self # Flushes the summary writer in eager mode or in graph functions, but not # in legacy graph mode (you're on your own there). with ops.device("cpu:0"): @@ -125,6 +133,43 @@ class SummaryWriter(object): context.context().summary_writer_resource = old +def initialize( + graph=None, # pylint: disable=redefined-outer-name + session=None): + """Initializes summary writing for graph execution mode. + + This helper method provides a higher-level alternative to using + @{tf.contrib.summary.summary_writer_initializer_op} and + @{tf.contrib.summary.graph}. + + Most users will also want to call @{tf.train.create_global_step} + which can happen before or after this function is called. + + Args: + graph: A @{tf.Graph} or @{tf.GraphDef} to output to the writer. + This function will not write the default graph by default. When + writing to an event log file, the associated step will be zero. + session: So this method can call @{tf.Session.run}. This defaults + to @{tf.get_default_session}. + + Raises: + RuntimeError: If in eager mode, or if the current thread has no + default @{tf.contrib.summary.SummaryWriter}. + ValueError: If session wasn't passed and no default session. + """ + if context.context().summary_writer_resource is None: + raise RuntimeError("No default tf.contrib.summary.SummaryWriter found") + if session is None: + session = ops.get_default_session() + if session is None: + raise ValueError("session must be passed if no default session exists") + session.run(summary_writer_initializer_op()) + if graph is not None: + data = _serialize_graph(graph) + x = array_ops.placeholder(dtypes.string) + session.run(_graph(x, 0), feed_dict={x: data}) + + def create_summary_file_writer(logdir, max_queue=None, flush_millis=None, @@ -192,10 +237,10 @@ def create_summary_db_writer(db_uri, Experiment will not be associated with a User. Must be valid as both a DNS label and Linux username. name: Shared name for this SummaryWriter resource stored to default - Graph. + @{tf.Graph}. 
Returns: - A new SummaryWriter instance. + A @{tf.contrib.summary.SummaryWriter} instance. """ with ops.device("cpu:0"): if experiment_name is None: @@ -240,7 +285,16 @@ def _nothing(): def all_summary_ops(): - """Graph-mode only. Returns all summary ops.""" + """Graph-mode only. Returns all summary ops. + + Please note this excludes @{tf.contrib.summary.graph} ops. + + Returns: + The summary ops. + + Raises: + RuntimeError: If in Eager mode. + """ if context.in_eager_mode(): raise RuntimeError( "tf.contrib.summary.all_summary_ops is only supported in graph mode.") @@ -248,7 +302,14 @@ def all_summary_ops(): def summary_writer_initializer_op(): - """Graph-mode only. Returns the list of ops to create all summary writers.""" + """Graph-mode only. Returns the list of ops to create all summary writers. + + Returns: + The initializer ops. + + Raises: + RuntimeError: If in Eager mode. + """ if context.in_eager_mode(): raise RuntimeError( "tf.contrib.summary.summary_writer_initializer_op is only " @@ -367,21 +428,72 @@ def audio(name, tensor, sample_rate, max_outputs, family=None, return summary_writer_function(name, tensor, function, family=family) -def import_event(tensor, name=None): - """Writes a tf.Event binary proto. +def graph(param, step=None, name=None): + """Writes a TensorFlow graph to the summary interface. - When using create_summary_db_writer(), this can be used alongside - tf.TFRecordReader to load event logs into the database. Please note - that this is lower level than the other summary functions and will - ignore any conditions set by methods like should_record_summaries(). + The graph summary is, strictly speaking, not a summary. Conditions + like @{tf.contrib.summary.never_record_summaries} do not apply. Only + a single graph can be associated with a particular run. If multiple + graphs are written, then only the last one will be considered by + TensorBoard. + + When not using eager execution mode, the user should consider passing + the `graph` parameter to @{tf.contrib.summary.initialize} instead of + calling this function. Otherwise special care needs to be taken when + using the graph to record the graph. Args: - tensor: A `Tensor` of type `string` containing a serialized `Event` - proto. + param: A @{tf.Tensor} containing a serialized graph proto. When + eager execution is enabled, this function will automatically + coerce @{tf.Graph}, @{tf.GraphDef}, and string types. + step: The global step variable. This doesn't have useful semantics + for graph summaries, but is used anyway, due to the structure of + event log files. This defaults to the global step. name: A name for the operation (optional). Returns: - The created Operation. + The created @{tf.Operation} or a @{tf.no_op} if summary writing has + not been enabled for this context. + + Raises: + TypeError: If `param` isn't already a @{tf.Tensor} in graph mode. + """ + if not context.in_eager_mode() and not isinstance(param, ops.Tensor): + raise TypeError("graph() needs a tf.Tensor (e.g. 
tf.placeholder) in graph " + "mode, but was: %s" % type(param)) + writer = context.context().summary_writer_resource + if writer is None: + return control_flow_ops.no_op() + with ops.device("cpu:0"): + if step is None: + step = training_util.get_global_step() + else: + step = ops.convert_to_tensor(step, dtypes.int64) + if isinstance(param, (ops.Graph, graph_pb2.GraphDef)): + tensor = ops.convert_to_tensor(_serialize_graph(param), dtypes.string) + else: + tensor = array_ops.identity(param) + return gen_summary_ops.write_graph_summary(writer, step, tensor, name=name) + +_graph = graph # for functions with a graph parameter + + +def import_event(tensor, name=None): + """Writes a @{tf.Event} binary proto. + + When using create_summary_db_writer(), this can be used alongside + @{tf.TFRecordReader} to load event logs into the database. Please + note that this is lower level than the other summary functions and + will ignore any conditions set by methods like + @{tf.contrib.summary.should_record_summaries}. + + Args: + tensor: A @{tf.Tensor} of type `string` containing a serialized + @{tf.Event} proto. + name: A name for the operation (optional). + + Returns: + The created @{tf.Operation}. """ return gen_summary_ops.import_event( context.context().summary_writer_resource, tensor, name=name) @@ -390,3 +502,10 @@ def import_event(tensor, name=None): def eval_dir(model_dir, name=None): """Construct a logdir for an eval summary writer.""" return os.path.join(model_dir, "eval" if not name else "eval_" + name) + + +def _serialize_graph(arbitrary_graph): + if isinstance(arbitrary_graph, ops.Graph): + return arbitrary_graph.as_graph_def(add_shapes=True).SerializeToString() + else: + return arbitrary_graph.SerializeToString() diff --git a/tensorflow/contrib/summary/summary_ops_graph_test.py b/tensorflow/contrib/summary/summary_ops_graph_test.py new file mode 100644 index 00000000000..8f85f67a258 --- /dev/null +++ b/tensorflow/contrib/summary/summary_ops_graph_test.py @@ -0,0 +1,52 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six + +from tensorflow.contrib.summary import summary_ops +from tensorflow.contrib.summary import summary_test_internal +from tensorflow.core.framework import graph_pb2 +from tensorflow.core.framework import node_def_pb2 +from tensorflow.python.framework import ops +from tensorflow.python.platform import test +from tensorflow.python.training import training_util + +get_all = summary_test_internal.get_all + + +class DbTest(summary_test_internal.SummaryDbTest): + + def testGraphPassedToGraph_isForbiddenForThineOwnSafety(self): + with self.assertRaises(TypeError): + summary_ops.graph(ops.Graph()) + with self.assertRaises(TypeError): + summary_ops.graph('') + + def testGraphSummary(self): + training_util.get_or_create_global_step() + name = 'hi' + graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),)) + with self.test_session(): + with self.create_summary_db_writer().as_default(): + summary_ops.initialize(graph=graph) + six.assertCountEqual(self, [name], + get_all(self.db, 'SELECT node_name FROM Nodes')) + + +if __name__ == '__main__': + test.main() diff --git a/tensorflow/contrib/summary/summary_ops_test.py b/tensorflow/contrib/summary/summary_ops_test.py index 6e1a746815f..09169fa6d70 100644 --- a/tensorflow/contrib/summary/summary_ops_test.py +++ b/tensorflow/contrib/summary/summary_ops_test.py @@ -12,20 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== - from __future__ import absolute_import from __future__ import division from __future__ import print_function -import functools -import os import tempfile import six -import sqlite3 from tensorflow.contrib.summary import summary_ops +from tensorflow.contrib.summary import summary_test_internal from tensorflow.contrib.summary import summary_test_util +from tensorflow.core.framework import graph_pb2 +from tensorflow.core.framework import node_def_pb2 from tensorflow.python.eager import function from tensorflow.python.eager import test from tensorflow.python.framework import dtypes @@ -36,6 +35,9 @@ from tensorflow.python.ops import state_ops from tensorflow.python.platform import gfile from tensorflow.python.training import training_util +get_all = summary_test_internal.get_all +get_one = summary_test_internal.get_one + class TargetTest(test_util.TensorFlowTestCase): @@ -108,22 +110,7 @@ class TargetTest(test_util.TensorFlowTestCase): self.assertEqual(events[1].summary.value[0].tag, 'scalar') -class DbTest(test_util.TensorFlowTestCase): - - def setUp(self): - self.db_path = os.path.join(self.get_temp_dir(), 'DbTest.sqlite') - if os.path.exists(self.db_path): - os.unlink(self.db_path) - self.db = sqlite3.connect(self.db_path) - self.create_summary_db_writer = functools.partial( - summary_ops.create_summary_db_writer, - db_uri=self.db_path, - experiment_name='experiment', - run_name='run', - user_name='user') - - def tearDown(self): - self.db.close() +class DbTest(summary_test_internal.SummaryDbTest): def testIntegerSummaries(self): step = training_util.create_global_step() @@ -186,13 +173,15 @@ class DbTest(test_util.TensorFlowTestCase): with self.assertRaises(ValueError): self.create_summary_db_writer(user_name='@') - -def get_one(db, q, *p): - return db.execute(q, p).fetchone()[0] - - 
-def get_all(db, q, *p): - return unroll(db.execute(q, p).fetchall()) + def testGraphSummary(self): + training_util.get_or_create_global_step() + name = 'hi' + graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),)) + with summary_ops.always_record_summaries(): + with self.create_summary_db_writer().as_default(): + summary_ops.graph(graph) + six.assertCountEqual(self, [name], + get_all(self.db, 'SELECT node_name FROM Nodes')) def get_tensor(db, tag_id, step): @@ -205,9 +194,5 @@ def int64(x): return array_ops.constant(x, dtypes.int64) -def unroll(list_of_tuples): - return sum(list_of_tuples, ()) - - if __name__ == '__main__': test.main() diff --git a/tensorflow/contrib/summary/summary_test_internal.py b/tensorflow/contrib/summary/summary_test_internal.py new file mode 100644 index 00000000000..54233f2f50b --- /dev/null +++ b/tensorflow/contrib/summary/summary_test_internal.py @@ -0,0 +1,59 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Internal helpers for tests in this directory.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os +import sqlite3 + +from tensorflow.contrib.summary import summary_ops +from tensorflow.python.framework import test_util + + +class SummaryDbTest(test_util.TensorFlowTestCase): + """Helper for summary database testing.""" + + def setUp(self): + super(SummaryDbTest, self).setUp() + self.db_path = os.path.join(self.get_temp_dir(), 'DbTest.sqlite') + if os.path.exists(self.db_path): + os.unlink(self.db_path) + self.db = sqlite3.connect(self.db_path) + self.create_summary_db_writer = functools.partial( + summary_ops.create_summary_db_writer, + db_uri=self.db_path, + experiment_name='experiment', + run_name='run', + user_name='user') + + def tearDown(self): + self.db.close() + super(SummaryDbTest, self).tearDown() + + +def get_one(db, q, *p): + return db.execute(q, p).fetchone()[0] + + +def get_all(db, q, *p): + return unroll(db.execute(q, p).fetchall()) + + +def unroll(list_of_tuples): + return sum(list_of_tuples, ()) diff --git a/tensorflow/contrib/tensorboard/db/schema.cc b/tensorflow/contrib/tensorboard/db/schema.cc index 98fff9e0ae4..d63b2c6cc23 100644 --- a/tensorflow/contrib/tensorboard/db/schema.cc +++ b/tensorflow/contrib/tensorboard/db/schema.cc @@ -135,8 +135,7 @@ class SqliteSchema { /// the database. This field will be mutated if the run is /// restarted. /// description: Optional markdown information. - /// graph: Snappy tf.GraphDef proto with node field cleared. That - /// field can be recreated using GraphNodes and NodeDefs. + /// graph_id: ID of associated Graphs row. 
Status CreateRunsTable() { return Run(R"sql( CREATE TABLE IF NOT EXISTS Runs ( @@ -147,7 +146,7 @@ class SqliteSchema { inserted_time REAL, started_time REAL, description TEXT, - graph BLOB + graph_id INTEGER ) )sql"); } @@ -205,46 +204,78 @@ class SqliteSchema { )sql"); } - /// \brief Creates NodeDefs table. - /// - /// This table stores NodeDef protos which define the GraphDef for a - /// Run. This functions like a hash table so rows can be shared by - /// multiple Runs in an Experiment. + /// \brief Creates Graphs table. /// /// Fields: /// rowid: Ephemeral b-tree ID dictating locality. - /// experiment_id: Optional int64 for grouping rows. - /// node_def_id: Permanent >0 unique ID. - /// fingerprint: Optional farmhash::Fingerprint64() of uncompressed - /// node_def bytes, coerced to int64. - /// node_def: BLOB containing a Snappy tf.NodeDef proto. - Status CreateNodeDefsTable() { + /// graph_id: Permanent >0 unique ID. + /// inserted_time: Float UNIX timestamp with µs precision. This is + /// always the wall time of when the row was inserted into the + /// DB. It may be used as a hint for an archival job. + /// node_def: Contains Snappy tf.GraphDef proto. All fields will be + /// cleared except those not expressed in SQL. + Status CreateGraphsTable() { return Run(R"sql( - CREATE TABLE IF NOT EXISTS NodeDefs ( + CREATE TABLE IF NOT EXISTS Graphs ( rowid INTEGER PRIMARY KEY, - experiment_id INTEGER, - node_def_id INTEGER NOT NULL, - fingerprint INTEGER, - node_def TEXT + graph_id INTEGER NOT NULL, + inserted_time REAL, + graph_def BLOB ) )sql"); } - /// \brief Creates RunNodeDefs table. - /// - /// Table mapping Runs to NodeDefs. This is used to recreate the node - /// field of the GraphDef proto. + /// \brief Creates Nodes table. /// /// Fields: /// rowid: Ephemeral b-tree ID dictating locality. - /// run_id: Mandatory ID of associated Run. - /// node_def_id: Mandatory ID of associated NodeDef. - Status CreateRunNodeDefsTable() { + /// graph_id: Permanent >0 unique ID. + /// node_id: ID for this node. This is more like a 0-index within + /// the Graph. Please note indexes are allowed to be removed. + /// node_name: Unique name for this Node within Graph. This is + /// copied from the proto so it can be indexed. This is allowed + /// to be NULL to save space on the index, in which case the + /// node_def.name proto field must not be cleared. + /// op: Copied from tf.NodeDef proto. + /// device: Copied from tf.NodeDef proto. + /// node_def: Contains Snappy tf.NodeDef proto. All fields will be + /// cleared except those not expressed in SQL. + Status CreateNodesTable() { return Run(R"sql( - CREATE TABLE IF NOT EXISTS RunNodeDefs ( + CREATE TABLE IF NOT EXISTS Nodes ( rowid INTEGER PRIMARY KEY, - run_id INTEGER NOT NULL, - node_def_id INTEGER NOT NULL + graph_id INTEGER NOT NULL, + node_id INTEGER NOT NULL, + node_name TEXT, + op TEXT, + device TEXT, + node_def BLOB + ) + )sql"); + } + + /// \brief Creates NodeInputs table. + /// + /// Fields: + /// rowid: Ephemeral b-tree ID dictating locality. + /// graph_id: Permanent >0 unique ID. + /// node_id: Index of Node in question. This can be considered the + /// 'to' vertex. + /// idx: Used for ordering inputs on a given Node. + /// input_node_id: Nodes.node_id of the corresponding input node. + /// This can be considered the 'from' vertex. + /// is_control: If non-zero, indicates this input is a controlled + /// dependency, which means this isn't an edge through which + /// tensors flow. NULL means 0. 
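As a sketch of how a consumer such as TensorBoard might read a stored graph back out of the Graphs, Nodes, and NodeInputs tables described above (not part of this change; the database path is a hypothetical placeholder and node_name is assumed to be populated rather than NULL):

import sqlite3

db = sqlite3.connect('/tmp/demo.sqlite')
graph_id = db.execute('SELECT graph_id FROM Graphs').fetchone()[0]
rows = db.execute('''
    SELECT n.node_name, n.op, i.idx, s.node_name, i.is_control
    FROM Nodes n
    JOIN NodeInputs i ON i.graph_id = n.graph_id AND i.node_id = n.node_id
    JOIN Nodes s ON s.graph_id = i.graph_id AND s.node_id = i.input_node_id
    WHERE n.graph_id = ?
    ORDER BY n.node_id, i.idx''', (graph_id,)).fetchall()
for name, op, idx, input_name, is_control in rows:
  # A non-zero is_control (NULL means 0) marks a control dependency edge.
  edge = '^' + input_name if is_control else input_name
  print('%s (%s) input %d: %s' % (name, op, idx, edge))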
+ Status CreateNodeInputsTable() { + return Run(R"sql( + CREATE TABLE IF NOT EXISTS NodeInputs ( + rowid INTEGER PRIMARY KEY, + graph_id INTEGER NOT NULL, + node_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + input_node_id INTEGER NOT NULL, + is_control INTEGER ) )sql"); } @@ -297,11 +328,27 @@ class SqliteSchema { )sql"); } - /// \brief Uniquely indexes node_def_id on NodeDefs table. - Status CreateNodeDefIdIndex() { + /// \brief Uniquely indexes graph_id on Graphs table. + Status CreateGraphIdIndex() { return Run(R"sql( - CREATE UNIQUE INDEX IF NOT EXISTS NodeDefIdIndex - ON NodeDefs (node_def_id) + CREATE UNIQUE INDEX IF NOT EXISTS GraphIdIndex + ON Graphs (graph_id) + )sql"); + } + + /// \brief Uniquely indexes (graph_id, node_id) on Nodes table. + Status CreateNodeIdIndex() { + return Run(R"sql( + CREATE UNIQUE INDEX IF NOT EXISTS NodeIdIndex + ON Nodes (graph_id, node_id) + )sql"); + } + + /// \brief Uniquely indexes (graph_id, node_id, idx) on NodeInputs table. + Status CreateNodeInputsIndex() { + return Run(R"sql( + CREATE UNIQUE INDEX IF NOT EXISTS NodeInputsIndex + ON NodeInputs (graph_id, node_id, idx) )sql"); } @@ -350,20 +397,12 @@ class SqliteSchema { )sql"); } - /// \brief Indexes (experiment_id, fingerprint) on NodeDefs table. - Status CreateNodeDefFingerprintIndex() { + /// \brief Uniquely indexes (graph_id, node_name) on Nodes table. + Status CreateNodeNameIndex() { return Run(R"sql( - CREATE INDEX IF NOT EXISTS NodeDefFingerprintIndex - ON NodeDefs (experiment_id, fingerprint) - WHERE fingerprint IS NOT NULL - )sql"); - } - - /// \brief Uniquely indexes (run_id, node_def_id) on RunNodeDefs table. - Status CreateRunNodeDefIndex() { - return Run(R"sql( - CREATE UNIQUE INDEX IF NOT EXISTS RunNodeDefIndex - ON RunNodeDefs (run_id, node_def_id) + CREATE UNIQUE INDEX IF NOT EXISTS NodeNameIndex + ON Nodes (graph_id, node_name) + WHERE node_name IS NOT NULL )sql"); } @@ -387,22 +426,24 @@ Status SetupTensorboardSqliteDb(std::shared_ptr db) { TF_RETURN_IF_ERROR(s.CreateRunsTable()); TF_RETURN_IF_ERROR(s.CreateExperimentsTable()); TF_RETURN_IF_ERROR(s.CreateUsersTable()); - TF_RETURN_IF_ERROR(s.CreateNodeDefsTable()); - TF_RETURN_IF_ERROR(s.CreateRunNodeDefsTable()); + TF_RETURN_IF_ERROR(s.CreateGraphsTable()); + TF_RETURN_IF_ERROR(s.CreateNodeInputsTable()); + TF_RETURN_IF_ERROR(s.CreateNodesTable()); TF_RETURN_IF_ERROR(s.CreateTensorIndex()); TF_RETURN_IF_ERROR(s.CreateTensorChunkIndex()); TF_RETURN_IF_ERROR(s.CreateTagIdIndex()); TF_RETURN_IF_ERROR(s.CreateRunIdIndex()); TF_RETURN_IF_ERROR(s.CreateExperimentIdIndex()); TF_RETURN_IF_ERROR(s.CreateUserIdIndex()); - TF_RETURN_IF_ERROR(s.CreateNodeDefIdIndex()); + TF_RETURN_IF_ERROR(s.CreateGraphIdIndex()); + TF_RETURN_IF_ERROR(s.CreateNodeIdIndex()); + TF_RETURN_IF_ERROR(s.CreateNodeInputsIndex()); TF_RETURN_IF_ERROR(s.CreateTagNameIndex()); TF_RETURN_IF_ERROR(s.CreateRunNameIndex()); TF_RETURN_IF_ERROR(s.CreateExperimentNameIndex()); TF_RETURN_IF_ERROR(s.CreateUserNameIndex()); TF_RETURN_IF_ERROR(s.CreateUserEmailIndex()); - TF_RETURN_IF_ERROR(s.CreateNodeDefFingerprintIndex()); - TF_RETURN_IF_ERROR(s.CreateRunNodeDefIndex()); + TF_RETURN_IF_ERROR(s.CreateNodeNameIndex()); return Status::OK(); } diff --git a/tensorflow/contrib/tensorboard/db/summary_db_writer.cc b/tensorflow/contrib/tensorboard/db/summary_db_writer.cc index a26ad616603..ae063d24efe 100644 --- a/tensorflow/contrib/tensorboard/db/summary_db_writer.cc +++ b/tensorflow/contrib/tensorboard/db/summary_db_writer.cc @@ -15,17 +15,29 @@ limitations under the License. 
#include "tensorflow/contrib/tensorboard/db/summary_db_writer.h" #include "tensorflow/contrib/tensorboard/db/schema.h" +#include "tensorflow/core/framework/graph.pb.h" +#include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/summary.pb.h" +#include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/db/sqlite.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/lib/strings/stringprintf.h" +#include "tensorflow/core/platform/fingerprint.h" #include "tensorflow/core/platform/snappy.h" #include "tensorflow/core/util/event.pb.h" namespace tensorflow { namespace { +double GetWallTime(Env* env) { + // TODO(@jart): Follow precise definitions for time laid out in schema. + // TODO(@jart): Use monotonic clock from gRPC codebase. + return static_cast(env->NowMicros()) / 1.0e6; +} + int64 MakeRandomId() { + // TODO(@jart): Try generating ID in 2^24 space, falling back to 2^63 + // https://sqlite.org/src4/doc/trunk/www/varint.wiki int64 id = static_cast(random::New64() & ((1ULL << 63) - 1)); if (id == 0) { ++id; @@ -33,10 +45,201 @@ int64 MakeRandomId() { return id; } +Status Serialize(const protobuf::MessageLite& proto, string* output) { + output->clear(); + if (!proto.SerializeToString(output)) { + return errors::DataLoss("SerializeToString failed"); + } + return Status::OK(); +} + +Status Compress(const string& data, string* output) { + output->clear(); + if (!port::Snappy_Compress(data.data(), data.size(), output)) { + return errors::FailedPrecondition("TensorBase needs Snappy"); + } + return Status::OK(); +} + +Status BindProto(SqliteStatement* stmt, int parameter, + const protobuf::MessageLite& proto) { + string serialized; + TF_RETURN_IF_ERROR(Serialize(proto, &serialized)); + string compressed; + TF_RETURN_IF_ERROR(Compress(serialized, &compressed)); + stmt->BindBlobUnsafe(parameter, compressed); + return Status::OK(); +} + +Status BindTensor(SqliteStatement* stmt, int parameter, const Tensor& t) { + // TODO(@jart): Make portable between little and big endian systems. + // TODO(@jart): Use TensorChunks with minimal copying for big tensors. + // TODO(@jart): Add field to indicate encoding. + // TODO(@jart): Allow crunch tool to re-compress with zlib instead. + TensorProto p; + t.AsProtoTensorContent(&p); + return BindProto(stmt, parameter, p); +} + +class Transactor { + public: + explicit Transactor(std::shared_ptr db) + : db_(std::move(db)), + begin_(db_->Prepare("BEGIN TRANSACTION")), + commit_(db_->Prepare("COMMIT TRANSACTION")), + rollback_(db_->Prepare("ROLLBACK TRANSACTION")) {} + + template + Status Transact(T callback, Args&&... args) { + TF_RETURN_IF_ERROR(begin_.StepAndReset()); + Status s = callback(std::forward(args)...); + if (s.ok()) { + TF_RETURN_IF_ERROR(commit_.StepAndReset()); + } else { + TF_RETURN_WITH_CONTEXT_IF_ERROR(rollback_.StepAndReset(), s.ToString()); + } + return s; + } + + private: + std::shared_ptr db_; + SqliteStatement begin_; + SqliteStatement commit_; + SqliteStatement rollback_; +}; + +class GraphSaver { + public: + static Status SaveToRun(Env* env, Sqlite* db, GraphDef* graph, int64 run_id) { + auto get = db->Prepare("SELECT graph_id FROM Runs WHERE run_id = ?"); + get.BindInt(1, run_id); + bool is_done; + TF_RETURN_IF_ERROR(get.Step(&is_done)); + int64 graph_id = is_done ? 0 : get.ColumnInt(0); + if (graph_id == 0) { + graph_id = MakeRandomId(); + // TODO(@jart): Check for ID collision. + auto set = db->Prepare("UPDATE Runs SET graph_id = ? 
WHERE run_id = ?"); + set.BindInt(1, graph_id); + set.BindInt(2, run_id); + TF_RETURN_IF_ERROR(set.StepAndReset()); + } + return Save(env, db, graph, graph_id); + } + + static Status Save(Env* env, Sqlite* db, GraphDef* graph, int64 graph_id) { + GraphSaver saver{env, db, graph, graph_id}; + saver.MapNameToNodeId(); + TF_RETURN_IF_ERROR(saver.SaveNodeInputs()); + TF_RETURN_IF_ERROR(saver.SaveNodes()); + TF_RETURN_IF_ERROR(saver.SaveGraph()); + return Status::OK(); + } + + private: + GraphSaver(Env* env, Sqlite* db, GraphDef* graph, int64 graph_id) + : env_(env), db_(db), graph_(graph), graph_id_(graph_id) {} + + void MapNameToNodeId() { + size_t toto = static_cast(graph_->node_size()); + name_copies_.reserve(toto); + name_to_node_id_.reserve(toto); + for (int node_id = 0; node_id < graph_->node_size(); ++node_id) { + // Copy name into memory region, since we call clear_name() later. + // Then wrap in StringPiece so we can compare slices without copy. + name_copies_.emplace_back(graph_->node(node_id).name()); + name_to_node_id_.emplace(name_copies_.back(), node_id); + } + } + + Status SaveNodeInputs() { + auto purge = db_->Prepare("DELETE FROM NodeInputs WHERE graph_id = ?"); + purge.BindInt(1, graph_id_); + TF_RETURN_IF_ERROR(purge.StepAndReset()); + auto insert = db_->Prepare(R"sql( + INSERT INTO NodeInputs (graph_id, node_id, idx, input_node_id, is_control) + VALUES (?, ?, ?, ?, ?) + )sql"); + for (int node_id = 0; node_id < graph_->node_size(); ++node_id) { + const NodeDef& node = graph_->node(node_id); + for (int idx = 0; idx < node.input_size(); ++idx) { + StringPiece name = node.input(idx); + insert.BindInt(1, graph_id_); + insert.BindInt(2, node_id); + insert.BindInt(3, idx); + if (!name.empty() && name[0] == '^') { + name.remove_prefix(1); + insert.BindInt(5, 1); + } + auto e = name_to_node_id_.find(name); + if (e == name_to_node_id_.end()) { + return errors::DataLoss("Could not find node: ", name); + } + insert.BindInt(4, e->second); + TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node.name(), + " -> ", name); + } + } + return Status::OK(); + } + + Status SaveNodes() { + auto purge = db_->Prepare("DELETE FROM Nodes WHERE graph_id = ?"); + purge.BindInt(1, graph_id_); + TF_RETURN_IF_ERROR(purge.StepAndReset()); + auto insert = db_->Prepare(R"sql( + INSERT INTO Nodes (graph_id, node_id, node_name, op, device, node_def) + VALUES (?, ?, ?, ?, ?, ?) + )sql"); + for (int node_id = 0; node_id < graph_->node_size(); ++node_id) { + NodeDef* node = graph_->mutable_node(node_id); + insert.BindInt(1, graph_id_); + insert.BindInt(2, node_id); + insert.BindText(3, node->name()); + node->clear_name(); + if (!node->op().empty()) { + insert.BindText(4, node->op()); + node->clear_op(); + } + if (!node->device().empty()) { + insert.BindText(5, node->device()); + node->clear_device(); + } + node->clear_input(); + TF_RETURN_IF_ERROR(BindProto(&insert, 6, *node)); + TF_RETURN_WITH_CONTEXT_IF_ERROR(insert.StepAndReset(), node->name()); + } + return Status::OK(); + } + + Status SaveGraph() { + auto insert = db_->Prepare(R"sql( + INSERT OR REPLACE INTO Graphs (graph_id, inserted_time, graph_def) + VALUES (?, ?, ?) 
+ )sql"); + insert.BindInt(1, graph_id_); + insert.BindDouble(2, GetWallTime(env_)); + graph_->clear_node(); + TF_RETURN_IF_ERROR(BindProto(&insert, 3, *graph_)); + return insert.StepAndReset(); + } + + Env* env_; + Sqlite* db_; + GraphDef* graph_; + int64 graph_id_; + std::vector name_copies_; + std::unordered_map name_to_node_id_; +}; + class SummaryDbWriter : public SummaryWriterInterface { public: SummaryDbWriter(Env* env, std::shared_ptr db) - : SummaryWriterInterface(), env_(env), db_(std::move(db)), run_id_(-1) {} + : SummaryWriterInterface(), + env_(env), + db_(std::move(db)), + txn_(db_), + run_id_{0LL} {} ~SummaryDbWriter() override {} Status Initialize(const string& experiment_name, const string& run_name, @@ -76,7 +279,7 @@ class SummaryDbWriter : public SummaryWriterInterface { // TODO(@jart): Check for random ID collisions without needing txn retry. insert_tensor_.BindInt(1, tag_id); insert_tensor_.BindInt(2, global_step); - insert_tensor_.BindDouble(3, GetWallTime()); + insert_tensor_.BindDouble(3, GetWallTime(env_)); switch (t.dtype()) { case DT_INT64: insert_tensor_.BindInt(4, t.scalar()()); @@ -85,22 +288,41 @@ class SummaryDbWriter : public SummaryWriterInterface { insert_tensor_.BindDouble(4, t.scalar()()); break; default: - TF_RETURN_IF_ERROR(BindTensor(t)); + TF_RETURN_IF_ERROR(BindTensor(&insert_tensor_, 4, t)); break; } return insert_tensor_.StepAndReset(); } - Status WriteEvent(std::unique_ptr e) override { + Status WriteGraph(int64 global_step, std::unique_ptr g) override { mutex_lock ml(mu_); TF_RETURN_IF_ERROR(InitializeParents()); - if (e->what_case() == Event::WhatCase::kSummary) { - const Summary& summary = e->summary(); - for (int i = 0; i < summary.value_size(); ++i) { - TF_RETURN_IF_ERROR(WriteSummary(e.get(), summary.value(i))); + return txn_.Transact(GraphSaver::SaveToRun, env_, db_.get(), g.get(), + run_id_); + } + + Status WriteEvent(std::unique_ptr e) override { + switch (e->what_case()) { + case Event::WhatCase::kSummary: { + mutex_lock ml(mu_); + TF_RETURN_IF_ERROR(InitializeParents()); + const Summary& summary = e->summary(); + for (int i = 0; i < summary.value_size(); ++i) { + TF_RETURN_IF_ERROR(WriteSummary(e.get(), summary.value(i))); + } + return Status::OK(); } + case Event::WhatCase::kGraphDef: { + std::unique_ptr graph{new GraphDef}; + if (!ParseProtoUnlimited(graph.get(), e->graph_def())) { + return errors::DataLoss("parse event.graph_def failed"); + } + return WriteGraph(e->step(), std::move(graph)); + } + default: + // TODO(@jart): Handle other stuff. + return Status::OK(); } - return Status::OK(); } Status WriteScalar(int64 global_step, Tensor t, const string& tag) override { @@ -136,33 +358,8 @@ class SummaryDbWriter : public SummaryWriterInterface { string DebugString() override { return "SummaryDbWriter"; } private: - double GetWallTime() { - // TODO(@jart): Follow precise definitions for time laid out in schema. - // TODO(@jart): Use monotonic clock from gRPC codebase. - return static_cast(env_->NowMicros()) / 1.0e6; - } - - Status BindTensor(const Tensor& t) EXCLUSIVE_LOCKS_REQUIRED(mu_) { - // TODO(@jart): Make portable between little and big endian systems. - // TODO(@jart): Use TensorChunks with minimal copying for big tensors. - TensorProto p; - t.AsProtoTensorContent(&p); - string encoded; - if (!p.SerializeToString(&encoded)) { - return errors::DataLoss("SerializeToString failed"); - } - // TODO(@jart): Put byte at beginning of blob to indicate encoding. - // TODO(@jart): Allow crunch tool to re-compress with zlib instead. 
- string compressed; - if (!port::Snappy_Compress(encoded.data(), encoded.size(), &compressed)) { - return errors::FailedPrecondition("TensorBase needs Snappy"); - } - insert_tensor_.BindBlobUnsafe(4, compressed); - return Status::OK(); - } - Status InitializeParents() EXCLUSIVE_LOCKS_REQUIRED(mu_) { - if (run_id_ >= 0) { + if (run_id_ > 0) { return Status::OK(); } int64 user_id; @@ -195,7 +392,7 @@ class SummaryDbWriter : public SummaryWriterInterface { )sql"); insert_user.BindInt(1, *user_id); insert_user.BindText(2, user_name); - insert_user.BindDouble(3, GetWallTime()); + insert_user.BindDouble(3, GetWallTime(env_)); TF_RETURN_IF_ERROR(insert_user.StepAndReset()); } return Status::OK(); @@ -249,7 +446,7 @@ class SummaryDbWriter : public SummaryWriterInterface { } insert.BindInt(2, *id); insert.BindText(3, name); - insert.BindDouble(4, GetWallTime()); + insert.BindDouble(4, GetWallTime(env_)); TF_RETURN_IF_ERROR(insert.StepAndReset()); } return Status::OK(); @@ -276,6 +473,7 @@ class SummaryDbWriter : public SummaryWriterInterface { mutex mu_; Env* env_; std::shared_ptr db_ GUARDED_BY(mu_); + Transactor txn_ GUARDED_BY(mu_); SqliteStatement insert_tensor_ GUARDED_BY(mu_); SqliteStatement update_metadata_ GUARDED_BY(mu_); string user_name_ GUARDED_BY(mu_); diff --git a/tensorflow/contrib/tensorboard/db/summary_db_writer_test.cc b/tensorflow/contrib/tensorboard/db/summary_db_writer_test.cc index c1af51e7b7a..3431842ca21 100644 --- a/tensorflow/contrib/tensorboard/db/summary_db_writer_test.cc +++ b/tensorflow/contrib/tensorboard/db/summary_db_writer_test.cc @@ -14,6 +14,8 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/contrib/tensorboard/db/summary_db_writer.h" +#include "tensorflow/core/framework/graph.pb.h" +#include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/summary.pb.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/db/sqlite.h" @@ -212,5 +214,81 @@ TEST_F(SummaryDbWriterTest, WriteEvent_Scalar) { kTolerance); } +TEST_F(SummaryDbWriterTest, WriteGraph) { + TF_ASSERT_OK(CreateSummaryDbWriter(db_, "", "R", "", &env_, &writer_)); + env_.AdvanceByMillis(23); + GraphDef graph; + NodeDef* node = graph.add_node(); + node->set_name("x"); + node->set_op("Placeholder"); + node = graph.add_node(); + node->set_name("y"); + node->set_op("Placeholder"); + node = graph.add_node(); + node->set_name("z"); + node->set_op("Love"); + node = graph.add_node(); + node->set_name("+"); + node->set_op("Add"); + node->add_input("x"); + node->add_input("y"); + node->add_input("^z"); + node->set_device("tpu/lol"); + std::unique_ptr e{new Event}; + graph.SerializeToString(e->mutable_graph_def()); + TF_ASSERT_OK(writer_->WriteEvent(std::move(e))); + TF_ASSERT_OK(writer_->Flush()); + ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Runs")); + ASSERT_EQ(1LL, QueryInt("SELECT COUNT(*) FROM Graphs")); + ASSERT_EQ(4LL, QueryInt("SELECT COUNT(*) FROM Nodes")); + ASSERT_EQ(3LL, QueryInt("SELECT COUNT(*) FROM NodeInputs")); + + int64 graph_id = QueryInt("SELECT graph_id FROM Graphs"); + EXPECT_GT(graph_id, 0LL); + EXPECT_EQ(graph_id, QueryInt("SELECT graph_id FROM Runs")); + EXPECT_EQ(0.023, QueryDouble("SELECT inserted_time FROM Graphs")); + EXPECT_FALSE(QueryString("SELECT graph_def FROM Graphs").empty()); + + EXPECT_EQ("x", QueryString("SELECT node_name FROM Nodes WHERE node_id = 0")); + EXPECT_EQ("y", QueryString("SELECT node_name FROM Nodes WHERE node_id = 1")); 
+ EXPECT_EQ("z", QueryString("SELECT node_name FROM Nodes WHERE node_id = 2")); + EXPECT_EQ("+", QueryString("SELECT node_name FROM Nodes WHERE node_id = 3")); + + EXPECT_EQ("Placeholder", + QueryString("SELECT op FROM Nodes WHERE node_id = 0")); + EXPECT_EQ("Placeholder", + QueryString("SELECT op FROM Nodes WHERE node_id = 1")); + EXPECT_EQ("Love", QueryString("SELECT op FROM Nodes WHERE node_id = 2")); + EXPECT_EQ("Add", QueryString("SELECT op FROM Nodes WHERE node_id = 3")); + + EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 0")); + EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 1")); + EXPECT_EQ("", QueryString("SELECT device FROM Nodes WHERE node_id = 2")); + EXPECT_EQ("tpu/lol", + QueryString("SELECT device FROM Nodes WHERE node_id = 3")); + + EXPECT_EQ(graph_id, + QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 0")); + EXPECT_EQ(graph_id, + QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 1")); + EXPECT_EQ(graph_id, + QueryInt("SELECT graph_id FROM NodeInputs WHERE idx = 2")); + + EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 0")); + EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 1")); + EXPECT_EQ(3LL, QueryInt("SELECT node_id FROM NodeInputs WHERE idx = 2")); + + EXPECT_EQ(0LL, + QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 0")); + EXPECT_EQ(1LL, + QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 1")); + EXPECT_EQ(2LL, + QueryInt("SELECT input_node_id FROM NodeInputs WHERE idx = 2")); + + EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 0")); + EXPECT_EQ(0LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 1")); + EXPECT_EQ(1LL, QueryInt("SELECT is_control FROM NodeInputs WHERE idx = 2")); +} + } // namespace } // namespace tensorflow diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD index 5e19effe3de..b7386abdeaa 100644 --- a/tensorflow/core/kernels/BUILD +++ b/tensorflow/core/kernels/BUILD @@ -6247,6 +6247,7 @@ tf_kernel_library( "//tensorflow/contrib/tensorboard/db:summary_db_writer", "//tensorflow/core:framework", "//tensorflow/core:lib", + "//tensorflow/core:protos_all_cc", "//tensorflow/core:summary_ops_op_lib", "//tensorflow/core/lib/db:sqlite", ], diff --git a/tensorflow/core/kernels/summary_interface.cc b/tensorflow/core/kernels/summary_interface.cc index cd366f8c137..ad28d77ffde 100644 --- a/tensorflow/core/kernels/summary_interface.cc +++ b/tensorflow/core/kernels/summary_interface.cc @@ -17,6 +17,7 @@ limitations under the License. 
#include #include "tensorflow/compiler/xla/ptr_util.h" +#include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/summary.pb.h" @@ -393,6 +394,15 @@ class SummaryWriterImpl : public SummaryWriterInterface { return WriteEvent(std::move(e)); } + Status WriteGraph(int64 global_step, + std::unique_ptr graph) override { + std::unique_ptr e{new Event}; + e->set_step(global_step); + e->set_wall_time(GetWallTime()); + graph->SerializeToString(e->mutable_graph_def()); + return WriteEvent(std::move(e)); + } + Status WriteEvent(std::unique_ptr event) override { mutex_lock ml(mu_); queue_.emplace_back(std::move(event)); diff --git a/tensorflow/core/kernels/summary_interface.h b/tensorflow/core/kernels/summary_interface.h index ccf3459e56b..da1c28709fb 100644 --- a/tensorflow/core/kernels/summary_interface.h +++ b/tensorflow/core/kernels/summary_interface.h @@ -17,6 +17,7 @@ limitations under the License. #include +#include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/util/event.pb.h" @@ -46,6 +47,9 @@ class SummaryWriterInterface : public ResourceBase { virtual Status WriteAudio(int64 global_step, Tensor t, const string& tag, int max_outputs_, float sample_rate) = 0; + virtual Status WriteGraph(int64 global_step, + std::unique_ptr graph) = 0; + virtual Status WriteEvent(std::unique_ptr e) = 0; }; diff --git a/tensorflow/core/kernels/summary_kernels.cc b/tensorflow/core/kernels/summary_kernels.cc index 1fe2fc5b666..3706f51cf40 100644 --- a/tensorflow/core/kernels/summary_kernels.cc +++ b/tensorflow/core/kernels/summary_kernels.cc @@ -14,6 +14,7 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/contrib/tensorboard/db/summary_db_writer.h" +#include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/kernels/summary_interface.h" @@ -268,4 +269,28 @@ class WriteAudioSummaryOp : public OpKernel { REGISTER_KERNEL_BUILDER(Name("WriteAudioSummary").Device(DEVICE_CPU), WriteAudioSummaryOp); +class WriteGraphSummaryOp : public OpKernel { + public: + explicit WriteGraphSummaryOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} + + void Compute(OpKernelContext* ctx) override { + SummaryWriterInterface* s; + OP_REQUIRES_OK(ctx, LookupResource(ctx, HandleFromInput(ctx, 0), &s)); + core::ScopedUnref unref(s); + const Tensor* t; + OP_REQUIRES_OK(ctx, ctx->input("global_step", &t)); + const int64 global_step = t->scalar()(); + OP_REQUIRES_OK(ctx, ctx->input("tensor", &t)); + std::unique_ptr graph{new GraphDef}; + if (!ParseProtoUnlimited(graph.get(), t->scalar()())) { + ctx->CtxFailureWithWarning( + errors::DataLoss("Bad tf.GraphDef binary proto tensor string")); + return; + } + OP_REQUIRES_OK(ctx, s->WriteGraph(global_step, std::move(graph))); + } +}; +REGISTER_KERNEL_BUILDER(Name("WriteGraphSummary").Device(DEVICE_CPU), + WriteGraphSummaryOp); + } // namespace tensorflow diff --git a/tensorflow/core/ops/summary_ops.cc b/tensorflow/core/ops/summary_ops.cc index 5efbac7ad76..7f6d8b06cd3 100644 --- a/tensorflow/core/ops/summary_ops.cc +++ b/tensorflow/core/ops/summary_ops.cc @@ -256,4 +256,17 @@ sample_rate: The sample rate of the signal in hertz. max_outputs: Max number of batch elements to generate audio for. 
)doc"); +REGISTER_OP("WriteGraphSummary") + .Input("writer: resource") + .Input("global_step: int64") + .Input("tensor: string") + .SetShapeFn(shape_inference::NoOutputs) + .Doc(R"doc( +Writes a `GraphDef` protocol buffer to a `SummaryWriter`. + +writer: Handle of `SummaryWriter`. +global_step: The step to write the summary for. +tensor: A scalar string of the serialized tf.GraphDef proto. +)doc"); + } // namespace tensorflow diff --git a/tensorflow/tools/pip_package/pip_smoke_test.py b/tensorflow/tools/pip_package/pip_smoke_test.py index cc46dd5162b..3677aaa886f 100644 --- a/tensorflow/tools/pip_package/pip_smoke_test.py +++ b/tensorflow/tools/pip_package/pip_smoke_test.py @@ -66,6 +66,9 @@ BLACKLIST = [ "//tensorflow/contrib/timeseries/examples:data/period_trend.csv", # pylint:disable=line-too-long "//tensorflow/contrib/timeseries/python/timeseries:test_utils", "//tensorflow/contrib/timeseries/python/timeseries/state_space_models:test_utils", # pylint:disable=line-too-long + + # TODO(yifeif): Remove when py_library(testonly=1) is ignored. + "//tensorflow/contrib/summary:summary_test_internal", ] From 9adc48d3083d33c3674b02787a2f1beeb66a4583 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 11:45:06 -0800 Subject: [PATCH 057/104] Don't enable dependency optimizer by default. PiperOrigin-RevId: 175857095 --- tensorflow/core/grappler/optimizers/meta_optimizer.cc | 3 ++- tensorflow/core/protobuf/rewriter_config.proto | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/grappler/optimizers/meta_optimizer.cc b/tensorflow/core/grappler/optimizers/meta_optimizer.cc index 1e93900e6a6..1fa639ad33d 100644 --- a/tensorflow/core/grappler/optimizers/meta_optimizer.cc +++ b/tensorflow/core/grappler/optimizers/meta_optimizer.cc @@ -76,7 +76,7 @@ Status MetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item, optimizers.push_back(std::unique_ptr( new ArithmeticOptimizer(cfg_.arithmetic_optimization()))); } - if (cfg_.dependency_optimization() != RewriterConfig::OFF) { + if (cfg_.dependency_optimization() == RewriterConfig::ON) { optimizers.push_back(std::unique_ptr( new DependencyOptimizer(cfg_.dependency_optimization()))); } @@ -187,6 +187,7 @@ bool MetaOptimizerEnabled(const RewriterConfig& cfg) { return !cfg.disable_model_pruning() || cfg.layout_optimizer() == RewriterConfig::ON || cfg.constant_folding() != RewriterConfig::OFF || + cfg.dependency_optimization() == RewriterConfig::ON || cfg.arithmetic_optimization() != RewriterConfig::OFF || cfg.auto_parallel().enable() || cfg.memory_optimization() > 1 || !cfg.optimizers().empty(); diff --git a/tensorflow/core/protobuf/rewriter_config.proto b/tensorflow/core/protobuf/rewriter_config.proto index 96b55ce04ba..3b5d1563a26 100644 --- a/tensorflow/core/protobuf/rewriter_config.proto +++ b/tensorflow/core/protobuf/rewriter_config.proto @@ -35,7 +35,7 @@ message RewriterConfig { Toggle constant_folding = 3; // Arithmetic optimizations (default is ON) Toggle arithmetic_optimization = 7; - // Control dependency optimizations (default is ON). + // Control dependency optimizations (default is OFF). Toggle dependency_optimization = 8; // If true, don't remove unnecessary ops from the graph bool disable_model_pruning = 2; From 51e8b01c126d76a161b1957e8e1c6e87e5409910 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 12:13:13 -0800 Subject: [PATCH 058/104] Go: Update generated wrapper functions for TensorFlow ops. 
PiperOrigin-RevId: 175861269 --- tensorflow/go/op/wrappers.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go index 5a6ae4fa5ff..b43c9782453 100644 --- a/tensorflow/go/op/wrappers.go +++ b/tensorflow/go/op/wrappers.go @@ -20553,6 +20553,27 @@ func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) { return op.Output(0) } +// Writes a `GraphDef` protocol buffer to a `SummaryWriter`. +// +// Arguments: +// writer: Handle of `SummaryWriter`. +// global_step: The step to write the summary for. +// tensor: A scalar string of the serialized tf.GraphDef proto. +// +// Returns the created operation. +func WriteGraphSummary(scope *Scope, writer tf.Output, global_step tf.Output, tensor tf.Output) (o *tf.Operation) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "WriteGraphSummary", + Input: []tf.Input{ + writer, global_step, tensor, + }, + } + return scope.AddOperation(opspec) +} + // MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad. type MaxPool3DGradGradAttr func(optionalAttr) From b0a49cd0f46cbc4d326ee87ab92c28b4b7b9ead7 Mon Sep 17 00:00:00 2001 From: Eugene Brevdo Date: Wed, 15 Nov 2017 12:24:51 -0800 Subject: [PATCH 059/104] TensorArray changes: respect infer_shape argument inside the TensorArray object. This adds a new attr to the TensorArrayV3 construction op: identical_element_shapes (default False). If True, then shape for all elements is inferred at runtime when any single element is written. The Python TensorArray constructor's "infer_shape" argument is piped through to this attribute. Since it is true by default, this enables runtime element consistency checking on top of the existing graph build time static shape checking. PiperOrigin-RevId: 175862771 --- tensorflow/core/framework/node_def_util.cc | 4 +++ tensorflow/core/framework/node_def_util.h | 3 +++ tensorflow/core/framework/op_kernel.cc | 4 +++ tensorflow/core/framework/op_kernel.h | 3 +++ tensorflow/core/kernels/tensor_array.h | 23 ++++++++++++---- tensorflow/core/kernels/tensor_array_ops.cc | 27 ++++++++++++++----- tensorflow/core/ops/data_flow_ops.cc | 7 +++++ .../kernel_tests/tensor_array_ops_test.py | 23 +++++++++++++++- tensorflow/python/ops/tensor_array_ops.py | 10 ++++++- 9 files changed, 90 insertions(+), 14 deletions(-) diff --git a/tensorflow/core/framework/node_def_util.cc b/tensorflow/core/framework/node_def_util.cc index f039497f13b..477184022df 100644 --- a/tensorflow/core/framework/node_def_util.cc +++ b/tensorflow/core/framework/node_def_util.cc @@ -243,6 +243,10 @@ DEFINE_GET_ATTR(Tensor, tensor, "tensor", emplace_back, t, Tensor t; DEFINE_GET_ATTR(NameAttrList, func, "func", emplace_back, v, ;); #undef DEFINE_GET_ATTR +bool HasNodeAttr(const NodeDef& node_def, StringPiece attr_name) { + return node_def.attr().find(attr_name.ToString()) != node_def.attr().end(); +} + static const string& kEmptyString = *new string(); const string& GetNodeAttrString(const AttrSlice& attrs, StringPiece attr_name) { diff --git a/tensorflow/core/framework/node_def_util.h b/tensorflow/core/framework/node_def_util.h index 523b5382954..f6f28aac481 100644 --- a/tensorflow/core/framework/node_def_util.h +++ b/tensorflow/core/framework/node_def_util.h @@ -157,6 +157,9 @@ class AttrSlice { const AttrValueMap* attrs_; }; +// Return true if the attr with the name attr_name is defined in node_def. 
+bool HasNodeAttr(const NodeDef& node_def, StringPiece attr_name); + // Look up the attr with name attr_name and set *value to its value. If no // attr with attr_name is found in node_def, or the attr does not have // a matching type, a non-ok status will be returned. diff --git a/tensorflow/core/framework/op_kernel.cc b/tensorflow/core/framework/op_kernel.cc index c23692409c6..4d410809e77 100644 --- a/tensorflow/core/framework/op_kernel.cc +++ b/tensorflow/core/framework/op_kernel.cc @@ -192,6 +192,10 @@ OpKernelConstruction::OpKernelConstruction( graph_def_version_(graph_def_version), status_(status) {} +bool OpKernelConstruction::HasAttr(StringPiece attr_name) const { + return HasNodeAttr(def(), attr_name); +} + void OpKernelConstruction::SetStatus(const Status& status) { status_->Update(status); } diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h index 7eec84e26c7..da0dc549435 100644 --- a/tensorflow/core/framework/op_kernel.h +++ b/tensorflow/core/framework/op_kernel.h @@ -301,6 +301,9 @@ class OpKernelConstruction { template Status GetAttr(StringPiece attr_name, T* value) const; + // Return true if the attr_name is defined in def(). + bool HasAttr(StringPiece attr_name) const; + // Return the device type. const DeviceType& device_type() const { return device_type_; } diff --git a/tensorflow/core/kernels/tensor_array.h b/tensorflow/core/kernels/tensor_array.h index 2a41d4c419a..90b71e370c4 100644 --- a/tensorflow/core/kernels/tensor_array.h +++ b/tensorflow/core/kernels/tensor_array.h @@ -138,8 +138,9 @@ class TensorArray : public ResourceBase { // users to construct this many Tensors for storage in a TensorArray. TensorArray(const string& key, const DataType& dtype, const Tensor& handle, int32 N, const PartialTensorShape& element_shape, - bool dynamic_size, bool multiple_writes_aggregate, bool is_grad, - int32 marked_size, bool clear_after_read) + bool identical_element_shapes, bool dynamic_size, + bool multiple_writes_aggregate, bool is_grad, int32 marked_size, + bool clear_after_read) : key_(key), dtype_(dtype), handle_(handle), @@ -151,6 +152,7 @@ class TensorArray : public ResourceBase { is_grad_(is_grad), marked_size_(marked_size), element_shape_(element_shape), + identical_element_shapes_(identical_element_shapes), tensors_(N) {} // Write PersistentTensor 'value' to index 'index'. @@ -320,6 +322,8 @@ class TensorArray : public ResourceBase { return !gradients_disallowed_; } + bool HasIdenticalElementShapes() const { return identical_element_shapes_; } + // Copy the TensorShapes from another TensorArray into this one. // The sizes of the two TensorArrays must match and this one // may not have any entries filled in. This performs a "soft copy", @@ -379,7 +383,7 @@ class TensorArray : public ResourceBase { // Multiple writes to the same index will result in summation of the // values (used by backprop) - bool multiple_writes_aggregate_; + const bool multiple_writes_aggregate_; // If multiple Writes were attempted (e.g. via attribute // multiple_writes_aggregate), then gradients are disallowed. @@ -387,10 +391,10 @@ class TensorArray : public ResourceBase { // After a read at an index, clear away its PersistentTensor to // release memory. - bool clear_after_read_; + const bool clear_after_read_; // True iff this is a gradient tensor array. - bool is_grad_; + const bool is_grad_; // The size of the TensorArray after a (legacy) unpack or split is performed. // -1 if there has been no unpack or split performed on the TensorArray. 
@@ -400,6 +404,13 @@ class TensorArray : public ResourceBase { // known at all. PartialTensorShape element_shape_ GUARDED_BY(mu_); + // Whether all elements in the TensorArray have identical shapes. + // This allows certain behaviors, like dynamically checking for + // consistent shapes on write, and being able to fill in properly + // shaped zero tensors on stack -- even if the initial element_shape + // was not fully defined. + const bool identical_element_shapes_; + // TensorAndState is used to keep track of the PersistentTensors // stored in the TensorArray, along with their shapes, and a boolean // that determines whether they have already been read or not. @@ -463,6 +474,8 @@ Status TensorArray::LockedWriteOrAggregate(OpKernelContext* ctx, " which is incompatible with the TensorArray's inferred element " "shape: ", element_shape_.DebugString(), " (consider setting infer_shape=False)."); + } else if (identical_element_shapes_ && !element_shape_.IsFullyDefined()) { + element_shape_ = PartialTensorShape(value_t->shape().dim_sizes()); } if (t.read) { diff --git a/tensorflow/core/kernels/tensor_array_ops.cc b/tensorflow/core/kernels/tensor_array_ops.cc index 2191e4e8c5f..cca6d0e35f2 100644 --- a/tensorflow/core/kernels/tensor_array_ops.cc +++ b/tensorflow/core/kernels/tensor_array_ops.cc @@ -162,6 +162,14 @@ class TensorArrayOp : public TensorArrayCreationOp { OP_REQUIRES_OK(context, context->GetAttr("dtype", &dtype_)); OP_REQUIRES_OK(context, context->GetAttr("element_shape", &element_shape_)); OP_REQUIRES_OK(context, context->GetAttr("dynamic_size", &dynamic_size_)); + // The HasAttr check is for backwards compatibility with older op + // versions which do not have this attribute. + if (context->HasAttr("identical_element_shapes")) { + OP_REQUIRES_OK(context, context->GetAttr("identical_element_shapes", + &identical_element_shapes_)); + } else { + identical_element_shapes_ = false; + } OP_REQUIRES_OK(context, context->GetAttr("clear_after_read", &clear_after_read_)); OP_REQUIRES_OK(context, @@ -196,8 +204,9 @@ class TensorArrayOp : public TensorArrayCreationOp { TensorArray* tensor_array = new TensorArray( key, dtype_, *tensor_array_output_handle, size, element_shape_, - dynamic_size_, false /* multiple_writes_aggregate */, - false /* is_grad */, -1 /* marked_size */, clear_after_read_); + identical_element_shapes_, dynamic_size_, + false /* multiple_writes_aggregate */, false /* is_grad */, + -1 /* marked_size */, clear_after_read_); TF_RETURN_IF_ERROR( rm->Create(ctx->step_container()->name(), key, tensor_array)); @@ -210,6 +219,7 @@ class TensorArrayOp : public TensorArrayCreationOp { private: DataType dtype_; PartialTensorShape element_shape_; + bool identical_element_shapes_; bool dynamic_size_; bool clear_after_read_; string tensor_array_name_; // The name used to create the TensorArray. 
@@ -322,7 +332,8 @@ class TensorArrayGradOp : public TensorArrayCreationOp { output_handle](TensorArray** ret) -> Status { *ret = new TensorArray( key, tensor_array->ElemType(), *tensor_array_output_handle, - array_size, tensor_array->ElemShape(), false /* dynamic_size */, + array_size, tensor_array->ElemShape(), + tensor_array->HasIdenticalElementShapes(), false /* dynamic_size */, true /* multiple_writes_aggregate */, true /* is_grad */, marked_size /* marked_size */, true /* close_after_read */); TF_RETURN_IF_ERROR((*ret)->CopyShapesFrom(tensor_array)); @@ -1003,8 +1014,9 @@ class TensorArrayUnpackOrScatterOp : public OpKernel { OP_REQUIRES_OK(ctx, ctx->input("value", &tensor_value)); TensorShape element_shape(tensor_value->shape()); - OP_REQUIRES(ctx, FastBoundsCheck(element_shape.dim_size(0), - std::numeric_limits::max()), + OP_REQUIRES(ctx, + FastBoundsCheck(element_shape.dim_size(0), + std::numeric_limits::max()), errors::InvalidArgument("tensor dim0 too large to unpack")); OP_REQUIRES( @@ -1204,8 +1216,9 @@ class TensorArraySplitOp : public OpKernel { errors::InvalidArgument( "Expected lengths to be a vector, received shape: ", tensor_lengths->shape().DebugString())); - OP_REQUIRES(ctx, FastBoundsCheck(tensor_lengths->NumElements(), - std::numeric_limits::max()), + OP_REQUIRES(ctx, + FastBoundsCheck(tensor_lengths->NumElements(), + std::numeric_limits::max()), errors::InvalidArgument( "Expected lengths to have < max int32 entries")); diff --git a/tensorflow/core/ops/data_flow_ops.cc b/tensorflow/core/ops/data_flow_ops.cc index 3b1ed217ce1..ac2dc601f1f 100644 --- a/tensorflow/core/ops/data_flow_ops.cc +++ b/tensorflow/core/ops/data_flow_ops.cc @@ -1346,6 +1346,7 @@ REGISTER_OP("TensorArrayV3") .Attr("element_shape: shape = { unknown_rank: true }") .Attr("dynamic_size: bool = false") .Attr("clear_after_read: bool = true") + .Attr("identical_element_shapes: bool = false") .Attr("tensor_array_name: string = ''") .Output("handle: resource") .Output("flow: float") @@ -1374,6 +1375,12 @@ dynamic_size: A boolean that determines whether writes to the TensorArray clear_after_read: If true (default), Tensors in the TensorArray are cleared after being read. This disables multiple read semantics but allows early release of memory. +identical_element_shapes: If true (default is false), then all + elements in the TensorArray will be expected to have have identical shapes. + This allows certain behaviors, like dynamically checking for + consistent shapes on write, and being able to fill in properly + shaped zero tensors on stack -- even if the element_shape attribute + is not fully defined. tensor_array_name: Overrides the name used for the temporary tensor_array resource. Default value is the name of the 'TensorArray' op (which is guaranteed unique). diff --git a/tensorflow/python/kernel_tests/tensor_array_ops_test.py b/tensorflow/python/kernel_tests/tensor_array_ops_test.py index 0f3b11e7f9f..835fdbe2aa5 100644 --- a/tensorflow/python/kernel_tests/tensor_array_ops_test.py +++ b/tensorflow/python/kernel_tests/tensor_array_ops_test.py @@ -43,6 +43,10 @@ import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test +# TODO(ebrevdo): Delete this line after Dec. 4, 2017. 
+tensor_array_ops._ENABLE_IDENTICAL_ELEMENT_SHAPES = True + + def _make_converter(tf_dtype): def _converter(x): if tf_dtype == dtypes.string: @@ -186,6 +190,22 @@ class TensorArrayTest(test.TestCase): def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self): self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros() + def _testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self): + ta = tensor_array_ops.TensorArray( + dtype=dtypes.float32, + tensor_array_name="foo", + size=3) + self.assertAllEqual( + [[0.0, 0.0]], self.evaluate(ta.write(1, [[4.0, 5.0]]).read(0))) + self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]], + self.evaluate(ta.write(1, [[4.0, 5.0]]).stack())) + self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]], + self.evaluate(ta.write(1, [[4.0, 5.0]]).concat())) + + @test_util.run_in_graph_and_eager_modes() + def testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self): + self._testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros() + def _testTensorArrayUnpackRead(self, tf_dtype): with self.test_session(use_gpu=True): convert = _make_converter(tf_dtype) @@ -739,7 +759,8 @@ class TensorArrayTest(test.TestCase): def testTensorArrayGradientSplitConcat(self): with self.test_session(use_gpu=True) as session: ta = tensor_array_ops.TensorArray( - dtype=dtypes.float32, tensor_array_name="foo", size=2) + dtype=dtypes.float32, tensor_array_name="foo", size=2, + infer_shape=False) value = constant_op.constant( [[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]]) diff --git a/tensorflow/python/ops/tensor_array_ops.py b/tensorflow/python/ops/tensor_array_ops.py index ea5354c1d6a..605654d9be7 100644 --- a/tensorflow/python/ops/tensor_array_ops.py +++ b/tensorflow/python/ops/tensor_array_ops.py @@ -36,6 +36,9 @@ from tensorflow.python.ops import gen_data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import tf_should_use +# TODO(ebrevdo): Set to True in Dec. 4, 2017. +_ENABLE_IDENTICAL_ELEMENT_SHAPES = False + # _GraphTensorArray accesses many of the hidden generated ops, but is in # fact built to wrap these methods. @@ -146,6 +149,10 @@ class _GraphTensorArray(object): # write into the TensorArray from a Tensor with a set device # will retroactively set the device value of this op. def create(): + """Create the TensorArray op.""" + ta_kwargs = {} + if _ENABLE_IDENTICAL_ELEMENT_SHAPES: + ta_kwargs["identical_element_shapes"] = infer_shape return gen_data_flow_ops._tensor_array_v3( dtype=dtype, size=size, @@ -153,7 +160,8 @@ class _GraphTensorArray(object): dynamic_size=dynamic_size, clear_after_read=clear_after_read, tensor_array_name=tensor_array_name, - name=scope) + name=scope, + **ta_kwargs) if colocate_with_first_write_call: with ops.device(None), ops.colocate_with(None, ignore_existing=True): self._handle, self._flow = create() From 50b1bc79f640b08633ed970719ee46c17509af98 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Wed, 15 Nov 2017 12:36:51 -0800 Subject: [PATCH 060/104] Add test util for setting init_value of SumReduce, ReduceWindow, and SelectAndScatter ops to a Constant 0.0f. 
PiperOrigin-RevId: 175864310 --- tensorflow/compiler/xla/tests/BUILD | 3 + tensorflow/compiler/xla/tests/test_utils.cc | 69 ++++++++++++++++++++- tensorflow/compiler/xla/tests/test_utils.h | 11 ++++ 3 files changed, 82 insertions(+), 1 deletion(-) diff --git a/tensorflow/compiler/xla/tests/BUILD b/tensorflow/compiler/xla/tests/BUILD index 3e62481629a..63c3541e14f 100644 --- a/tensorflow/compiler/xla/tests/BUILD +++ b/tensorflow/compiler/xla/tests/BUILD @@ -69,7 +69,10 @@ cc_library( "//tensorflow/compiler/xla:util", "//tensorflow/compiler/xla:xla_data_proto", "//tensorflow/compiler/xla/service:hlo", + "//tensorflow/compiler/xla/service:hlo_verifier", + "//tensorflow/compiler/xla/service:transfer_manager", "//tensorflow/core:lib", + "//tensorflow/core:stream_executor_headers_lib", ], ) diff --git a/tensorflow/compiler/xla/tests/test_utils.cc b/tensorflow/compiler/xla/tests/test_utils.cc index cdd3d66bbba..0d56c9f4836 100644 --- a/tensorflow/compiler/xla/tests/test_utils.cc +++ b/tensorflow/compiler/xla/tests/test_utils.cc @@ -14,8 +14,9 @@ limitations under the License. ==============================================================================*/ #include "tensorflow/compiler/xla/tests/test_utils.h" - #include "tensorflow/compiler/xla/primitive_util.h" +#include "tensorflow/compiler/xla/service/hlo_verifier.h" +#include "tensorflow/compiler/xla/service/transfer_manager.h" namespace xla { @@ -46,6 +47,44 @@ void PopulateWithRandomIntegralData(Literal* literal) { })); } +bool LooksLikeSum(const HloInstruction& instruction) { + return instruction.opcode() == HloOpcode::kAdd && + instruction.operand(0)->opcode() == HloOpcode::kParameter && + instruction.operand(1)->opcode() == HloOpcode::kParameter && + instruction.operand(0) != instruction.operand(1); +} + +// Given an instruction and operand number, replace the given operand with +// a Literal Constant Zero. Handle the case of a fusion instruction by +// replacing the fusion's parent's parameter with a Literal Constant Zero, +// unless the fusion's parent is itself a fusion. 
+Status MaybeReplaceParameterInputWithZero(HloInstruction* const instruction, + const int64 operand_number) { + CHECK_LT(operand_number, instruction->operand_count()); + if (instruction->operand(operand_number)->opcode() != HloOpcode::kParameter) { + return Status::OK(); + } + + HloComputation* const computation = instruction->parent(); + std::unique_ptr zero = HloInstruction::CreateConstant( + MakeUnique(Literal::Zero(instruction->shape().element_type()))); + + if (computation->IsFusionComputation()) { + HloInstruction* const fusion_instruction = computation->FusionInstruction(); + if (fusion_instruction->IsFused()) { + return Unimplemented( + "Unable to replace fused parameter of fusion instruction"); + } + TF_RETURN_IF_ERROR(fusion_instruction->ReplaceOperandWith( + instruction->operand(operand_number)->parameter_number(), + fusion_instruction->parent()->AddInstruction(std::move(zero)))); + } else { + TF_RETURN_IF_ERROR(instruction->ReplaceOperandWith( + operand_number, computation->AddInstruction(std::move(zero)))); + } + return Status::OK(); +} + } // namespace StatusOr> MakeFakeLiteral(const Shape& shape) { @@ -117,4 +156,32 @@ StatusOr>> MakeFakeArguments( return std::move(arguments); } +Status ReplaceInitsWithConstants(HloModule* const module) { + for (HloComputation* const computation : module->computations()) { + for (HloInstruction* const instruction : computation->instructions()) { + const HloOpcode opcode = instruction->opcode(); + if ((opcode == HloOpcode::kReduce || + opcode == HloOpcode::kReduceWindow) && + LooksLikeSum(*instruction->to_apply()->root_instruction())) { + TF_RETURN_IF_ERROR(MaybeReplaceParameterInputWithZero(instruction, 1)); + } else if (opcode == HloOpcode::kSelectAndScatter && + LooksLikeSum(*instruction->scatter()->root_instruction())) { + TF_RETURN_IF_ERROR(MaybeReplaceParameterInputWithZero(instruction, 2)); + } + } + } + return Status::OK(); +} + +Status VerifyHloModule(const perftools::gputools::Platform& platform, + HloModule* const module) { + return HloVerifier( + std::bind( + &TransferManager::GetByteSizeRequirement, + TransferManager::GetForPlatform(&platform).ConsumeValueOrDie(), + std::placeholders::_1)) + .Run(module) + .status(); +} + } // namespace xla diff --git a/tensorflow/compiler/xla/tests/test_utils.h b/tensorflow/compiler/xla/tests/test_utils.h index 12d5255fce5..9aca162a185 100644 --- a/tensorflow/compiler/xla/tests/test_utils.h +++ b/tensorflow/compiler/xla/tests/test_utils.h @@ -27,6 +27,7 @@ limitations under the License. #include "tensorflow/compiler/xla/xla_data.pb.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/types.h" +#include "tensorflow/stream_executor/platform.h" namespace xla { @@ -62,6 +63,16 @@ StatusOr> MakeFakeLiteral(const Shape& shape); StatusOr>> MakeFakeArguments( const HloModule& module); +// Reductions using Adds, ReduceWindow, and SelectAndScatter, require their +// init_value to be replaced with the constant 0.0f when testing, otherwise we +// may generate a bad init_value when looking at the op in isolation. +Status ReplaceInitsWithConstants(HloModule* const module); + +// Check that a given module satisfies various constraints before trying to +// execute it. 
+Status VerifyHloModule(const perftools::gputools::Platform& platform, + HloModule* const module); + } // namespace xla #endif // TENSORFLOW_COMPILER_XLA_TESTS_TEST_UTILS_H_ From 38526cfaebf42f00da5a745ce0647f67f6076c58 Mon Sep 17 00:00:00 2001 From: Neal Wu Date: Wed, 15 Nov 2017 12:39:56 -0800 Subject: [PATCH 061/104] Add a link to the guide on "Using savedmodel with estimators" to the documentation for export_savedmodel PiperOrigin-RevId: 175864747 --- tensorflow/python/estimator/estimator.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tensorflow/python/estimator/estimator.py b/tensorflow/python/estimator/estimator.py index 2d036e2cfba..f267f4a54e5 100644 --- a/tensorflow/python/estimator/estimator.py +++ b/tensorflow/python/estimator/estimator.py @@ -461,8 +461,12 @@ class Estimator(object): assets_extra=None, as_text=False, checkpoint_path=None): + # pylint: disable=line-too-long """Exports inference graph as a SavedModel into given dir. + For a detailed guide, see + @{$saved_model#using_savedmodel_with_estimators$Using SavedModel with Estimators}. + This method builds a new graph by first calling the serving_input_receiver_fn to obtain feature `Tensor`s, and then calling this `Estimator`'s model_fn to generate the model graph based on those @@ -506,6 +510,7 @@ class Estimator(object): ValueError: if no serving_input_receiver_fn is provided, no export_outputs are provided, or no checkpoint can be found. """ + # pylint: enable=line-too-long if serving_input_receiver_fn is None: raise ValueError('serving_input_receiver_fn must be defined.') From 04a63c763e25c4f21f22d6d27757f4022d138b8d Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 12:41:43 -0800 Subject: [PATCH 062/104] Adds a SetInvalidOutput method to XlaOpKernelContext. 
PiperOrigin-RevId: 175865046 --- tensorflow/compiler/tf2xla/kernels/const_op.cc | 5 +++++ tensorflow/compiler/tf2xla/xla_op_kernel.cc | 10 ++++++++++ tensorflow/compiler/tf2xla/xla_op_kernel.h | 4 ++++ 3 files changed, 19 insertions(+) diff --git a/tensorflow/compiler/tf2xla/kernels/const_op.cc b/tensorflow/compiler/tf2xla/kernels/const_op.cc index 9833323d851..8f78b4c8f90 100644 --- a/tensorflow/compiler/tf2xla/kernels/const_op.cc +++ b/tensorflow/compiler/tf2xla/kernels/const_op.cc @@ -40,6 +40,11 @@ class ConstOp : public XlaOpKernel { void Compile(XlaOpKernelContext* ctx) override { TensorShape shape(proto_.tensor_shape()); + if (proto_.dtype() == DT_STRING) { + LOG(WARNING) << "Not computing Const of type DT_STRING"; + ctx->SetInvalidOutput(0); + return; + } xla::ComputationBuilder* b = ctx->builder(); // To avoid blowups for large constants filled with the same value, diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.cc b/tensorflow/compiler/tf2xla/xla_op_kernel.cc index b948dfee6ab..a052bb105e7 100644 --- a/tensorflow/compiler/tf2xla/xla_op_kernel.cc +++ b/tensorflow/compiler/tf2xla/xla_op_kernel.cc @@ -345,6 +345,16 @@ void XlaOpKernelContext::SetConstantOutput(int index, const Tensor& constant) { expression->set_constant_value(constant); } +void XlaOpKernelContext::SetInvalidOutput(int index) { + const TensorShape shape; + Tensor* output = nullptr; + OP_REQUIRES_OK(context_, context_->allocate_output(index, shape, &output)); + XlaExpression* expression = CastExpressionFromUninitializedTensor(output); + xla::ComputationDataHandle handle; + handle.set_handle(0); + expression->set_handle(handle); +} + void XlaOpKernelContext::SetResourceOutput(int index, XlaResource* resource) { Tensor* output = nullptr; // The shape of the output tensor is the shape of the resource itself diff --git a/tensorflow/compiler/tf2xla/xla_op_kernel.h b/tensorflow/compiler/tf2xla/xla_op_kernel.h index 5519e89252c..76bcf594e6a 100644 --- a/tensorflow/compiler/tf2xla/xla_op_kernel.h +++ b/tensorflow/compiler/tf2xla/xla_op_kernel.h @@ -142,6 +142,10 @@ class XlaOpKernelContext { // SetConstantOutput where possible. void SetConstantOutput(int index, const Tensor& host_tensor); + // Sets output 'index' to an invalid value. + // Any subsequent attempt to consume this output will cause an error. + void SetInvalidOutput(int index); + // Status handling. void SetStatus(const Status& status) { context_->SetStatus(status); } Status status() { return context_->status(); } From 82bc287dd183118f5048b10ec473e5b4ea939c7f Mon Sep 17 00:00:00 2001 From: Anna R Date: Wed, 15 Nov 2017 12:43:59 -0800 Subject: [PATCH 063/104] Internal change. 
PiperOrigin-RevId: 175865309 --- tensorflow/core/api_def/api_test.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tensorflow/core/api_def/api_test.cc b/tensorflow/core/api_def/api_test.cc index d95d958d5af..f222d345abe 100644 --- a/tensorflow/core/api_def/api_test.cc +++ b/tensorflow/core/api_def/api_test.cc @@ -272,7 +272,10 @@ void RunApiTest(bool update_api_def, const string& api_files_dir) { for (auto new_api_entry : new_api_defs_map) { const auto& file_path = new_api_entry.first; - const auto& golden_api_defs_str = golden_api_defs_map.at(file_path); + std::string golden_api_defs_str = ""; + if (golden_api_defs_map.find(file_path) != golden_api_defs_map.end()) { + golden_api_defs_str = golden_api_defs_map.at(file_path); + } string new_api_defs_str = new_api_entry.second.DebugString(); new_api_defs_str = PBTxtToMultiline(new_api_defs_str, multi_line_fields); if (golden_api_defs_str == new_api_defs_str) { From 2411c68c35849559efb97ce2392d4505ac4d8cf0 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 12:50:05 -0800 Subject: [PATCH 064/104] Update ops-related pbtxt files. PiperOrigin-RevId: 175866123 --- .../core/ops/compat/ops_history.v1.pbtxt | 57 +++++++++++++++++++ tensorflow/core/ops/ops.pbtxt | 8 +++ 2 files changed, 65 insertions(+) diff --git a/tensorflow/core/ops/compat/ops_history.v1.pbtxt b/tensorflow/core/ops/compat/ops_history.v1.pbtxt index 6833c8e0ea3..ffb608d6007 100644 --- a/tensorflow/core/ops/compat/ops_history.v1.pbtxt +++ b/tensorflow/core/ops/compat/ops_history.v1.pbtxt @@ -40245,6 +40245,63 @@ op { } is_stateful: true } +op { + name: "TensorArrayV3" + input_arg { + name: "size" + type: DT_INT32 + } + output_arg { + name: "handle" + type: DT_RESOURCE + } + output_arg { + name: "flow" + type: DT_FLOAT + } + attr { + name: "dtype" + type: "type" + } + attr { + name: "element_shape" + type: "shape" + default_value { + shape { + unknown_rank: true + } + } + } + attr { + name: "dynamic_size" + type: "bool" + default_value { + b: false + } + } + attr { + name: "clear_after_read" + type: "bool" + default_value { + b: true + } + } + attr { + name: "identical_element_shapes" + type: "bool" + default_value { + b: false + } + } + attr { + name: "tensor_array_name" + type: "string" + default_value { + s: "" + } + } + is_stateful: true +} op { name: "TensorArrayWrite" input_arg { diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt index 68fc61150c6..30b293a28a1 100644 --- a/tensorflow/core/ops/ops.pbtxt +++ b/tensorflow/core/ops/ops.pbtxt @@ -31796,6 +31796,14 @@ op { } description: "If true (default), Tensors in the TensorArray are cleared\nafter being read. This disables multiple read semantics but allows early\nrelease of memory." } + attr { + name: "identical_element_shapes" + type: "bool" + default_value { + b: false + } + description: "If true (default is false), then all\nelements in the TensorArray will be expected to have have identical shapes.\nThis allows certain behaviors, like dynamically checking for\nconsistent shapes on write, and being able to fill in properly\nshaped zero tensors on stack -- even if the element_shape attribute\nis not fully defined." + } attr { name: "tensor_array_name" type: "string" From f4b6effba238fbce2c3c66d24ab276c61eda9fc1 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 12:57:13 -0800 Subject: [PATCH 065/104] Go: Update generated wrapper functions for TensorFlow ops. 
PiperOrigin-RevId: 175867164 --- tensorflow/go/op/wrappers.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go index b43c9782453..869213eb172 100644 --- a/tensorflow/go/op/wrappers.go +++ b/tensorflow/go/op/wrappers.go @@ -14797,6 +14797,21 @@ func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr { } } +// TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value. +// +// value: If true (default is false), then all +// elements in the TensorArray will be expected to have have identical shapes. +// This allows certain behaviors, like dynamically checking for +// consistent shapes on write, and being able to fill in properly +// shaped zero tensors on stack -- even if the element_shape attribute +// is not fully defined. +// If not specified, defaults to false +func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr { + return func(m optionalAttr) { + m["identical_element_shapes"] = value + } +} + // TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value. // // value: Overrides the name used for the temporary tensor_array From a86ce6a3255ce0aa1e0d237c6235ea3e4cafd739 Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Wed, 15 Nov 2017 13:17:31 -0800 Subject: [PATCH 066/104] Removes eager memory trace. Has been broken and unrunnable for a while (since EagerTensor went to C). If we want its functionality we can recover it from the C tape code I believe. PiperOrigin-RevId: 175869936 --- tensorflow/contrib/eager/python/tfe.py | 4 -- tensorflow/python/eager/BUILD | 7 -- tensorflow/python/eager/core.py | 26 -------- tensorflow/python/eager/execute.py | 8 --- tensorflow/python/eager/memory_trace.py | 89 ------------------------- tensorflow/python/framework/ops.py | 5 -- 6 files changed, 139 deletions(-) delete mode 100644 tensorflow/python/eager/memory_trace.py diff --git a/tensorflow/contrib/eager/python/tfe.py b/tensorflow/contrib/eager/python/tfe.py index 577d3efef63..1697c879def 100644 --- a/tensorflow/contrib/eager/python/tfe.py +++ b/tensorflow/contrib/eager/python/tfe.py @@ -30,9 +30,6 @@ To use, at program startup, call `tfe.enable_eager_execution()`. 
@@value_and_gradients_function @@GradientTape -@@enable_tracing -@@flush_trace - @@run @@enable_eager_execution @@ -91,7 +88,6 @@ from tensorflow.python.eager.context import in_eager_mode from tensorflow.python.eager.context import in_graph_mode from tensorflow.python.eager.context import list_devices from tensorflow.python.eager.context import num_gpus -from tensorflow.python.eager.core import enable_tracing from tensorflow.python.eager.custom_gradient import custom_gradient from tensorflow.python.eager.execution_callbacks import add_execution_callback from tensorflow.python.eager.execution_callbacks import clear_execution_callbacks diff --git a/tensorflow/python/eager/BUILD b/tensorflow/python/eager/BUILD index 912aa4c1951..b491a637bac 100644 --- a/tensorflow/python/eager/BUILD +++ b/tensorflow/python/eager/BUILD @@ -61,7 +61,6 @@ py_library( visibility = ["//tensorflow:internal"], deps = [ ":context", - ":memory_trace", "//tensorflow/python:errors", "//tensorflow/python:pywrap_tensorflow", ], @@ -88,12 +87,6 @@ py_library( visibility = ["//tensorflow:internal"], ) -py_library( - name = "memory_trace", - srcs = ["memory_trace.py"], - srcs_version = "PY2AND3", -) - cuda_py_test( name = "tensor_test", srcs = ["tensor_test.py"], diff --git a/tensorflow/python/eager/core.py b/tensorflow/python/eager/core.py index 3f3d38b9510..483b7172107 100644 --- a/tensorflow/python/eager/core.py +++ b/tensorflow/python/eager/core.py @@ -19,7 +19,6 @@ from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow -from tensorflow.python.eager import memory_trace from tensorflow.python.framework import errors # Trace of execution and memory usage. @@ -48,28 +47,3 @@ class _NotOkStatusException(Exception): pywrap_tensorflow.TFE_Py_RegisterExceptionClass(_NotOkStatusException) - - -def enable_tracing(): - """Enables tracing of execution and memory usage. - - WARNING: tracing is not thread-safe. - """ - # TODO(alive): Add code example in doc string. - global _active_trace - _active_trace = memory_trace.MemoryTrace() - - -def flush_trace(): - """Flushes the active trace, if it exists. - - WARNING: tracing is not thread-safe. - """ - # TODO(alive): Add code example in doc string. - if _active_trace is not None: - _active_trace.flush_trace() - - -def active_trace(): - """Returns the current global active trace of execution and memory usage.""" - return _active_trace diff --git a/tensorflow/python/eager/execute.py b/tensorflow/python/eager/execute.py index 1b5f3f7f9d1..e392c6bb53b 100644 --- a/tensorflow/python/eager/execute.py +++ b/tensorflow/python/eager/execute.py @@ -65,15 +65,7 @@ def execute(op_name, num_outputs, inputs, attrs, ctx, name=None): message = e.message six.raise_from(core._status_to_exception(e.code, message), None) - # TODO(alive, cais): Use the execution callback mechanism. - if core.active_trace() is not None: - for t in tensors: - core.active_trace().record_tensor(op_name, - ops.tensor_id(t), - t.device, - t.shape.num_elements()) # pylint: enable=protected-access - # TODO(cais): Optimize this, perhaps by replacing this execute function with # a different one when there are execution callback(s). for callback in ctx.post_execution_callbacks: diff --git a/tensorflow/python/eager/memory_trace.py b/tensorflow/python/eager/memory_trace.py deleted file mode 100644 index 094bcab9e2e..00000000000 --- a/tensorflow/python/eager/memory_trace.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Utility to trace per-device memory consumption across time over execution.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections - -TraceEntry = collections.namedtuple( - "TraceEntry", ["op_name", "tensor_id", "mem_usage", "device", "size"]) -TensorData = collections.namedtuple( - "TensorData", ["op_name", "tensor_size", "device"]) - - -class MemoryTrace(object): - """Records a trace of memory usage over operation execution.""" - - def __init__(self): - - self.trace = [] - self.tensor_to_data = {} - self.current_device_mem_usage = collections.defaultdict(int) - - def record_tensor(self, op_name, tensor_id, device, size): - self.current_device_mem_usage[device] += size - self.tensor_to_data[tensor_id] = TensorData(op_name, size, device) - self.trace.append(TraceEntry(op_name, - tensor_id, - dict(self.current_device_mem_usage.items()), - device, - size)) - - def delete_tensor(self, tensor_id): - if tensor_id not in self.tensor_to_data: - return - data = self.tensor_to_data.pop(tensor_id, None) - if data is None: return - self.current_device_mem_usage[data.device] -= data.tensor_size - self.trace.append(TraceEntry(data.op_name, - tensor_id, - dict(self.current_device_mem_usage.items()), - data.device, - -data.tensor_size)) - - def flush_trace(self): - """Prints the formatted trace recorded so far.""" - longest_op_name = max(len(t.op_name) for t in self.trace) - longest_op_name = max(longest_op_name, len("op_name")) - longest_heap_size = max(max(len(str(d)) for d in t.mem_usage) - for t in self.trace) - longest_heap_size = max(longest_heap_size, len("d0")) - longest_id_len = max(len(str(t.tensor_id)) for t in self.trace) - longest_id_len = max(longest_id_len, 2) - first_line = [] - first_line.append("+/-") - first_line.append("op_name".ljust(longest_op_name)) - first_line.append("id".ljust(longest_id_len)) - for i in range(len(self.current_device_mem_usage)): - first_line.append(("d"+str(i)).ljust(longest_heap_size)) - first_line.append("size") - print(" | ".join(first_line)) - for t in self.trace: - line = [] - if t.size > 0: - line.append("+ ") - else: - line.append("- ") - line.append(t.op_name.ljust(longest_op_name)) - line.append(str(t.tensor_id).ljust(longest_id_len)) - for d in t.mem_usage: - line.append(str(d).ljust(longest_heap_size)) - line.append(str(t.size)) - print(" | ".join(line)) - self.trace = [] - print() diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py index 09e0a83c760..0e647a27f56 100644 --- a/tensorflow/python/framework/ops.py +++ b/tensorflow/python/framework/ops.py @@ -715,11 +715,6 @@ class _EagerTensorBase(Tensor): new_tensor = self._copy_to_device(context=ctx._handle, device=device_name) except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) - if core.active_trace() is 
not None: - core.active_trace().record_tensor("COPY", - tensor_id(new_tensor), - new_tensor.device, - new_tensor.shape.num_elements()) # Record the copy on tape and define backprop copy as well. if not context.in_graph_mode(): From 3caceafb9e6db900e11eccf697a56144e019cf9c Mon Sep 17 00:00:00 2001 From: Francois Chollet Date: Wed, 15 Nov 2017 13:30:20 -0800 Subject: [PATCH 067/104] Final steps for updating tf.keras to the Keras 2.1.1 API. PiperOrigin-RevId: 175871749 --- tensorflow/python/keras/BUILD | 12 ++ .../python/keras/_impl/keras/__init__.py | 2 +- .../python/keras/_impl/keras/backend.py | 25 ++- .../python/keras/_impl/keras/backend_test.py | 49 +++++ .../keras/_impl/keras/callbacks_test.py | 4 +- .../keras/_impl/keras/datasets/cifar10.py | 9 +- .../keras/_impl/keras/engine/training.py | 196 +++++++++++------- .../python/keras/_impl/keras/layers/merge.py | 2 +- .../keras/_impl/keras/layers/recurrent.py | 113 ++++------ .../_impl/keras/layers/recurrent_test.py | 29 ++- .../keras/_impl/keras/layers/wrappers.py | 3 +- tensorflow/python/keras/_impl/keras/losses.py | 5 +- .../python/keras/_impl/keras/losses_test.py | 54 +++++ tensorflow/python/keras/_impl/keras/models.py | 195 ++++++++++------- .../python/keras/_impl/keras/models_test.py | 18 ++ .../keras/_impl/keras/utils/data_utils.py | 78 +++++-- .../_impl/keras/utils/data_utils_test.py | 66 +++++- .../keras/_impl/keras/utils/generic_utils.py | 3 +- .../keras/_impl/keras/utils/np_utils.py | 8 +- .../keras/_impl/keras/utils/np_utils_test.py | 52 +++++ .../keras/_impl/keras/utils/training_utils.py | 10 +- .../golden/tensorflow.keras.-sequential.pbtxt | 6 +- .../tensorflow.keras.models.-sequential.pbtxt | 6 +- 23 files changed, 671 insertions(+), 274 deletions(-) create mode 100644 tensorflow/python/keras/_impl/keras/utils/np_utils_test.py diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD index a46a92cd0c7..e4992afbca7 100644 --- a/tensorflow/python/keras/BUILD +++ b/tensorflow/python/keras/BUILD @@ -590,6 +590,18 @@ py_test( ], ) +py_test( + name = "np_utils_test", + size = "small", + srcs = ["_impl/keras/utils/np_utils_test.py"], + srcs_version = "PY2AND3", + deps = [ + ":keras", + "//tensorflow/python:client_testlib", + "//third_party/py/numpy", + ], +) + py_test( name = "training_utils_test", size = "medium", diff --git a/tensorflow/python/keras/_impl/keras/__init__.py b/tensorflow/python/keras/_impl/keras/__init__.py index f0e8d91a929..74cc9d0488c 100644 --- a/tensorflow/python/keras/_impl/keras/__init__.py +++ b/tensorflow/python/keras/_impl/keras/__init__.py @@ -40,4 +40,4 @@ from tensorflow.python.keras._impl.keras.layers import Input from tensorflow.python.keras._impl.keras.models import Model from tensorflow.python.keras._impl.keras.models import Sequential -__version__ = '2.0.8-tf' +__version__ = '2.1.1-tf' diff --git a/tensorflow/python/keras/_impl/keras/backend.py b/tensorflow/python/keras/_impl/keras/backend.py index f9a53c4eb4d..b029e5161f7 100644 --- a/tensorflow/python/keras/_impl/keras/backend.py +++ b/tensorflow/python/keras/_impl/keras/backend.py @@ -2486,11 +2486,21 @@ def print_tensor(x, message=''): class Function(object): """Runs a computation graph. + It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`. + In particular additonal operations via `fetches` argument and additional + tensor substitutions via `feed_dict` arguments. Note that given + substitutions are merged with substitutions from `inputs`. 
Even though + `feed_dict` is passed once in the constructor (called in `model.compile()`) + we can modify the values in the dictionary. Through this feed_dict we can + provide additional substitutions besides Keras inputs. + Arguments: inputs: Feed placeholders to the computation graph. outputs: Output tensors to fetch. updates: Additional update ops to be run at function call. - name: a name to help users identify what this function does. + name: A name to help users identify what this function does. + session_kwargs: Arguments to `tf.Session.run()`: `fetches`, `feed_dict`, + `options`, `run_metadata` """ def __init__(self, inputs, outputs, updates=None, name=None, @@ -2518,12 +2528,18 @@ class Function(object): updates_ops.append(update) self.updates_op = control_flow_ops.group(*updates_ops) self.name = name + # additional tensor substitutions + self.feed_dict = session_kwargs.pop('feed_dict', {}) + # additional operations + self.fetches = session_kwargs.pop('fetches', []) + if not isinstance(self.fetches, list): + self.fetches = [self.fetches] self.session_kwargs = session_kwargs def __call__(self, inputs): if not isinstance(inputs, (list, tuple)): raise TypeError('`inputs` should be a list or tuple.') - feed_dict = {} + feed_dict = self.feed_dict.copy() for tensor, value in zip(self.inputs, inputs): if is_sparse(tensor): sparse_coo = value.tocoo() @@ -2531,11 +2547,10 @@ class Function(object): np.expand_dims(sparse_coo.col, 1)), 1) value = (indices, sparse_coo.data, sparse_coo.shape) feed_dict[tensor] = value + fetches = self.outputs + [self.updates_op] + self.fetches session = get_session() updated = session.run( - self.outputs + [self.updates_op], - feed_dict=feed_dict, - **self.session_kwargs) + fetches=fetches, feed_dict=feed_dict, **self.session_kwargs) return updated[:len(self.outputs)] diff --git a/tensorflow/python/keras/_impl/keras/backend_test.py b/tensorflow/python/keras/_impl/keras/backend_test.py index 5eaae31d921..e45e566dcac 100644 --- a/tensorflow/python/keras/_impl/keras/backend_test.py +++ b/tensorflow/python/keras/_impl/keras/backend_test.py @@ -165,6 +165,55 @@ class BackendUtilsTest(test.TestCase): for y in ys: self.assertEqual(y.op.name[:12], 'StopGradient') + def test_function_tf_fetches(self): + # Additional operations can be passed to tf.Session().run() via its + # `fetches` arguments. In contrast to `updates` argument of + # keras.backend.function() these do not have control dependency on `outputs` + # so they can run in parallel. Also they should not contribute to output of + # keras.backend.function(). + with self.test_session(): + x = keras.backend.variable(0.) + y = keras.backend.variable(0.) + x_placeholder = keras.backend.placeholder(shape=()) + y_placeholder = keras.backend.placeholder(shape=()) + + f = keras.backend.function(inputs=[x_placeholder, y_placeholder], + outputs=[x_placeholder + y_placeholder], + updates=[(x, x_placeholder + 1.)], + fetches=[keras.backend.update(y, 5.)]) + output = f([10., 20.]) + assert output == [30.] + assert keras.backend.get_session().run(fetches=[x, y]) == [11., 5.] + + def test_function_tf_feed_dict(self): + # Additional substitutions can be passed to `tf.Session().run()` via its + # `feed_dict` arguments. Note that the feed_dict is passed once in the + # constructor but we can modify the values in the dictionary. Through + # this feed_dict we can provide additional substitutions besides Keras + # inputs. + with self.test_session(): + x = keras.backend.variable(0.) + y = keras.backend.variable(0.) 
+ x_placeholder = keras.backend.placeholder(shape=()) + y_placeholder = keras.backend.placeholder(shape=()) + + feed_dict = {y_placeholder: 3.} + fetches = [keras.backend.update(y, y_placeholder * 10.)] + f = keras.backend.function(inputs=[x_placeholder], + outputs=[x_placeholder + 1.], + updates=[(x, x_placeholder + 10.)], + feed_dict=feed_dict, + fetches=fetches) + output = f([10.]) + assert output == [11.] + assert keras.backend.get_session().run(fetches=[x, y]) == [20., 30.] + + # updated value in feed_dict will be modified within the K.function() + feed_dict[y_placeholder] = 4. + output = f([20.]) + assert output == [21.] + assert keras.backend.get_session().run(fetches=[x, y]) == [30., 40.] + class BackendVariableTest(test.TestCase): diff --git a/tensorflow/python/keras/_impl/keras/callbacks_test.py b/tensorflow/python/keras/_impl/keras/callbacks_test.py index 6924a8926b6..97a650a9920 100644 --- a/tensorflow/python/keras/_impl/keras/callbacks_test.py +++ b/tensorflow/python/keras/_impl/keras/callbacks_test.py @@ -273,12 +273,12 @@ class KerasCallbacksTest(test.TestCase): stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience) weights = model.get_weights() - hist = model.fit(data, labels, callbacks=[stopper], verbose=0) + hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) >= patience # This should allow training to go for at least `patience` epochs model.set_weights(weights) - hist = model.fit(data, labels, callbacks=[stopper], verbose=0) + hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20) assert len(hist.epoch) >= patience def test_RemoteMonitor(self): diff --git a/tensorflow/python/keras/_impl/keras/datasets/cifar10.py b/tensorflow/python/keras/_impl/keras/datasets/cifar10.py index 4a687890158..7905da66c1e 100644 --- a/tensorflow/python/keras/_impl/keras/datasets/cifar10.py +++ b/tensorflow/python/keras/_impl/keras/datasets/cifar10.py @@ -39,14 +39,13 @@ def load_data(): num_train_samples = 50000 - x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8') - y_train = np.zeros((num_train_samples,), dtype='uint8') + x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8') + y_train = np.empty((num_train_samples,), dtype='uint8') for i in range(1, 6): fpath = os.path.join(path, 'data_batch_' + str(i)) - data, labels = load_batch(fpath) - x_train[(i - 1) * 10000:i * 10000, :, :, :] = data - y_train[(i - 1) * 10000:i * 10000] = labels + (x_train[(i - 1) * 10000:i * 10000, :, :, :], + y_train[(i - 1) * 10000:i * 10000]) = load_batch(fpath) fpath = os.path.join(path, 'test_batch') x_test, y_test = load_batch(fpath) diff --git a/tensorflow/python/keras/_impl/keras/engine/training.py b/tensorflow/python/keras/_impl/keras/engine/training.py index e6d29c49684..b4205bf4a39 100644 --- a/tensorflow/python/keras/_impl/keras/engine/training.py +++ b/tensorflow/python/keras/_impl/keras/engine/training.py @@ -108,7 +108,7 @@ def _standardize_input_data(data, arrays = data elif data.__class__.__name__ == 'DataFrame': # test if data is a DataFrame, without pandas installed - data = data.values + arrays = data.values else: if not hasattr(data, 'shape'): raise TypeError('Error when checking model ' + exception_prefix + @@ -271,12 +271,13 @@ def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes): is incompatible with an output. 
""" key_losses = { - 'mean_squared_error', 'binary_crossentropy', 'categorical_crossentropy' + losses.mean_squared_error, losses.binary_crossentropy, + losses.categorical_crossentropy } for y, loss, shape in zip(targets, loss_fns, output_shapes): if loss is None: continue - if loss.__name__ == 'categorical_crossentropy': + if loss is losses.categorical_crossentropy: if y.shape[-1] == 1: raise ValueError('You are passing a target array of shape ' + str( y.shape) + ' while using as loss `categorical_crossentropy`. ' @@ -286,14 +287,14 @@ def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes): 'If your targets are integer classes, ' 'you can convert them to the expected format via:\n' '```\n' - 'from keras.utils.np_utils import to_categorical\n' + 'from keras.utils import to_categorical\n' 'y_binary = to_categorical(y_int)\n' '```\n' '\n' 'Alternatively, you can use the loss function ' '`sparse_categorical_crossentropy` instead, ' 'which does expect integer targets.') - if loss.__name__ in key_losses: + if loss in key_losses: for target_dim, out_dim in zip(y.shape[1:], shape[1:]): if out_dim is not None and target_dim != out_dim: raise ValueError('A target array with shape ' + str(y.shape) + @@ -584,7 +585,7 @@ class Model(Network): """Configures the model for training. Arguments: - optimizer: String (name of optimizer) or optimizer object. + optimizer: String (name of optimizer) or optimizer instance. See [optimizers](/optimizers). loss: String (name of objective function) or objective function. See [losses](/losses). @@ -623,9 +624,7 @@ class Model(Network): can specify them via the `target_tensors` argument. It can be a single tensor (for a single-output model), a list of tensors, or a dict mapping output names to target tensors. - **kwargs: When using the Theano/CNTK backends, these arguments - are passed into K.function. When using the TensorFlow backend, - these arguments are passed into `tf.Session.run`. + **kwargs: These arguments are passed to `tf.Session.run`. Raises: ValueError: In case of invalid arguments for @@ -1413,10 +1412,8 @@ class Model(Network): output_shapes = [] for output_shape, loss_fn in zip(self._feed_output_shapes, self._feed_loss_fns): - if loss_fn.__name__ == 'sparse_categorical_crossentropy': + if loss_fn is losses.sparse_categorical_crossentropy: output_shapes.append(output_shape[:-1] + (1,)) - elif getattr(losses, loss_fn.__name__, None) is None: - output_shapes.append(None) else: output_shapes.append(output_shape) x = _standardize_input_data( @@ -1484,60 +1481,76 @@ class Model(Network): """Trains the model for a fixed number of epochs (iterations on a dataset). Arguments: - x: Numpy array of training data, - or list of Numpy arrays if the model has multiple inputs. - If all inputs in the model are named, - you can also pass a dictionary - mapping input names to Numpy arrays. - Can be `None` (default) if feeding from framework-native tensors. - y: Numpy array of target data, - or list of Numpy arrays if the model has multiple outputs. - If all outputs in the model are named, - you can also pass a dictionary - mapping output names to Numpy arrays. + x: Numpy array of training data (if the model has a single input), + or list of Numpy arrays (if the model has multiple inputs). + If input layers in the model are named, you can also pass a + dictionary mapping input names to Numpy arrays. + `x` can be `None` (default) if feeding from + TensorFlow data tensors. 
+ y: Numpy array of target (label) data + (if the model has a single output), + or list of Numpy arrays (if the model has multiple outputs). + If output layers in the model are named, you can also pass a + dictionary mapping output names to Numpy arrays. + `y` can be `None` (default) if feeding from + TensorFlow data tensors. Can be `None` (default) if feeding from framework-native tensors. batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, it will default to 32. - epochs: Integer, the number of times to iterate - over the training data arrays. + epochs: Integer. Number of epochs to train the model. + An epoch is an iteration over the entire `x` and `y` + data provided. + Note that in conjunction with `initial_epoch`, + `epochs` is to be understood as "final epoch". + The model is not trained for a number of iterations + given by `epochs`, but merely until the epoch + of index `epochs` is reached. verbose: 0, 1, or 2. Verbosity mode. - 0 = silent, 1 = verbose, 2 = one log line per epoch. - callbacks: List of callbacks to be called during training. + 0 = silent, 1 = progress bar, 2 = one line per epoch. + callbacks: List of `keras.callbacks.Callback` instances. + List of callbacks to apply during training. See [callbacks](/callbacks). - validation_split: Float between 0 and 1: - fraction of the training data to be used as validation data. + validation_split: Float between 0 and 1. + Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. - validation_data: Data on which to evaluate - the loss and any model metrics - at the end of each epoch. The model will not - be trained on this data. - This could be a tuple (x_val, y_val) - or a tuple (x_val, y_val, val_sample_weights). - shuffle: Boolean, whether to shuffle the training data - before each epoch. Has no effect when `steps_per_epoch` - is not `None`. - class_weight: Optional dictionary mapping - class indices (integers) to - a weight (float) to apply to the model's loss for the samples - from this class during training. - This can be useful to tell the model to "pay more attention" to - samples from an under-represented class. - sample_weight: Optional array of the same length as x, containing - weights to apply to the model's loss for each sample. - In the case of temporal data, you can pass a 2D array - with shape (samples, sequence_length), + The validation data is selected from the last samples + in the `x` and `y` data provided, before shuffling. + validation_data: tuple `(x_val, y_val)` or tuple + `(x_val, y_val, val_sample_weights)` on which to evaluate + the loss and any model metrics at the end of each epoch. + The model will not be trained on this data. + This will override `validation_split`. + shuffle: Boolean (whether to shuffle the training data + before each epoch) or str (for 'batch'). + 'batch' is a special option for dealing with the + limitations of HDF5 data; it shuffles in batch-sized chunks. + Has no effect when `steps_per_epoch` is not `None`. + class_weight: Optional dictionary mapping class indices (integers) + to a weight (float) value, used for weighting the loss function + (during training only). + This can be useful to tell the model to + "pay more attention" to samples from + an under-represented class. 
+ sample_weight: Optional Numpy array of weights for + the training samples, used for weighting the loss function + (during training only). You can either pass a flat (1D) + Numpy array with the same length as the input samples + (1:1 mapping between weights and samples), + or in the case of temporal data, + you can pass a 2D array with shape + `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify - sample_weight_mode="temporal" in compile(). + `sample_weight_mode="temporal"` in `compile()`. initial_epoch: Epoch at which to start training - (useful for resuming a previous training run) + (useful for resuming a previous training run). steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the - next epoch. When training with Input Tensors such as + next epoch. When training with input tensors such as TensorFlow data tensors, the default `None` is equal to the number of unique samples in your dataset divided by the batch size, or 1 if that cannot be determined. @@ -1546,8 +1559,10 @@ class Model(Network): to validate before stopping. Returns: - A `History` instance. Its `history` attribute contains - all information collected during training. + A `History` object. Its `History.history` attribute is + a record of training loss values and metrics values + at successive epochs, as well as validation loss values + and validation metrics values (if applicable). Raises: ValueError: In case of mismatch between the provided input data @@ -1667,25 +1682,40 @@ class Model(Network): Computation is done in batches. Arguments: - x: Numpy array of test data, - or list of Numpy arrays if the model has multiple inputs. - If all inputs in the model are named, - you can also pass a dictionary - mapping input names to Numpy arrays. - Can be `None` (default) if feeding from framework-native tensors. - y: Numpy array of target data, - or list of Numpy arrays if the model has multiple outputs. - If all outputs in the model are named, - you can also pass a dictionary - mapping output names to Numpy arrays. - Can be `None` (default) if feeding from framework-native tensors. - batch_size: Integer. If unspecified, it will default to 32. - verbose: Verbosity mode, 0 or 1. - sample_weight: Array of weights to weight the contribution - of different samples to the loss and metrics. - steps: Total number of steps (batches of samples) + x: Numpy array of test data (if the model has a single input), + or list of Numpy arrays (if the model has multiple inputs). + If input layers in the model are named, you can also pass a + dictionary mapping input names to Numpy arrays. + `x` can be `None` (default) if feeding from + framework-native tensors (e.g. TensorFlow data tensors). + y: Numpy array of target (label) data + (if the model has a single output), + or list of Numpy arrays (if the model has multiple outputs). + If output layers in the model are named, you can also pass a + dictionary mapping output names to Numpy arrays. + `y` can be `None` (default) if feeding from + framework-native tensors (e.g. TensorFlow data tensors). + batch_size: Integer or `None`. + Number of samples per evaluation step. + If unspecified, `batch_size` will default to 32. + verbose: 0 or 1. Verbosity mode. + 0 = silent, 1 = progress bar. + sample_weight: Optional Numpy array of weights for + the test samples, used for weighting the loss function. 
+ You can either pass a flat (1D) + Numpy array with the same length as the input samples + (1:1 mapping between weights and samples), + or in the case of temporal data, + you can pass a 2D array with shape + `(samples, sequence_length)`, + to apply a different weight to every timestep of every sample. + In this case you should make sure to specify + `sample_weight_mode="temporal"` in `compile()`. + steps: Integer or `None`. + Total number of steps (batches of samples) before declaring the evaluation round finished. - Ignored with the default value of `None`. + The default `None` is equal to the number of unique samples in + your dataset divided by the batch size. Returns: Scalar test loss (if the model has a single output and no metrics) @@ -1694,7 +1724,7 @@ class Model(Network): the display labels for the scalar outputs. Raises: - ValueError: In case of invalid argument values. + ValueError: In case of invalid arguments. """ # Backwards compatibility. if batch_size is None and steps is None: @@ -1926,7 +1956,7 @@ class Model(Network): to yield from `generator` before declaring one epoch finished and starting the next epoch. It should typically be equal to the number of unique samples of your dataset - divided by the batch size. + divided by the batch size. Not used if using `Sequence`. epochs: Integer, total number of iterations on the data. verbose: Verbosity mode, 0, 1, or 2. callbacks: List of callbacks to be called during training. @@ -1941,7 +1971,7 @@ class Model(Network): for the class. max_queue_size: Maximum size for the generator queue workers: Maximum number of processes to spin up - when using process based threading + when using process-based threading. use_multiprocessing: If True, use process based threading. Note that because this implementation relies on multiprocessing, @@ -1949,9 +1979,9 @@ class Model(Network): non picklable arguments to the generator as they can't be passed easily to children processes. - shuffle: Whether to shuffle the order of the batches at the - beginning of each epoch. Only used with instances - of `Sequence` (keras.utils.Sequence). + shuffle: Whether to shuffle the data at the beginning of each + epoch. Only used with instances of `Sequence` + (`keras.utils.Sequence`). initial_epoch: Epoch at which to start training (useful for resuming a previous training run) **kwargs: support for legacy arguments. @@ -2061,6 +2091,8 @@ class Model(Network): ' and multiple workers may duplicate your data.' ' Please consider using the`keras.utils.Sequence' ' class.')) + if is_sequence: + steps_per_epoch = len(generator) enqueuer = None try: @@ -2182,9 +2214,10 @@ class Model(Network): when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. + Not used if using `Sequence`. max_queue_size: maximum size for the generator queue workers: maximum number of processes to spin up - when using process based threading + when using process-based threading. use_multiprocessing: if True, use process based threading. Note that because this implementation relies on multiprocessing, @@ -2230,6 +2263,8 @@ class Model(Network): ' and multiple workers may duplicate your data.' ' Please consider using the`keras.utils.Sequence' ' class.')) + if is_sequence: + steps = len(generator) enqueuer = None try: @@ -2309,8 +2344,9 @@ class Model(Network): steps: Total number of steps (batches of samples) to yield from `generator` before stopping. max_queue_size: Maximum size for the generator queue. 
+ Not used if using `Sequence`. workers: Maximum number of processes to spin up - when using process based threading + when using process-based threading. use_multiprocessing: If `True`, use process based threading. Note that because this implementation relies on multiprocessing, @@ -2351,6 +2387,8 @@ class Model(Network): ' and multiple workers may duplicate your data.' ' Please consider using the`keras.utils.Sequence' ' class.')) + if is_sequence: + steps = len(generator) enqueuer = None try: diff --git a/tensorflow/python/keras/_impl/keras/layers/merge.py b/tensorflow/python/keras/_impl/keras/layers/merge.py index 5f26ce44e39..888be273693 100644 --- a/tensorflow/python/keras/_impl/keras/layers/merge.py +++ b/tensorflow/python/keras/_impl/keras/layers/merge.py @@ -318,7 +318,7 @@ class Concatenate(_Merge): """Layer that concatenates a list of inputs. It takes as input a list of tensors, - all of the same shape expect for the concatenation axis, + all of the same shape except for the concatenation axis, and returns a single tensor, the concatenation of all inputs. Arguments: diff --git a/tensorflow/python/keras/_impl/keras/layers/recurrent.py b/tensorflow/python/keras/_impl/keras/layers/recurrent.py index 2bc74d5f807..8df1840b4cb 100644 --- a/tensorflow/python/keras/_impl/keras/layers/recurrent.py +++ b/tensorflow/python/keras/_impl/keras/layers/recurrent.py @@ -756,6 +756,8 @@ class RNN(Layer): @property def trainable_weights(self): + if not self.trainable: + return [] if isinstance(self.cell, Layer): return self.cell.trainable_weights return [] @@ -763,6 +765,8 @@ class RNN(Layer): @property def non_trainable_weights(self): if isinstance(self.cell, Layer): + if not self.trainable: + return self.cell.weights return self.cell.non_trainable_weights return [] @@ -1048,7 +1052,6 @@ class SimpleRNN(RNN): unroll=unroll, activity_regularizer=regularizers.get(activity_regularizer), **kwargs) - # self.activity_regularizer = regularizers.get(activity_regularizer) def call(self, inputs, mask=None, training=None, initial_state=None): self.cell._generate_dropout_mask(inputs, training=training) @@ -1114,36 +1117,25 @@ class SimpleRNN(RNN): def get_config(self): config = { - 'units': - self.units, - 'activation': - activations.serialize(self.activation), - 'use_bias': - self.use_bias, - 'kernel_initializer': - initializers.serialize(self.kernel_initializer), + 'units': self.units, + 'activation': activations.serialize(self.activation), + 'use_bias': self.use_bias, + 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), - 'bias_initializer': - initializers.serialize(self.bias_initializer), - 'kernel_regularizer': - regularizers.serialize(self.kernel_regularizer), + 'bias_initializer': initializers.serialize(self.bias_initializer), + 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), - 'bias_regularizer': - regularizers.serialize(self.bias_regularizer), + 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), - 'kernel_constraint': - constraints.serialize(self.kernel_constraint), + 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), - 'bias_constraint': - constraints.serialize(self.bias_constraint), - 'dropout': - self.dropout, - 
'recurrent_dropout': - self.recurrent_dropout + 'bias_constraint': constraints.serialize(self.bias_constraint), + 'dropout': self.dropout, + 'recurrent_dropout': self.recurrent_dropout } base_config = super(SimpleRNN, self).get_config() del base_config['cell'] @@ -1597,40 +1589,28 @@ class GRU(RNN): def get_config(self): config = { - 'units': - self.units, - 'activation': - activations.serialize(self.activation), + 'units': self.units, + 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), - 'use_bias': - self.use_bias, - 'kernel_initializer': - initializers.serialize(self.kernel_initializer), + 'use_bias': self.use_bias, + 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), - 'bias_initializer': - initializers.serialize(self.bias_initializer), - 'kernel_regularizer': - regularizers.serialize(self.kernel_regularizer), + 'bias_initializer': initializers.serialize(self.bias_initializer), + 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), - 'bias_regularizer': - regularizers.serialize(self.bias_regularizer), + 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), - 'kernel_constraint': - constraints.serialize(self.kernel_constraint), + 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), - 'bias_constraint': - constraints.serialize(self.bias_constraint), - 'dropout': - self.dropout, - 'recurrent_dropout': - self.recurrent_dropout, - 'implementation': - self.implementation + 'bias_constraint': constraints.serialize(self.bias_constraint), + 'dropout': self.dropout, + 'recurrent_dropout': self.recurrent_dropout, + 'implementation': self.implementation } base_config = super(GRU, self).get_config() del base_config['cell'] @@ -2125,42 +2105,29 @@ class LSTM(RNN): def get_config(self): config = { - 'units': - self.units, - 'activation': - activations.serialize(self.activation), + 'units': self.units, + 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), - 'use_bias': - self.use_bias, - 'kernel_initializer': - initializers.serialize(self.kernel_initializer), + 'use_bias': self.use_bias, + 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), - 'bias_initializer': - initializers.serialize(self.bias_initializer), - 'unit_forget_bias': - self.unit_forget_bias, - 'kernel_regularizer': - regularizers.serialize(self.kernel_regularizer), + 'bias_initializer': initializers.serialize(self.bias_initializer), + 'unit_forget_bias': self.unit_forget_bias, + 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), - 'bias_regularizer': - regularizers.serialize(self.bias_regularizer), + 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), - 'kernel_constraint': - constraints.serialize(self.kernel_constraint), + 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': 
constraints.serialize(self.recurrent_constraint), - 'bias_constraint': - constraints.serialize(self.bias_constraint), - 'dropout': - self.dropout, - 'recurrent_dropout': - self.recurrent_dropout, - 'implementation': - self.implementation + 'bias_constraint': constraints.serialize(self.bias_constraint), + 'dropout': self.dropout, + 'recurrent_dropout': self.recurrent_dropout, + 'implementation': self.implementation } base_config = super(LSTM, self).get_config() del base_config['cell'] diff --git a/tensorflow/python/keras/_impl/keras/layers/recurrent_test.py b/tensorflow/python/keras/_impl/keras/layers/recurrent_test.py index b1f89a30bb3..7dc4c1db9b4 100644 --- a/tensorflow/python/keras/_impl/keras/layers/recurrent_test.py +++ b/tensorflow/python/keras/_impl/keras/layers/recurrent_test.py @@ -359,19 +359,38 @@ class RNNTest(test.TestCase): layer.build((None, None, 5)) # Test regularization losses - assert len(layer.losses) == 1 + self.assertEqual(len(layer.losses), 1) # Test weights - assert len(layer.trainable_weights) == 6 + self.assertEqual(len(layer.trainable_weights), 6) cells[0].trainable = False - assert len(layer.trainable_weights) == 3 - assert len(layer.non_trainable_weights) == 3 + self.assertEqual(len(layer.trainable_weights), 3) + self.assertEqual(len(layer.non_trainable_weights), 3) # Test `get_losses_for` x = keras.Input((None, 5)) y = keras.backend.sum(x) cells[0].add_loss(y, inputs=x) - assert layer.get_losses_for(x) == [y] + self.assertEqual(layer.get_losses_for(x), [y]) + + def test_rnn_dynamic_trainability(self): + layer_class = keras.layers.SimpleRNN + embedding_dim = 4 + units = 3 + + layer = layer_class(units) + layer.build((None, None, embedding_dim)) + self.assertEqual(len(layer.weights), 3) + self.assertEqual(len(layer.trainable_weights), 3) + self.assertEqual(len(layer.non_trainable_weights), 0) + layer.trainable = False + self.assertEqual(len(layer.weights), 3) + self.assertEqual(len(layer.trainable_weights), 0) + self.assertEqual(len(layer.non_trainable_weights), 3) + layer.trainable = True + self.assertEqual(len(layer.weights), 3) + self.assertEqual(len(layer.trainable_weights), 3) + self.assertEqual(len(layer.non_trainable_weights), 0) if __name__ == '__main__': diff --git a/tensorflow/python/keras/_impl/keras/layers/wrappers.py b/tensorflow/python/keras/_impl/keras/layers/wrappers.py index 6f786b78500..0e82005caad 100644 --- a/tensorflow/python/keras/_impl/keras/layers/wrappers.py +++ b/tensorflow/python/keras/_impl/keras/layers/wrappers.py @@ -336,7 +336,8 @@ class Bidirectional(Wrapper): output = [y, y_rev] # Properly set learning phase - if 0 < self.layer.dropout + self.layer.recurrent_dropout: + if (getattr(y, '_uses_learning_phase', False) or + getattr(y_rev, '_uses_learning_phase', False)): if self.merge_mode is None: for out in output: out._uses_learning_phase = True diff --git a/tensorflow/python/keras/_impl/keras/losses.py b/tensorflow/python/keras/_impl/keras/losses.py index da0984d3c33..19212aeee8c 100644 --- a/tensorflow/python/keras/_impl/keras/losses.py +++ b/tensorflow/python/keras/_impl/keras/losses.py @@ -22,6 +22,7 @@ import six from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object +from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object def mean_squared_error(y_true, y_pred): @@ -105,7 +106,7 @@ cosine = cosine_proximity def serialize(loss): - return loss.__name__ + return serialize_keras_object(loss) def 
deserialize(name, custom_objects=None): @@ -122,6 +123,8 @@ def get(identifier): if isinstance(identifier, six.string_types): identifier = str(identifier) return deserialize(identifier) + if isinstance(identifier, dict): + return deserialize(identifier) elif callable(identifier): return identifier else: diff --git a/tensorflow/python/keras/_impl/keras/losses_test.py b/tensorflow/python/keras/_impl/keras/losses_test.py index b295356ec19..1884c0fdca7 100644 --- a/tensorflow/python/keras/_impl/keras/losses_test.py +++ b/tensorflow/python/keras/_impl/keras/losses_test.py @@ -18,11 +18,18 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +import os +import shutil + import numpy as np from tensorflow.python.keras._impl import keras from tensorflow.python.platform import test +try: + import h5py # pylint:disable=g-import-not-at-top +except ImportError: + h5py = None ALL_LOSSES = [keras.losses.mean_squared_error, keras.losses.mean_absolute_error, @@ -39,6 +46,20 @@ ALL_LOSSES = [keras.losses.mean_squared_error, keras.losses.categorical_hinge] +class _MSEMAELoss(object): + """Loss function with internal state, for testing serialization code.""" + + def __init__(self, mse_fraction): + self.mse_fraction = mse_fraction + + def __call__(self, y_true, y_pred): + return (self.mse_fraction * keras.losses.mse(y_true, y_pred) + + (1 - self.mse_fraction) * keras.losses.mae(y_true, y_pred)) + + def get_config(self): + return {'mse_fraction': self.mse_fraction} + + class KerasLossesTest(test.TestCase): def test_objective_shapes_3d(self): @@ -83,6 +104,39 @@ class KerasLossesTest(test.TestCase): loss = keras.backend.eval(keras.losses.categorical_hinge(y_true, y_pred)) self.assertAllClose(expected_loss, np.mean(loss)) + def test_serializing_loss_class(self): + orig_loss_class = _MSEMAELoss(0.3) + with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): + serialized = keras.losses.serialize(orig_loss_class) + + with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): + deserialized = keras.losses.deserialize(serialized) + assert isinstance(deserialized, _MSEMAELoss) + assert deserialized.mse_fraction == 0.3 + + def test_serializing_model_with_loss_class(self): + tmpdir = self.get_temp_dir() + self.addCleanup(shutil.rmtree, tmpdir) + model_filename = os.path.join(tmpdir, 'custom_loss.h5') + + with self.test_session(): + with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): + loss = _MSEMAELoss(0.3) + inputs = keras.layers.Input((2,)) + outputs = keras.layers.Dense(1, name='model_output')(inputs) + model = keras.models.Model(inputs, outputs) + model.compile(optimizer='sgd', loss={'model_output': loss}) + model.fit(np.random.rand(256, 2), np.random.rand(256, 1)) + + if h5py is None: + return + + model.save(model_filename) + + with keras.utils.custom_object_scope({'_MSEMAELoss': _MSEMAELoss}): + loaded_model = keras.models.load_model(model_filename) + loaded_model.predict(np.random.rand(128, 2)) + if __name__ == '__main__': test.main() diff --git a/tensorflow/python/keras/_impl/keras/models.py b/tensorflow/python/keras/_impl/keras/models.py index 046fd116337..ba202827ce3 100644 --- a/tensorflow/python/keras/_impl/keras/models.py +++ b/tensorflow/python/keras/_impl/keras/models.py @@ -31,6 +31,7 @@ from tensorflow.python.keras._impl.keras import layers as layer_module from tensorflow.python.keras._impl.keras import optimizers from tensorflow.python.keras._impl.keras.engine import topology from 
tensorflow.python.keras._impl.keras.engine.topology import Input +from tensorflow.python.keras._impl.keras.engine.topology import InputLayer from tensorflow.python.keras._impl.keras.engine.topology import Layer from tensorflow.python.keras._impl.keras.engine.topology import TFBaseLayer from tensorflow.python.keras._impl.keras.engine.training import Model @@ -456,38 +457,48 @@ class Sequential(Model): 'an instance of class Layer. ' 'Found: ' + str(layer)) if not self.outputs: - # first layer in model: check that it is an input layer - if not layer._inbound_nodes: - # create an input layer - if not hasattr(layer, '_batch_input_shape'): - raise ValueError('The first layer in a ' - 'Sequential model must ' - 'get an `input_shape` or ' - '`batch_input_shape` argument.') + # First layer in model: check that it is an input layer. + if not isinstance(layer, InputLayer): + # Create an input layer. + # First, we need to infer its expected input shape and dtype. + if isinstance(layer, (Model, Sequential)): + # We were passed a model as first layer. + # This requires a specific way to figure out the + # input shape and dtype. + if not layer.layers: + raise ValueError('Cannot add an empty model ' + 'to a `Sequential` model.') + # In case of nested models: recover the first layer + # of the deepest model to infer input shape and dtype. + first_layer = layer.layers[0] + while isinstance(first_layer, (Model, Sequential)): + first_layer = first_layer.layers[0] + batch_shape = first_layer._batch_input_shape + dtype = first_layer.dtype + else: + # We were passed a regular layer, and it should + # know about its input shape. Otherwise, that's an error. + if not hasattr(layer, '_batch_input_shape'): + raise ValueError('The first layer in a ' + 'Sequential model must ' + 'get an `input_shape` argument.') + batch_shape = layer._batch_input_shape + dtype = layer.dtype # Instantiate the input layer. x = Input( - batch_shape=layer._batch_input_shape, - dtype=layer.dtype, - name=layer.name + '_input') + batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input') # This will build the current layer # and create the node connecting the current layer # to the input layer we just created. layer(x) - if len(layer._inbound_nodes) != 1: - raise ValueError('A layer added to a Sequential model must ' - 'not already be connected somewhere else. ' - 'Model received layer ' + layer.name + ' which has ' + - str(len(layer._inbound_nodes)) + - ' pre-existing inbound connections.') - - if len(layer._inbound_nodes[0].output_tensors) != 1: + if len(layer.inbound_nodes[-1].output_tensors) != 1: raise ValueError('All layers in a Sequential model ' 'should have a single output tensor. ' 'For multi-output layers, ' 'use the functional API.') - self.outputs = [layer._inbound_nodes[0].output_tensors[0]] + self.outputs = [layer.inbound_nodes[-1].output_tensors[0]] self.inputs = topology.get_source_inputs(self.outputs[0]) # We create an input node, which we will keep updated @@ -741,21 +752,18 @@ class Sequential(Model): If the model has multiple outputs, you can use a different `sample_weight_mode` on each output by passing a dictionary or a list of modes. - weighted_metrics: List of metrics to be evaluated and weighted - by sample_weight or class_weight during training and testing. - target_tensors: By default, Keras will create placeholders for the + weighted_metrics: list of metrics to be evaluated and weighted + by `sample_weight` or `class_weight` during training and testing. 
+ target_tensors: By default, Keras will create a placeholder for the model's target, which will be fed with the target data during training. If instead you would like to use your own - target tensors (in turn, Keras will not expect external + target tensor (in turn, Keras will not expect external Numpy data for these targets at training time), you - can specify them via the `target_tensors` argument. It can be - a single tensor (for a single-output model), a list of tensors, - or a dict mapping output names to target tensors. - **kwargs: When using the Theano/CNTK backends, these arguments - are passed into K.function. When using the TensorFlow backend, - these arguments are passed into `tf.Session.run`. - Raises: - ValueError: In case of invalid arguments for + can specify them via the `target_tensors` argument. + It should be a single tensor + (for a single-output `Sequential` model). + **kwargs: These arguments are passed into `tf.Session.run`. + Example: ```python model = Sequential() @@ -790,10 +798,10 @@ class Sequential(Model): self.total_loss = self.model.total_loss def fit(self, - x, - y, - batch_size=32, - epochs=10, + x=None, + y=None, + batch_size=None, + epochs=1, verbose=1, callbacks=None, validation_split=0., @@ -801,47 +809,86 @@ class Sequential(Model): shuffle=True, class_weight=None, sample_weight=None, - initial_epoch=0): + initial_epoch=0, + steps_per_epoch=None, + validation_steps=None, + **kwargs): """Trains the model for a fixed number of epochs. Arguments: - x: input data, as a Numpy array or list of Numpy arrays - (if the model has multiple inputs). - y: labels, as a Numpy array. - batch_size: integer. Number of samples per gradient update. - epochs: integer. Number of epochs to train the model. - Note that in conjunction with initial_epoch, the parameter - epochs is to be understood as "final epoch". The model is - not trained for a number of steps given by epochs, but - until the epoch epochs is reached. - verbose: 0 for no logging to stdout, - 1 for progress bar logging, 2 for one log line per epoch. - callbacks: list of `keras.callbacks.Callback` instances. + x: Numpy array of training data. + If the input layer in the model is named, you can also pass a + dictionary mapping the input name to a Numpy array. + `x` can be `None` (default) if feeding from + TensorFlow data tensors. + y: Numpy array of target (label) data. + If the output layer in the model is named, you can also pass a + dictionary mapping the output name to a Numpy array. + `y` can be `None` (default) if feeding from + TensorFlow data tensors. + batch_size: Integer or `None`. + Number of samples per gradient update. + If unspecified, it will default to 32. + epochs: Integer. Number of epochs to train the model. + An epoch is an iteration over the entire `x` and `y` + data provided. + Note that in conjunction with `initial_epoch`, + `epochs` is to be understood as "final epoch". + The model is not trained for a number of iterations + given by `epochs`, but merely until the epoch + of index `epochs` is reached. + verbose: 0, 1, or 2. Verbosity mode. + 0 = silent, 1 = progress bar, 2 = one line per epoch. + callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during training. See [callbacks](/callbacks). - validation_split: float (0. < x < 1). - Fraction of the data to use as held-out validation data. - validation_data: tuple (x_val, y_val) or tuple - (x_val, y_val, val_sample_weights) to be used as held-out - validation data. Will override validation_split. 
- shuffle: boolean or str (for 'batch'). - Whether to shuffle the samples at each epoch. + validation_split: Float between 0 and 1: + Fraction of the training data to be used as validation data. + The model will set apart this fraction of the training data, + will not train on it, and will evaluate + the loss and any model metrics + on this data at the end of each epoch. + The validation data is selected from the last samples + in the `x` and `y` data provided, before shuffling. + validation_data: tuple `(x_val, y_val)` or tuple + `(x_val, y_val, val_sample_weights)` on which to evaluate + the loss and any model metrics at the end of each epoch. + The model will not be trained on this data. + This will override `validation_split`. + shuffle: Boolean (whether to shuffle the training data + before each epoch) or str (for 'batch'). 'batch' is a special option for dealing with the limitations of HDF5 data; it shuffles in batch-sized chunks. - class_weight: dictionary mapping classes to a weight value, - used for scaling the loss function (during training only). - sample_weight: Numpy array of weights for - the training samples, used for scaling the loss function + Has no effect when `steps_per_epoch` is not `None`. + class_weight: Optional dictionary mapping class indices (integers) + to a weight (float) value, used for weighting the loss function + (during training only). + This can be useful to tell the model to + "pay more attention" to samples from + an under-represented class. + sample_weight: Optional Numpy array of weights for + the training samples, used for weighting the loss function (during training only). You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, - you can pass a 2D array with shape (samples, sequence_length), + you can pass a 2D array with shape + `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify - sample_weight_mode="temporal" in compile(). + `sample_weight_mode="temporal"` in `compile()`. initial_epoch: Epoch at which to start training (useful for resuming a previous training run). + steps_per_epoch: Total number of steps (batches of samples) + before declaring one epoch finished and starting the + next epoch. When training with input tensors such as + TensorFlow data tensors, the default `None` is equal to + the number of unique samples in your dataset divided by + the batch size, or 1 if that cannot be determined. + validation_steps: Only relevant if `steps_per_epoch` + is specified. Total number of steps (batches of samples) + to validate before stopping. + **kwargs: Used for backwards compatibility support. Returns: A `History` object. Its `History.history` attribute is @@ -850,10 +897,12 @@ class Sequential(Model): and validation metrics values (if applicable). Raises: - RuntimeError: if the model was never compiled. + RuntimeError: If the model was never compiled. + ValueError: In case of mismatch between the provided input data + and what the model expects. 
""" if not self.built: - raise RuntimeError('The model needs to be compiled ' 'before being used.') + raise RuntimeError('The model needs to be compiled before being used.') return self.model.fit( x, y, @@ -866,7 +915,9 @@ class Sequential(Model): shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight, - initial_epoch=initial_epoch) + initial_epoch=initial_epoch, + steps_per_epoch=steps_per_epoch, + validation_steps=validation_steps) def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None): """Computes the loss on some input data, batch by batch. @@ -889,7 +940,7 @@ class Sequential(Model): RuntimeError: if the model was never compiled. """ if not self.built: - raise RuntimeError('The model needs to be compiled ' 'before being used.') + raise RuntimeError('The model needs to be compiled before being used.') return self.model.evaluate( x, y, @@ -949,7 +1000,7 @@ class Sequential(Model): RuntimeError: if the model was never compiled. """ if not self.built: - raise RuntimeError('The model needs to be compiled ' 'before being used.') + raise RuntimeError('The model needs to be compiled before being used.') return self.model.train_on_batch( x, y, sample_weight=sample_weight, class_weight=class_weight) @@ -972,10 +1023,10 @@ class Sequential(Model): RuntimeError: if the model was never compiled. """ if not self.built: - raise RuntimeError('The model needs to be compiled ' 'before being used.') + raise RuntimeError('The model needs to be compiled before being used.') return self.model.test_on_batch(x, y, sample_weight=sample_weight) - def predict_proba(self, x, batch_size=32, verbose=1): + def predict_proba(self, x, batch_size=32, verbose=0): """Generates class probability predictions for the input samples. The input samples are processed batch by batch. @@ -997,7 +1048,7 @@ class Sequential(Model): '(like softmax or sigmoid would).') return preds - def predict_classes(self, x, batch_size=32, verbose=1): + def predict_classes(self, x, batch_size=32, verbose=0): """Generate class predictions for the input samples. The input samples are processed batch by batch. 
@@ -1126,7 +1177,7 @@ class Sequential(Model): raise ValueError('Unrecognized keyword arguments: ' + str(kwargs)) if not self.built: - raise RuntimeError('The model needs to be compiled ' 'before being used.') + raise RuntimeError('The model needs to be compiled before being used.') return self.model.fit_generator( generator, steps_per_epoch, @@ -1193,7 +1244,7 @@ class Sequential(Model): raise ValueError('Unrecognized keyword arguments: ' + str(kwargs)) if not self.built: - raise RuntimeError('The model needs to be compiled ' 'before being used.') + raise RuntimeError('The model needs to be compiled before being used.') return self.model.evaluate_generator( generator, steps, diff --git a/tensorflow/python/keras/_impl/keras/models_test.py b/tensorflow/python/keras/_impl/keras/models_test.py index fd6b20e0edc..86acac4604a 100644 --- a/tensorflow/python/keras/_impl/keras/models_test.py +++ b/tensorflow/python/keras/_impl/keras/models_test.py @@ -315,6 +315,24 @@ class TestSequential(test.TestCase): with self.assertRaises(TypeError): model.build() + def test_nested_sequential_trainability(self): + input_dim = 20 + num_units = 10 + num_classes = 2 + + inner_model = keras.models.Sequential() + inner_model.add(keras.layers.Dense(num_units, input_shape=(input_dim,))) + + model = keras.models.Sequential() + model.add(inner_model) + model.add(keras.layers.Dense(num_classes)) + + self.assertEqual(len(model.trainable_weights), 4) + inner_model.trainable = False + self.assertEqual(len(model.trainable_weights), 2) + inner_model.trainable = True + self.assertEqual(len(model.trainable_weights), 4) + class TestModelCloning(test.TestCase): diff --git a/tensorflow/python/keras/_impl/keras/utils/data_utils.py b/tensorflow/python/keras/_impl/keras/utils/data_utils.py index b3a1f640423..4f335af62e0 100644 --- a/tensorflow/python/keras/_impl/keras/utils/data_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/data_utils.py @@ -378,17 +378,27 @@ class Sequence(object): pass -def get_index(ds, i): - """Quick fix for Python2, otherwise, it cannot be pickled. +# Global variables to be shared across processes +_SHARED_SEQUENCES = {} +# We use a Value to provide unique id to different processes. +_SEQUENCE_COUNTER = multiprocessing.Value('i', 0) + + +def get_index(uid, i): + """Get the value from the Sequence `uid` at index `i`. + + To allow multiple Sequences to be used at the same time, we use `uid` to + get a specific one. A single Sequence would cause the validation to + overwrite the training Sequence. Arguments: - ds: a Holder or Sequence object. + uid: int, Sequence identifier i: index Returns: The value at index `i`. """ - return ds[i] + return _SHARED_SEQUENCES[uid][i] class SequenceEnqueuer(object): @@ -459,17 +469,17 @@ class OrderedEnqueuer(SequenceEnqueuer): Arguments: sequence: A `keras.utils.data_utils.Sequence` object. - use_multiprocessing: use multiprocessing if True, otherwise threading - scheduling: Sequential querying of datas if 'sequential', random - otherwise. - shuffle: Whether to shuffle the data at the beginning of each epoch. + use_multiprocessing: Use multiprocessing if True, otherwise threading + shuffle: Whether to shuffle the data at the beginning of each epoch """ - def __init__(self, - sequence, - use_multiprocessing=False, - shuffle=False): + def __init__(self, sequence, use_multiprocessing=False, shuffle=False): self.sequence = sequence + + # Doing Multiprocessing.Value += x is not process-safe. 
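Editorial aside on the comment just above, sketched as standalone Python rather than patch code (`counter` and `uid` are stand-in names): `Value.value += 1` is a separate read and a separate write, so two processes can read the same value and one increment is lost; taking the `Value`'s own lock serializes the read-modify-write, which is what the hunk below does with `_SEQUENCE_COUNTER`.

```python
import multiprocessing

counter = multiprocessing.Value('i', 0)  # shared integer, like _SEQUENCE_COUNTER
with counter.get_lock():                 # serialize the read-modify-write
  uid = counter.value                    # id handed to this enqueuer
  counter.value += 1                     # next enqueuer gets a fresh id
print(uid)  # -> 0
```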
+ with _SEQUENCE_COUNTER.get_lock(): + self.uid = _SEQUENCE_COUNTER.value + _SEQUENCE_COUNTER.value += 1 self.use_multiprocessing = use_multiprocessing self.shuffle = shuffle self.workers = 0 @@ -493,15 +503,24 @@ class OrderedEnqueuer(SequenceEnqueuer): self.executor = multiprocessing.Pool(workers) else: self.executor = ThreadPool(workers) + self.workers = workers self.queue = queue.Queue(max_queue_size) self.stop_signal = threading.Event() self.run_thread = threading.Thread(target=self._run) self.run_thread.daemon = True self.run_thread.start() + def _wait_queue(self): + """Wait for the queue to be empty.""" + while True: + time.sleep(0.1) + if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set(): + return + def _run(self): - """Submits requests to the executor and queues the `Future` objects.""" + """Function to submit request to the executor & queue `Future` objects.""" sequence = list(range(len(self.sequence))) + self._send_sequence() # Share the initial sequence while True: if self.shuffle: random.shuffle(sequence) @@ -509,9 +528,18 @@ class OrderedEnqueuer(SequenceEnqueuer): if self.stop_signal.is_set(): return self.queue.put( - self.executor.apply_async(get_index, (self.sequence, i)), - block=True) + self.executor.apply_async(get_index, (self.uid, i)), block=True) + + # Done with the current epoch, waiting for the final batches + self._wait_queue() + + if self.stop_signal.is_set(): + # We're done + return + + # Call the internal on epoch end. self.sequence.on_epoch_end() + self._send_sequence() # Update the pool def get(self): """Creates a generator to extract data from the queue. @@ -520,17 +548,29 @@ class OrderedEnqueuer(SequenceEnqueuer): Yields: Tuples (inputs, targets) - or (inputs, targets, sample_weights) + or (inputs, targets, sample_weights) """ try: while self.is_running(): inputs = self.queue.get(block=True).get() + self.queue.task_done() if inputs is not None: yield inputs except Exception as e: self.stop() raise StopIteration(e) + def _send_sequence(self): + """Send current Sequence to all workers.""" + _SHARED_SEQUENCES[ + self.uid] = self.sequence # For new processes that may spawn + + self._close_pool() + if self.use_multiprocessing: + self.executor = multiprocessing.Pool(self.workers) + else: + self.executor = ThreadPool(self.workers) + def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. 
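A stripped-down sketch of the uid-based sharing that `get_index` and `_send_sequence` rely on in the hunks above; `registry` and `fetch` are invented stand-ins for `_SHARED_SEQUENCES` and `get_index`, and plain lists stand in for `Sequence` objects:

```python
registry = {}  # uid -> sequence-like object, shared with the worker pool

def fetch(uid, i):
  # Workers receive only (uid, i) and look the sequence up locally, so a
  # training and a validation sequence can be enqueued at the same time
  # without one overwriting the other.
  return registry[uid][i]

registry[0] = ['train_batch_%d' % i for i in range(3)]  # e.g. training Sequence
registry[1] = ['val_batch_%d' % i for i in range(3)]    # e.g. validation Sequence
print(fetch(0, 1), fetch(1, 1))  # -> train_batch_1 val_batch_1
```

Because a worker pool only sees the module state captured when it is created, `_send_sequence` above updates the registry entry and then re-creates the pool, so changes made in `on_epoch_end` are visible to the workers of the next epoch.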
@@ -544,9 +584,13 @@ class OrderedEnqueuer(SequenceEnqueuer): self.queue.queue.clear() self.queue.unfinished_tasks = 0 self.queue.not_full.notify() + self._close_pool() + self.run_thread.join(timeout) + _SHARED_SEQUENCES[self.uid] = None + + def _close_pool(self): self.executor.close() self.executor.join() - self.run_thread.join(timeout) class GeneratorEnqueuer(SequenceEnqueuer): diff --git a/tensorflow/python/keras/_impl/keras/utils/data_utils_test.py b/tensorflow/python/keras/_impl/keras/utils/data_utils_test.py index 45322f1f29c..14b2f084423 100644 --- a/tensorflow/python/keras/_impl/keras/utils/data_utils_test.py +++ b/tensorflow/python/keras/_impl/keras/utils/data_utils_test.py @@ -115,15 +115,19 @@ def threadsafe_generator(f): class TestSequence(keras.utils.data_utils.Sequence): - def __init__(self, shape): + def __init__(self, shape, value=1.): self.shape = shape + self.inner = value def __getitem__(self, item): - return np.ones(self.shape, dtype=np.uint8) * item + return np.ones(self.shape, dtype=np.uint32) * item * self.inner def __len__(self): return 100 + def on_epoch_end(self): + self.inner *= 5.0 + class FaultSequence(keras.utils.data_utils.Sequence): @@ -228,6 +232,64 @@ class TestEnqueuers(test.TestCase): with self.assertRaises(StopIteration): next(gen_output) + def test_on_epoch_end_processes(self): + enqueuer = keras.utils.data_utils.OrderedEnqueuer( + TestSequence([3, 200, 200, 3]), use_multiprocessing=True) + enqueuer.start(3, 10) + gen_output = enqueuer.get() + acc = [] + for _ in range(200): + acc.append(next(gen_output)[0, 0, 0, 0]) + # Check that order was keep in GeneratorEnqueuer with processes + self.assertEqual(acc[100:], list([k * 5 for k in range(100)])) + enqueuer.stop() + + def test_context_switch(self): + enqueuer = keras.utils.data_utils.OrderedEnqueuer( + TestSequence([3, 200, 200, 3]), use_multiprocessing=True) + enqueuer2 = keras.utils.data_utils.OrderedEnqueuer( + TestSequence([3, 200, 200, 3], value=15), use_multiprocessing=True) + enqueuer.start(3, 10) + enqueuer2.start(3, 10) + gen_output = enqueuer.get() + gen_output2 = enqueuer2.get() + acc = [] + for _ in range(100): + acc.append(next(gen_output)[0, 0, 0, 0]) + self.assertEqual(acc[-1], 99) + # One epoch is completed so enqueuer will switch the Sequence + + acc = [] + for _ in range(100): + acc.append(next(gen_output2)[0, 0, 0, 0]) + self.assertEqual(acc[-1], 99 * 15) + # One epoch has been completed so enqueuer2 will switch + + # Be sure that both Sequence were updated + self.assertEqual(next(gen_output)[0, 0, 0, 0], 0) + self.assertEqual(next(gen_output)[0, 0, 0, 0], 5) + self.assertEqual(next(gen_output2)[0, 0, 0, 0], 0) + self.assertEqual(next(gen_output2)[0, 0, 0, 0], 15 * 5) + + # Tear down everything + enqueuer.stop() + enqueuer2.stop() + + def test_on_epoch_end_threads(self): + enqueuer = keras.utils.data_utils.OrderedEnqueuer( + TestSequence([3, 200, 200, 3]), use_multiprocessing=False) + enqueuer.start(3, 10) + gen_output = enqueuer.get() + acc = [] + for _ in range(100): + acc.append(next(gen_output)[0, 0, 0, 0]) + acc = [] + for _ in range(100): + acc.append(next(gen_output)[0, 0, 0, 0]) + # Check that order was keep in GeneratorEnqueuer with processes + self.assertEqual(acc, list([k * 5 for k in range(100)])) + enqueuer.stop() + if __name__ == '__main__': test.main() diff --git a/tensorflow/python/keras/_impl/keras/utils/generic_utils.py b/tensorflow/python/keras/_impl/keras/utils/generic_utils.py index efa79b1612f..025e5d30a59 100644 --- 
a/tensorflow/python/keras/_impl/keras/utils/generic_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/generic_utils.py @@ -271,7 +271,8 @@ class Progbar(object): self.total_width = 0 self.seen_so_far = 0 self.verbose = verbose - self._dynamic_display = (sys.stdout.isatty() or + self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and + sys.stdout.isatty()) or 'ipykernel' in sys.modules) def update(self, current, values=None, force=False): diff --git a/tensorflow/python/keras/_impl/keras/utils/np_utils.py b/tensorflow/python/keras/_impl/keras/utils/np_utils.py index a23172d342a..896016d4d8b 100644 --- a/tensorflow/python/keras/_impl/keras/utils/np_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/np_utils.py @@ -33,12 +33,18 @@ def to_categorical(y, num_classes=None): Returns: A binary matrix representation of the input. """ - y = np.array(y, dtype='int').ravel() + y = np.array(y, dtype='int') + input_shape = y.shape + if input_shape and input_shape[-1] == 1: + input_shape = tuple(input_shape[:-1]) + y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes)) categorical[np.arange(n), y] = 1 + output_shape = input_shape + (num_classes,) + categorical = np.reshape(categorical, output_shape) return categorical diff --git a/tensorflow/python/keras/_impl/keras/utils/np_utils_test.py b/tensorflow/python/keras/_impl/keras/utils/np_utils_test.py new file mode 100644 index 00000000000..9680c295cd3 --- /dev/null +++ b/tensorflow/python/keras/_impl/keras/utils/np_utils_test.py @@ -0,0 +1,52 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for np_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensorflow.python.keras._impl import keras +from tensorflow.python.platform import test + + +class TestNPUtils(test.TestCase): + + def test_to_categorical(self): + num_classes = 5 + shapes = [(3,), (4, 3), (5, 4, 3), (3, 1), (3, 2, 1)] + expected_shapes = [(3, num_classes), + (4, 3, num_classes), + (5, 4, 3, num_classes), + (3, num_classes)] + labels = [np.random.randint(0, num_classes, shape) for shape in shapes] + one_hots = [ + keras.utils.to_categorical(label, num_classes) for label in labels] + for label, one_hot, expected_shape in zip(labels, + one_hots, + expected_shapes): + # Check shape + self.assertEqual(one_hot.shape, expected_shape) + # Make sure there is only one 1 in a row + self.assertTrue(np.all(one_hot.sum(axis=-1) == 1)) + # Get original labels back from one hots + self.assertTrue(np.all( + np.argmax(one_hot, -1).reshape(label.shape) == label)) + + +if __name__ == '__main__': + test.main() diff --git a/tensorflow/python/keras/_impl/keras/utils/training_utils.py b/tensorflow/python/keras/_impl/keras/utils/training_utils.py index b993a16394d..8939c814cf3 100644 --- a/tensorflow/python/keras/_impl/keras/utils/training_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/training_utils.py @@ -77,8 +77,11 @@ def multi_gpu_model(model, gpus): width = 224 num_classes = 1000 - # Instantiate the base model - # (here, we do it on CPU, for better efficiency). + # Instantiate the base model (or "template" model). + # We recommend doing this with under a CPU device scope, + # so that the model's weights are hosted on CPU memory. + # Otherwise they may end up hosted on a GPU, which would + # complicate weight sharing. with tf.device('/cpu:0'): model = Xception(weights=None, input_shape=(height, width, 3), @@ -97,6 +100,9 @@ def multi_gpu_model(model, gpus): # This `fit` call will be distributed on 8 GPUs. # Since the batch size is 256, each GPU will process 32 samples. 
parallel_model.fit(x, y, epochs=20, batch_size=256) + + # Save model via the template model (which shares the same weights): + model.save('my_model.h5') ``` Raises: diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt index 04fe46cedcb..f69800b918c 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt @@ -173,7 +173,7 @@ tf_class { } member_method { name: "fit" - argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'32\', \'10\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\'], " + argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\'], " } member_method { name: "fit_generator" @@ -241,7 +241,7 @@ tf_class { } member_method { name: "predict_classes" - argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'1\'], " + argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], " } member_method { name: "predict_generator" @@ -253,7 +253,7 @@ tf_class { } member_method { name: "predict_proba" - argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'1\'], " + argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], " } member_method { name: "reset_states" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt index 3946ff4d5f1..8397b373f42 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt @@ -173,7 +173,7 @@ tf_class { } member_method { name: "fit" - argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\'], varargs=None, keywords=None, defaults=[\'32\', \'10\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\'], " + argspec: "args=[\'self\', \'x\', \'y\', \'batch_size\', \'epochs\', \'verbose\', \'callbacks\', \'validation_split\', \'validation_data\', \'shuffle\', \'class_weight\', \'sample_weight\', \'initial_epoch\', \'steps_per_epoch\', \'validation_steps\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'None\', \'None\', \'1\', \'1\', \'None\', \'0.0\', \'None\', \'True\', \'None\', \'None\', \'0\', \'None\', \'None\'], " } member_method { name: "fit_generator" @@ -241,7 +241,7 @@ tf_class { } member_method { name: "predict_classes" - argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'1\'], " + argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], 
varargs=None, keywords=None, defaults=[\'32\', \'0\'], " } member_method { name: "predict_generator" @@ -253,7 +253,7 @@ tf_class { } member_method { name: "predict_proba" - argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'1\'], " + argspec: "args=[\'self\', \'x\', \'batch_size\', \'verbose\'], varargs=None, keywords=None, defaults=[\'32\', \'0\'], " } member_method { name: "reset_states" From b0bcf675a4b5d6217f3b58fd27b344f20e7bf25d Mon Sep 17 00:00:00 2001 From: Sanjoy Das Date: Wed, 15 Nov 2017 13:50:54 -0800 Subject: [PATCH 068/104] Use a static "linker initialized" tensorflow::mutex when possible. There is no need to use a lazily created tensorflow::mutex since the tensorflow::LINKER_INITIALIZED constructor is a no-op. PiperOrigin-RevId: 175874749 --- tensorflow/compiler/xla/service/compiler.cc | 16 ++++------------ tensorflow/compiler/xla/service/compiler.h | 3 +-- .../compiler/xla/service/computation_placer.cc | 12 +++++------- .../compiler/xla/service/computation_placer.h | 7 ++----- .../compiler/xla/service/transfer_manager.cc | 13 +++++-------- .../compiler/xla/service/transfer_manager.h | 7 ++----- 6 files changed, 19 insertions(+), 39 deletions(-) diff --git a/tensorflow/compiler/xla/service/compiler.cc b/tensorflow/compiler/xla/service/compiler.cc index 3b1900428af..e2e9d2a0c04 100644 --- a/tensorflow/compiler/xla/service/compiler.cc +++ b/tensorflow/compiler/xla/service/compiler.cc @@ -27,14 +27,8 @@ namespace se = ::perftools::gputools; namespace xla { -/* static */ tensorflow::mutex* Compiler::platform_compiler_mutex_; - -/* static */ void Compiler::LazyInitMutex() { - static std::once_flag mutex_init_flag; - std::call_once(mutex_init_flag, []() { - Compiler::platform_compiler_mutex_ = new tensorflow::mutex; - }); -} +/* static */ tensorflow::mutex Compiler::platform_compiler_mutex_( + tensorflow::LINKER_INITIALIZED); /* static */ std::map* @@ -55,8 +49,7 @@ Compiler::GetPlatformCompilers() { /* static */ void Compiler::RegisterCompilerFactory( se::Platform::Id platform_id, std::function()> compiler_factory) { - LazyInitMutex(); - tensorflow::mutex_lock lock(*platform_compiler_mutex_); + tensorflow::mutex_lock lock(platform_compiler_mutex_); auto* factories = GetPlatformCompilerFactories(); CHECK(factories->find(platform_id) == factories->end()) << "Compiler factory already registered for platform"; @@ -65,8 +58,7 @@ Compiler::GetPlatformCompilers() { /* static */ StatusOr Compiler::GetForPlatform( const se::Platform* platform) { - LazyInitMutex(); - tensorflow::mutex_lock lock(*platform_compiler_mutex_); + tensorflow::mutex_lock lock(platform_compiler_mutex_); auto* compilers = GetPlatformCompilers(); // See if we already instantiated a compiler for this platform. diff --git a/tensorflow/compiler/xla/service/compiler.h b/tensorflow/compiler/xla/service/compiler.h index 4c2d9600d90..5f021900c8b 100644 --- a/tensorflow/compiler/xla/service/compiler.h +++ b/tensorflow/compiler/xla/service/compiler.h @@ -157,8 +157,7 @@ class Compiler { private: // Mutex that guards the platform-compiler map. - static tensorflow::mutex* platform_compiler_mutex_; - static void LazyInitMutex(); + static tensorflow::mutex platform_compiler_mutex_; // Map from platform kind to compiler factory. 
static std::map* diff --git a/tensorflow/compiler/xla/service/computation_placer.cc b/tensorflow/compiler/xla/service/computation_placer.cc index cdfa30dd9a7..6b7b0d25e87 100644 --- a/tensorflow/compiler/xla/service/computation_placer.cc +++ b/tensorflow/compiler/xla/service/computation_placer.cc @@ -94,7 +94,7 @@ StatusOr ComputationPlacer::AssignDevices( se::Platform::Id platform_id, ComputationPlacerCreationFunction creation_function) { tensorflow::mutex_lock lock( - *ComputationPlacer::platform_computation_placer_mutex()); + ComputationPlacer::platform_computation_placer_mutex_); auto* computation_placers = GetPlatformComputationPlacers(); CHECK(computation_placers->find(platform_id) == computation_placers->end()); (*computation_placers)[platform_id].creation_function = creation_function; @@ -103,7 +103,7 @@ StatusOr ComputationPlacer::AssignDevices( /* static */ StatusOr ComputationPlacer::GetForPlatform( const se::Platform* platform) { tensorflow::mutex_lock lock( - *ComputationPlacer::platform_computation_placer_mutex()); + ComputationPlacer::platform_computation_placer_mutex_); auto* computation_placers = GetPlatformComputationPlacers(); auto it = computation_placers->find(platform->id()); @@ -122,11 +122,9 @@ StatusOr ComputationPlacer::AssignDevices( return it->second.placer.get(); } -/* static */ tensorflow::mutex* -ComputationPlacer::platform_computation_placer_mutex() { - static tensorflow::mutex* m = new tensorflow::mutex; - return m; -} +/* static */ tensorflow::mutex + ComputationPlacer::platform_computation_placer_mutex_( + tensorflow::LINKER_INITIALIZED); /* static */ std::map* diff --git a/tensorflow/compiler/xla/service/computation_placer.h b/tensorflow/compiler/xla/service/computation_placer.h index 7d9abcd100d..737ccabaa7a 100644 --- a/tensorflow/compiler/xla/service/computation_placer.h +++ b/tensorflow/compiler/xla/service/computation_placer.h @@ -89,11 +89,8 @@ class ComputationPlacer { const perftools::gputools::Platform* platform); private: - // Routine that returns the mutex that guards the platform-to-computation - // placer map. Done as a routine to ensure correct initialization ordering, - // since RegisterComputationPlacer can be called during program initialization - // time. - static tensorflow::mutex* platform_computation_placer_mutex(); + // The mutex that guards the platform-to-computation placer map. + static tensorflow::mutex platform_computation_placer_mutex_; // State kept for each kind of ComputationPlacer. Registration functions set // up creation_function, and then we use that to lazily create "placer" the diff --git a/tensorflow/compiler/xla/service/transfer_manager.cc b/tensorflow/compiler/xla/service/transfer_manager.cc index 4da0a0d3684..fef131d19fc 100644 --- a/tensorflow/compiler/xla/service/transfer_manager.cc +++ b/tensorflow/compiler/xla/service/transfer_manager.cc @@ -28,12 +28,9 @@ limitations under the License. 
namespace se = ::perftools::gputools; namespace xla { - -/* static */ tensorflow::mutex* -TransferManager::platform_transfer_manager_mutex() { - static tensorflow::mutex* m = new tensorflow::mutex; - return m; -} +/* static */ tensorflow::mutex + TransferManager::platform_transfer_manager_mutex_( + tensorflow::LINKER_INITIALIZED); /* static */ std::map* @@ -47,7 +44,7 @@ TransferManager::GetPlatformTransferManagers() { se::Platform::Id platform_id, TransferManagerCreationFunction creation_function) { tensorflow::mutex_lock lock( - *TransferManager::platform_transfer_manager_mutex()); + TransferManager::platform_transfer_manager_mutex_); auto* managers = GetPlatformTransferManagers(); CHECK(managers->find(platform_id) == managers->end()); (*managers)[platform_id].creation_function = creation_function; @@ -56,7 +53,7 @@ TransferManager::GetPlatformTransferManagers() { /* static */ StatusOr TransferManager::GetForPlatform( const se::Platform* platform) { tensorflow::mutex_lock lock( - *TransferManager::platform_transfer_manager_mutex()); + TransferManager::platform_transfer_manager_mutex_); auto* managers = GetPlatformTransferManagers(); auto it = managers->find(platform->id()); diff --git a/tensorflow/compiler/xla/service/transfer_manager.h b/tensorflow/compiler/xla/service/transfer_manager.h index 057bdffe931..d7f85f5765e 100644 --- a/tensorflow/compiler/xla/service/transfer_manager.h +++ b/tensorflow/compiler/xla/service/transfer_manager.h @@ -158,11 +158,8 @@ class TransferManager { const perftools::gputools::Platform* platform); private: - // Routine that returns the mutex that guards the - // platform-to-transfer manager map. Done as a routine to - // ensure correct initialization ordering, since RegisterTransferManager - // can be called during program initialization time. - static tensorflow::mutex* platform_transfer_manager_mutex(); + // The mutex that guards the platform-to-transfer manager map. + static tensorflow::mutex platform_transfer_manager_mutex_; // State kept for each kind of TransferManager. Registration functions // set up creation_function, and then we use that to lazily create From fdff4048d4d0fdf7c12f927b92bb5e2fb812df12 Mon Sep 17 00:00:00 2001 From: Derek Murray Date: Wed, 15 Nov 2017 14:07:41 -0800 Subject: [PATCH 069/104] Add `WorkerService.DeleteWorkerSession` method to fix a memory leak. The new method is the counterpart to `WorkerService.CreateWorkerSession`, and is called in all cases where worker sessions have been explicitly created (i.e. when using ClusterSpec propagation). 
PiperOrigin-RevId: 175877407 --- .../distributed_runtime/master_session.cc | 60 +++++++++++++++++++ .../core/distributed_runtime/master_session.h | 4 ++ .../rpc/grpc_remote_worker.cc | 8 +++ .../rpc/grpc_worker_service.cc | 11 ++++ .../rpc/grpc_worker_service_impl.cc | 2 + .../rpc/grpc_worker_service_impl.h | 1 + tensorflow/core/distributed_runtime/worker.cc | 7 +++ tensorflow/core/distributed_runtime/worker.h | 4 ++ .../distributed_runtime/worker_interface.h | 9 +++ tensorflow/core/protobuf/worker.proto | 16 +++++ tensorflow/core/protobuf/worker_service.proto | 4 ++ 11 files changed, 126 insertions(+) diff --git a/tensorflow/core/distributed_runtime/master_session.cc b/tensorflow/core/distributed_runtime/master_session.cc index 5798ad09e81..91a1fa7d1e1 100644 --- a/tensorflow/core/distributed_runtime/master_session.cc +++ b/tensorflow/core/distributed_runtime/master_session.cc @@ -1044,6 +1044,7 @@ Status MasterSession::Create(GraphDef* graph_def, graph_def, execution_options, &execution_state_)); } if (options.cluster_def != nullptr) { + should_delete_worker_sessions_ = true; return CreateWorkerSessions(options); } return Status::OK(); @@ -1122,6 +1123,59 @@ Status MasterSession::CreateWorkerSessions( return status; } +Status MasterSession::DeleteWorkerSessions() { + WorkerCacheInterface* worker_cache = get_worker_cache(); + std::vector worker_names; + worker_cache->ListWorkers(&worker_names); + + struct WorkerGroup { + // The worker name. (Not owned.) + const string* name; + + // The worker referenced by name. (Not owned.) + WorkerInterface* worker = nullptr; + + // Request and responses used for a given worker. + DeleteWorkerSessionRequest request; + DeleteWorkerSessionResponse response; + Status status = Status::OK(); + }; + BlockingCounter done(worker_names.size()); + std::vector workers(worker_names.size()); + + // Release the workers. + auto cleanup = gtl::MakeCleanup([this, &workers, worker_cache] { + for (auto&& worker_group : workers) { + if (worker_group.worker != nullptr) { + worker_cache->ReleaseWorker(*worker_group.name, worker_group.worker); + } + } + }); + + Status status = Status::OK(); + // Create all the workers & kick off the computations. 
+ for (size_t i = 0; i < worker_names.size(); ++i) { + workers[i].name = &worker_names[i]; + workers[i].worker = worker_cache_->CreateWorker(worker_names[i]); + workers[i].request.set_session_handle(handle_); + } + + for (size_t i = 0; i < worker_names.size(); ++i) { + auto cb = [i, &workers, &done](const Status& s) { + workers[i].status = s; + done.DecrementCount(); + }; + workers[i].worker->DeleteWorkerSessionAsync(&workers[i].request, + &workers[i].response, cb); + } + + done.Wait(); + for (size_t i = 0; i < workers.size(); ++i) { + status.Update(workers[i].status); + } + return status; +} + Status MasterSession::ListDevices(ListDevicesResponse* resp) const { if (worker_cache_) { // This is a ClusterSpec-propagated session, and thus env_->local_devices @@ -1604,6 +1658,12 @@ Status MasterSession::Close() { ClearRunsTable(&to_unref, &partial_run_graphs_); } for (ReffedClientGraph* rcg : to_unref) rcg->Unref(); + if (should_delete_worker_sessions_) { + Status s = DeleteWorkerSessions(); + if (!s.ok()) { + LOG(WARNING) << s; + } + } return Status::OK(); } diff --git a/tensorflow/core/distributed_runtime/master_session.h b/tensorflow/core/distributed_runtime/master_session.h index eb696eb06a9..4bd4e1367aa 100644 --- a/tensorflow/core/distributed_runtime/master_session.h +++ b/tensorflow/core/distributed_runtime/master_session.h @@ -201,6 +201,10 @@ class MasterSession : public core::RefCounted { // workers. Status CreateWorkerSessions(const WorkerCacheFactoryOptions& server_def); + // TODO(b/36574172): Always use Create/DeleteWorkerSession. + bool should_delete_worker_sessions_ = false; + Status DeleteWorkerSessions(); + Status StartStep(const BuildGraphOptions& opts, int64* count, ReffedClientGraph** graph, bool is_partial); void ClearRunsTable(std::vector* to_unref, diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc index 170c72deca7..b3b05408b15 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_remote_worker.cc @@ -47,6 +47,7 @@ class GrpcRemoteWorker : public WorkerInterface { cq_(completion_queue), getstatus_(Method(GrpcWorkerMethod::kGetStatus)), createworkersession_(Method(GrpcWorkerMethod::kCreateWorkerSession)), + deleteworkersession_(Method(GrpcWorkerMethod::kDeleteWorkerSession)), registergraph_(Method(GrpcWorkerMethod::kRegisterGraph)), deregistergraph_(Method(GrpcWorkerMethod::kDeregisterGraph)), rungraph_(Method(GrpcWorkerMethod::kRunGraph)), @@ -71,6 +72,12 @@ class GrpcRemoteWorker : public WorkerInterface { IssueRequest(request, response, createworkersession_, std::move(done)); } + void DeleteWorkerSessionAsync(const DeleteWorkerSessionRequest* request, + DeleteWorkerSessionResponse* response, + StatusCallback done) override { + IssueRequest(request, response, deleteworkersession_, std::move(done)); + } + void RegisterGraphAsync(const RegisterGraphRequest* request, RegisterGraphResponse* response, StatusCallback done) override { @@ -199,6 +206,7 @@ class GrpcRemoteWorker : public WorkerInterface { const ::grpc::string getstatus_; const ::grpc::string createworkersession_; + const ::grpc::string deleteworkersession_; const ::grpc::string registergraph_; const ::grpc::string deregistergraph_; const ::grpc::string rungraph_; diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc index 4ee5ae09017..eee93ec6572 100644 --- 
a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc @@ -114,6 +114,7 @@ class GrpcWorkerService : public AsyncServiceInterface { // types. ENQUEUE_REQUEST(GetStatus, false); ENQUEUE_REQUEST(CreateWorkerSession, false); + ENQUEUE_REQUEST(DeleteWorkerSession, false); ENQUEUE_REQUEST(CleanupAll, false); ENQUEUE_REQUEST(RegisterGraph, false); ENQUEUE_REQUEST(DeregisterGraph, false); @@ -192,6 +193,16 @@ class GrpcWorkerService : public AsyncServiceInterface { ENQUEUE_REQUEST(CreateWorkerSession, false); } + void DeleteWorkerSessionHandler( + WorkerCall* + call) { + Schedule([this, call]() { + Status s = worker_->DeleteWorkerSession(&call->request, &call->response); + call->SendResponse(ToGrpcStatus(s)); + }); + ENQUEUE_REQUEST(DeleteWorkerSession, false); + } + void CleanupAllHandler( WorkerCall* call) { Schedule([this, call]() { diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.cc index 348c6dc98bd..05a9db10d3c 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.cc +++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.cc @@ -32,6 +32,8 @@ const char* GrpcWorkerMethodName(GrpcWorkerMethod id) { return "/tensorflow.WorkerService/GetStatus"; case GrpcWorkerMethod::kCreateWorkerSession: return "/tensorflow.WorkerService/CreateWorkerSession"; + case GrpcWorkerMethod::kDeleteWorkerSession: + return "/tensorflow.WorkerService/DeleteWorkerSession"; case GrpcWorkerMethod::kRegisterGraph: return "/tensorflow.WorkerService/RegisterGraph"; case GrpcWorkerMethod::kDeregisterGraph: diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.h b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.h index e9862a61a3f..fb23f8631fd 100644 --- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.h +++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service_impl.h @@ -110,6 +110,7 @@ namespace tensorflow { enum class GrpcWorkerMethod { kGetStatus, kCreateWorkerSession, + kDeleteWorkerSession, kRegisterGraph, kDeregisterGraph, kRunGraph, diff --git a/tensorflow/core/distributed_runtime/worker.cc b/tensorflow/core/distributed_runtime/worker.cc index fcb18301970..8bf87923ed4 100644 --- a/tensorflow/core/distributed_runtime/worker.cc +++ b/tensorflow/core/distributed_runtime/worker.cc @@ -48,6 +48,13 @@ void Worker::CreateWorkerSessionAsync(const CreateWorkerSessionRequest* request, done(s); } +void Worker::DeleteWorkerSessionAsync(const DeleteWorkerSessionRequest* request, + DeleteWorkerSessionResponse* response, + StatusCallback done) { + Status s = env_->session_mgr->DeleteSession(request->session_handle()); + done(s); +} + void Worker::RegisterGraphAsync(const RegisterGraphRequest* request, RegisterGraphResponse* response, StatusCallback done) { diff --git a/tensorflow/core/distributed_runtime/worker.h b/tensorflow/core/distributed_runtime/worker.h index 07300338c38..c62347926fa 100644 --- a/tensorflow/core/distributed_runtime/worker.h +++ b/tensorflow/core/distributed_runtime/worker.h @@ -52,6 +52,10 @@ class Worker : public WorkerInterface { CreateWorkerSessionResponse* response, StatusCallback done) override; + void DeleteWorkerSessionAsync(const DeleteWorkerSessionRequest* request, + DeleteWorkerSessionResponse* response, + StatusCallback done) override; + void RegisterGraphAsync(const RegisterGraphRequest* request, RegisterGraphResponse* 
response, StatusCallback done) override; diff --git a/tensorflow/core/distributed_runtime/worker_interface.h b/tensorflow/core/distributed_runtime/worker_interface.h index c9db28ec67f..4c58bf41a46 100644 --- a/tensorflow/core/distributed_runtime/worker_interface.h +++ b/tensorflow/core/distributed_runtime/worker_interface.h @@ -44,6 +44,10 @@ class WorkerInterface { const CreateWorkerSessionRequest* request, CreateWorkerSessionResponse* response, StatusCallback done) = 0; + virtual void DeleteWorkerSessionAsync( + const DeleteWorkerSessionRequest* request, + DeleteWorkerSessionResponse* response, StatusCallback done) = 0; + virtual void RegisterGraphAsync(const RegisterGraphRequest* request, RegisterGraphResponse* response, StatusCallback done) = 0; @@ -118,6 +122,11 @@ class WorkerInterface { return CallAndWait(&ME::CreateWorkerSessionAsync, request, response); } + Status DeleteWorkerSession(const DeleteWorkerSessionRequest* request, + DeleteWorkerSessionResponse* response) { + return CallAndWait(&ME::DeleteWorkerSessionAsync, request, response); + } + Status RegisterGraph(const RegisterGraphRequest* request, RegisterGraphResponse* response) { return CallAndWait(&ME::RegisterGraphAsync, request, response); diff --git a/tensorflow/core/protobuf/worker.proto b/tensorflow/core/protobuf/worker.proto index 34a5cff3660..e7b3f36fcc7 100644 --- a/tensorflow/core/protobuf/worker.proto +++ b/tensorflow/core/protobuf/worker.proto @@ -64,6 +64,22 @@ message CreateWorkerSessionRequest { message CreateWorkerSessionResponse { } +//////////////////////////////////////////////////////////////////////////////// +// +// DeleteSession method request/response messages +// +// Deletes all worker-side state associated with the given session handle. +// +//////////////////////////////////////////////////////////////////////////////// + +message DeleteWorkerSessionRequest { + // Sessions are identified by a given handle. + string session_handle = 1; +} + +message DeleteWorkerSessionResponse { +} + //////////////////////////////////////////////////////////////////////////////// // // RegisterGraph method request/response messages diff --git a/tensorflow/core/protobuf/worker_service.proto b/tensorflow/core/protobuf/worker_service.proto index 3de9e48b78e..e1bfb04d7c5 100644 --- a/tensorflow/core/protobuf/worker_service.proto +++ b/tensorflow/core/protobuf/worker_service.proto @@ -43,6 +43,10 @@ service WorkerService { rpc CreateWorkerSession(CreateWorkerSessionRequest) returns (CreateWorkerSessionResponse); + // See worker.proto for details. + rpc DeleteWorkerSession(DeleteWorkerSessionRequest) + returns (DeleteWorkerSessionResponse); + // See worker.proto for details. 
rpc RegisterGraph(RegisterGraphRequest) returns (RegisterGraphResponse); From 2c36ab6cd733876cb2d25696d61a936939cf606b Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Wed, 15 Nov 2017 14:09:21 -0800 Subject: [PATCH 070/104] Fast-path zeros and ones for backprop PiperOrigin-RevId: 175877719 --- tensorflow/python/eager/backprop.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py index 33601a1edcc..a2a7f1c0227 100644 --- a/tensorflow/python/eager/backprop.py +++ b/tensorflow/python/eager/backprop.py @@ -733,20 +733,28 @@ _last_shape_dtype = [None, None] _last_zero = [None] +def _fast_fill(value, shape, dtype): + return array_ops.fill(shape, constant_op.constant(value, dtype=dtype)) + + def _zeros(shape, dtype): """Wraps array_ops.zeros to cache last zero for a given shape and dtype.""" if [shape, dtype] != _last_shape_dtype: _last_shape_dtype[:] = [shape, dtype] - _last_zero[0] = array_ops.zeros(shape, dtype) + _last_zero[0] = _fast_fill(0, shape, dtype) return _last_zero[0] +def _ones(shape, dtype): + return _fast_fill(1, shape, dtype) + + _default_vspace = imperative_grad.VSpace( num_elements_fn=_num_elements, aggregate_fn=_aggregate_grads, tensor_id=ops.tensor_id, zeros=_zeros, - ones=array_ops.ones) + ones=_ones) class GradientTape(object): From a6626a8271123d30c9c61ba25e2fa5294ff149e5 Mon Sep 17 00:00:00 2001 From: Allen Lavoie Date: Wed, 15 Nov 2017 14:16:39 -0800 Subject: [PATCH 071/104] Switch tfe.Network to zero-based naming to match Layer variable names. The plan is that Layer names will transition to match their variable names (currently Layer names are one-based, but their variable names are zero-based). Since we can't change variable names, it's better to match those. Layers added to Networks will follow this convention now. PiperOrigin-RevId: 175878910 --- tensorflow/contrib/eager/python/network.py | 6 +- .../contrib/eager/python/network_test.py | 257 ++++++++++-------- tensorflow/python/layers/base.py | 22 +- 3 files changed, 163 insertions(+), 122 deletions(-) diff --git a/tensorflow/contrib/eager/python/network.py b/tensorflow/contrib/eager/python/network.py index 713ab1ee573..f7303cb5b4b 100644 --- a/tensorflow/contrib/eager/python/network.py +++ b/tensorflow/contrib/eager/python/network.py @@ -108,7 +108,8 @@ class Network(base.Layer): for name in self._variable_scope_counts_on_init.keys() if name) self._name, self._base_name = self._make_unique_name( name_uid_map=name_uid_map, avoid_names=avoid_names, - namespace=self._default_parent_variable_scope.name) + namespace=self._default_parent_variable_scope.name, + zero_based=True) if self._first_parent is None or (self._first_parent # False = no parent and self._first_parent() is None): # Save a pointer to the parent Network so that we can later check that the @@ -258,7 +259,8 @@ class Network(base.Layer): # name, and we should respect it (subject to error checking). layer._name, layer._base_name = layer._make_unique_name( name_uid_map=self._sub_layer_name_uids, - avoid_names=self._owned_layers + avoid_names=self._owned_layers, + zero_based=True # No namespace required, since we've specified our own UID map. 
) layer._first_parent = weakref.ref(self) diff --git a/tensorflow/contrib/eager/python/network_test.py b/tensorflow/contrib/eager/python/network_test.py index e66486d1655..555c6e048d2 100644 --- a/tensorflow/contrib/eager/python/network_test.py +++ b/tensorflow/contrib/eager/python/network_test.py @@ -126,6 +126,33 @@ class NetworkTest(test.TestCase): self.assertAllEqual(self.evaluate(net1.variables[0]), self.evaluate(net2.variables[0])) + @test_util.run_in_graph_and_eager_modes() + def testNetworkMatchesLayerVariableNames(self): + zero = constant_op.constant([[0.]]) + layer_one = core.Dense(1, use_bias=False) + layer_one(zero) + layer_two = core.Dense(1, use_bias=False) + layer_two(zero) + + class TwoLayerNet(network.Network): + + def __init__(self, name=None): + super(TwoLayerNet, self).__init__(name=name) + self.first = self.track_layer(core.Dense( + 1, use_bias=False)) + self.second = self.track_layer(core.Dense( + 1, use_bias=False)) + + def call(self, x): + return self.second(self.first(x)) + + net = TwoLayerNet() + net(zero) + self.assertEqual("two_layer_net/" + layer_one.variables[0].name, + net.first.variables[0].name) + self.assertEqual("two_layer_net/" + layer_two.variables[0].name, + net.second.variables[0].name) + @test_util.run_in_graph_and_eager_modes() def testLoadIntoUnbuiltSharedLayer(self): @@ -173,7 +200,7 @@ class NetworkTest(test.TestCase): # Re-map the variable names so that with default restore mapping we'll # attempt to restore into the unbuilt Layer. name_mapping = { - "checkpoint_creator/first_layer/kernel": "owner_1/first_layer/kernel", + "checkpoint_creator/first_layer/kernel": "owner/first_layer/kernel", "checkpoint_creator/second_layer/kernel": "second_layer/kernel", } save_path = network.save_network_checkpoint( @@ -197,10 +224,10 @@ class NetworkTest(test.TestCase): del first_owner gc.collect() def _restore_map_func(original_name): - if original_name.startswith("owner_1"): - return original_name.replace("owner_1", "owner_2") + if original_name.startswith("owner/"): + return original_name.replace("owner/", "owner_1/") else: - return "user_2/" + original_name + return "user_1/" + original_name with self.assertRaisesRegexp(ValueError, "garbage collected"): network.restore_network_checkpoint( load_into, save_path, map_func=_restore_map_func) @@ -281,7 +308,7 @@ class NetworkTest(test.TestCase): with self.assertRaisesRegexp( ValueError, "The map_func passed to save_network_checkpoint for the Network " - "'parent_1' resulted in two variables named 'foo'"): + "'parent' resulted in two variables named 'foo'"): network.save_network_checkpoint( make_checkpoint, self.get_temp_dir(), map_func=lambda n: "foo") checkpoint = network.save_network_checkpoint( @@ -294,14 +321,14 @@ class NetworkTest(test.TestCase): with self.assertRaisesRegexp( ValueError, ("The map_func passed to restore_network_checkpoint for the Network" - " 'parent_2' resulted in two variables named 'foo'")): + " 'parent_1' resulted in two variables named 'foo'")): loader(one) loader = Parent() loader(one) with self.assertRaisesRegexp( ValueError, ("The map_func passed to restore_network_checkpoint for the Network" - " 'parent_3' resulted in two variables named 'foo'")): + " 'parent_2' resulted in two variables named 'foo'")): network.restore_network_checkpoint( loader, checkpoint, map_func=lambda n: "foo") @@ -309,7 +336,7 @@ class NetworkTest(test.TestCase): def testDefaultMapCollisionErrors(self): one = constant_op.constant([[1.]]) - first = core.Dense(1, name="dense_1", use_bias=False) + first = 
core.Dense(1, name="dense", use_bias=False) first(one) class Parent(network.Network): @@ -330,7 +357,7 @@ class NetworkTest(test.TestCase): with self.assertRaisesRegexp( ValueError, ("The default checkpoint variable name mapping strategy for Network " - "'parent_1' resulted in a naming conflict.")): + "'parent' resulted in a naming conflict.")): network.save_network_checkpoint(make_checkpoint, self.get_temp_dir()) class Compatible(network.Network): @@ -352,7 +379,7 @@ class NetworkTest(test.TestCase): with self.assertRaisesRegexp( ValueError, ("The default checkpoint variable name mapping strategy for Network " - "'parent_2' resulted in a naming conflict.")): + "'parent_1' resulted in a naming conflict.")): network.restore_network_checkpoint(load_checkpoint, checkpoint_path) def testNoReferenceCyclesAfterCall(self): @@ -423,25 +450,25 @@ class NetworkTest(test.TestCase): # Naming happens in the order of first build rather than the order of # construction, but for clarity they're the same here and construction is # annotated. - outside_net_before = MyNetwork() # name=my_network_1 + outside_net_before = MyNetwork() # name=my_network outside_net_before(one) captured_scope = variable_scope.get_variable_scope() with variable_scope.variable_scope("outside_scope"): - net1 = MyNetwork() # name=outside_scope/my_network_1 + net1 = MyNetwork() # name=outside_scope/my_network net1(one) name_conflict1 = MyNetwork(name="name_conflict") # fine, unique so far name_conflict2 = MyNetwork(name="name_conflict") # error on build with variable_scope.variable_scope("inside_scope"): # No issue here since the name is unique within its scope. name_conflict3 = MyNetwork(name="name_conflict") - net2 = MyNetwork() # name=outside_scope/my_network_3 to avoid the - # variable_scope my_network_2 below. + net2 = MyNetwork() # name=outside_scope/my_network_2 to avoid the + # variable_scope my_network_1 below. 
vs_name_conflict = MyNetwork(name="vs_name_conflict") # conflict below with variable_scope.variable_scope("intervening_scope"): with variable_scope.variable_scope(captured_scope): with variable_scope.variable_scope("outside_scope"): name_conflict4 = MyNetwork(name="name_conflict") # error on build - with variable_scope.variable_scope("my_network_2"): + with variable_scope.variable_scope("my_network_1"): pass with variable_scope.variable_scope("vs_name_conflict"): pass @@ -461,35 +488,35 @@ class NetworkTest(test.TestCase): self.assertEqual("outside_scope/name_conflict", name_conflict1.name) self.assertStartsWith( - expected_start="outside_scope/name_conflict/dense_1/", + expected_start="outside_scope/name_conflict/dense/", actual=name_conflict1.variables[0].name) self.assertEqual("outside_scope/inside_scope/name_conflict", name_conflict3.name) self.assertStartsWith( - expected_start="outside_scope/inside_scope/name_conflict/dense_1/", + expected_start="outside_scope/inside_scope/name_conflict/dense/", actual=name_conflict3.variables[0].name) - self.assertEqual("outside_scope/my_network_1", net1.name) + self.assertEqual("outside_scope/my_network", net1.name) self.assertStartsWith( - expected_start="outside_scope/my_network_1/dense_1/", + expected_start="outside_scope/my_network/dense/", actual=net1.trainable_weights[0].name) - self.assertEqual("outside_scope/my_network_3", net2.name) + self.assertEqual("outside_scope/my_network_2", net2.name) self.assertStartsWith( - expected_start="outside_scope/my_network_3/dense_1/", + expected_start="outside_scope/my_network_2/dense/", actual=net2.trainable_weights[0].name) net3(one) - self.assertEqual("outside_scope/my_network_4", net3.name) + self.assertEqual("outside_scope/my_network_3", net3.name) self.assertStartsWith( - expected_start="outside_scope/my_network_4/dense_1/", + expected_start="outside_scope/my_network_3/dense/", actual=net3.trainable_weights[0].name) outside_net_after = MyNetwork() outside_net_after(one) - self.assertEqual("my_network_1", outside_net_before.name) + self.assertEqual("my_network", outside_net_before.name) self.assertStartsWith( - expected_start="my_network_1/dense_1/", + expected_start="my_network/dense/", actual=outside_net_before.trainable_weights[0].name) - self.assertEqual("my_network_2", outside_net_after.name) + self.assertEqual("my_network_1", outside_net_after.name) self.assertStartsWith( - expected_start="my_network_2/dense_1/", + expected_start="my_network_1/dense/", actual=outside_net_after.trainable_weights[0].name) @test_util.run_in_graph_and_eager_modes() @@ -499,12 +526,12 @@ class NetworkTest(test.TestCase): net = MyNetwork() net(constant_op.constant([[2.0]])) self.evaluate(net.variables[0].assign([[42.]])) - self.assertEqual(net.name, "scope1/scope2/my_network_1") + self.assertEqual(net.name, "scope1/scope2/my_network") self.assertStartsWith( - expected_start="scope1/scope2/my_network_1/dense_1/", + expected_start="scope1/scope2/my_network/dense/", actual=net.trainable_weights[0].name) save_path = network.save_network_checkpoint(net, self.get_temp_dir()) - self.assertIn("scope1_scope2_my_network_1", save_path) + self.assertIn("scope1_scope2_my_network", save_path) restore_net = MyNetwork() # Delayed restoration network.restore_network_checkpoint(restore_net, save_path) @@ -532,7 +559,7 @@ class NetworkTest(test.TestCase): one = constant_op.constant([[1.]]) net = ParentNetwork() net(one) - self.assertStartsWith(expected_start="parent_network_1/explicit_name/", + 
self.assertStartsWith(expected_start="parent_network/explicit_name/", actual=net.trainable_weights[0].name) self.assertEqual("explicit_name", net.first.name) @@ -587,15 +614,15 @@ class NetworkTest(test.TestCase): # locally so that previous Layer consutrciton does not interfere with # variable naming (e.g. add a Layer construction before the Network, # suddenly your previously saved checkpoint is incompatible). - self.assertEqual("dense_1", net1.l1.name) - self.assertEqual("dense_1", net2.l1.name) + self.assertEqual("dense", net1.l1.name) + self.assertEqual("dense", net2.l1.name) self.evaluate(net1.trainable_weights[0].assign([[1.]])) self.evaluate(net2.trainable_weights[0].assign([[2.]])) self.assertEqual(2., self.evaluate(net2.trainable_weights[0])) self.assertEqual(1., self.evaluate(net1.trainable_weights[0])) - self.assertStartsWith(expected_start="my_network_1/dense_1/", + self.assertStartsWith(expected_start="my_network/dense/", actual=net1.trainable_weights[0].name) - self.assertStartsWith(expected_start="my_network_2/dense_1/", + self.assertStartsWith(expected_start="my_network_1/dense/", actual=net2.trainable_weights[0].name) @test_util.run_in_graph_and_eager_modes() @@ -616,31 +643,31 @@ class NetworkTest(test.TestCase): one = constant_op.constant([[1.]]) net = ParentNetwork() net(one) - self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense", + self.assertStartsWith(expected_start="parent_network/my_network/dense", actual=net.trainable_weights[0].name) - self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense", + self.assertStartsWith(expected_start="parent_network/my_network/dense", actual=net.first.trainable_weights[0].name) - self.assertStartsWith(expected_start="parent_network_1/my_network_2/dense", + self.assertStartsWith(expected_start="parent_network/my_network_1/dense", actual=net.trainable_weights[1].name) - self.assertStartsWith(expected_start="parent_network_1/my_network_2/dense", + self.assertStartsWith(expected_start="parent_network/my_network_1/dense", actual=net.second.trainable_weights[0].name) - self.assertEqual("parent_network_1", net.name) - self.assertEqual("my_network_1", net.first.name) - self.assertEqual("my_network_2", net.second.name) + self.assertEqual("parent_network", net.name) + self.assertEqual("my_network", net.first.name) + self.assertEqual("my_network_1", net.second.name) net2 = ParentNetwork() net2(one) - self.assertStartsWith(expected_start="parent_network_2/my_network_1/dense", + self.assertStartsWith(expected_start="parent_network_1/my_network/dense", actual=net2.trainable_weights[0].name) - self.assertStartsWith(expected_start="parent_network_2/my_network_1/dense", + self.assertStartsWith(expected_start="parent_network_1/my_network/dense", actual=net2.first.trainable_weights[0].name) - self.assertStartsWith(expected_start="parent_network_2/my_network_2/dense", + self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense", actual=net2.trainable_weights[1].name) - self.assertStartsWith(expected_start="parent_network_2/my_network_2/dense", + self.assertStartsWith(expected_start="parent_network_1/my_network_1/dense", actual=net2.second.trainable_weights[0].name) - self.assertEqual("parent_network_2", net2.name) - self.assertEqual("my_network_1", net2.first.name) - self.assertEqual("my_network_2", net2.second.name) + self.assertEqual("parent_network_1", net2.name) + self.assertEqual("my_network", net2.first.name) + self.assertEqual("my_network_1", net2.second.name) 
@test_util.run_in_graph_and_eager_modes() def testNestableExplicit(self): @@ -701,26 +728,26 @@ class NetworkTest(test.TestCase): one = constant_op.constant([[1.]]) net = MixedLayerNetwork() net(one) - self.assertEqual("dense_1", net.first.name) - self.assertEqual("dense_2", net.second.name) - self.assertEqual("dense_3", net.third.name) - self.assertEqual("dense_4", net.fourth.name) - self.assertEqual("dense_5", net.fifth.name) + self.assertEqual("dense", net.first.name) + self.assertEqual("dense_1", net.second.name) + self.assertEqual("dense_2", net.third.name) + self.assertEqual("dense_3", net.fourth.name) + self.assertEqual("dense_4", net.fifth.name) # Note that this is _not_ the default naming behavior for Layers. Layers # which are added to Networks follow Network variable naming conventions # (i.e. variable names = network name unless variable sharing). Nested # Layers revert to Layer behavior. - self.assertStartsWith(expected_start="mixed_layer_network_1/dense_1/", + self.assertStartsWith(expected_start="mixed_layer_network/dense/", actual=net.trainable_weights[0].name) - self.assertStartsWith(expected_start="mixed_layer_network_1/dense_2/", + self.assertStartsWith(expected_start="mixed_layer_network/dense_1/", actual=net.trainable_weights[1].name) - self.assertStartsWith(expected_start="mixed_layer_network_1/dense_3/", + self.assertStartsWith(expected_start="mixed_layer_network/dense_2/", actual=net.trainable_weights[2].name) - self.assertStartsWith(expected_start="mixed_layer_network_1/dense_4/", + self.assertStartsWith(expected_start="mixed_layer_network/dense_3/", actual=net.trainable_weights[3].name) - self.assertStartsWith(expected_start="mixed_layer_network_1/dense_5/", + self.assertStartsWith(expected_start="mixed_layer_network/dense_4/", actual=net.trainable_weights[4].name) - self.assertEqual("mixed_layer_network_1", net.name) + self.assertEqual("mixed_layer_network", net.name) @test_util.run_in_graph_and_eager_modes() def testNestableExplicitCollisions(self): @@ -773,24 +800,24 @@ class NetworkTest(test.TestCase): net = ParentNetwork() net(one) self.assertStartsWith( - expected_start="parent_network_1/first_unique_child_name/dense_1/", + expected_start="parent_network/first_unique_child_name/dense/", actual=net.trainable_weights[0].name) self.assertStartsWith( - expected_start="parent_network_1/second_unique_child_name/dense_1/", + expected_start="parent_network/second_unique_child_name/dense/", actual=net.trainable_weights[1].name) - self.assertEqual("parent_network_1", net.name) + self.assertEqual("parent_network", net.name) self.assertEqual("first_unique_child_name", net.first.name) self.assertEqual("second_unique_child_name", net.second.name) net2 = ParentNetwork() net2(one) self.assertStartsWith( - expected_start="parent_network_2/first_unique_child_name/dense", + expected_start="parent_network_1/first_unique_child_name/dense", actual=net2.trainable_weights[0].name) self.assertStartsWith( - expected_start="parent_network_2/second_unique_child_name/dense", + expected_start="parent_network_1/second_unique_child_name/dense", actual=net2.trainable_weights[1].name) - self.assertEqual("parent_network_2", net2.name) + self.assertEqual("parent_network_1", net2.name) self.assertEqual("first_unique_child_name", net2.first.name) self.assertEqual("second_unique_child_name", net2.second.name) @@ -848,15 +875,15 @@ class NetworkTest(test.TestCase): net2(one) self.assertStartsWith( - expected_start="first_parent_network_1/my_network_1/dense_1/", + 
expected_start="first_parent_network/my_network/dense/", actual=net2.trainable_weights[0].name) self.assertStartsWith( - expected_start="second_parent_network_1/my_network_1/dense_1/", + expected_start="second_parent_network/my_network/dense/", actual=net2.trainable_weights[1].name) - self.assertEqual("second_parent_network_1", net2.name) + self.assertEqual("second_parent_network", net2.name) self.assertTrue(net2.first is net.first) - self.assertEqual("my_network_1", net2.first.name) - self.assertEqual("my_network_1", net2.second.name) + self.assertEqual("my_network", net2.first.name) + self.assertEqual("my_network", net2.second.name) # No name collision; the owned Network is added first and has a different # name than the shared Network. @@ -874,15 +901,15 @@ class NetworkTest(test.TestCase): net3(one) self.assertStartsWith( - expected_start="third_parent_network_1/my_network_1/dense", + expected_start="third_parent_network/my_network/dense", actual=net3.trainable_weights[0].name) self.assertStartsWith( - expected_start="first_parent_network_1/my_network_2/dense", + expected_start="first_parent_network/my_network_1/dense", actual=net3.trainable_weights[1].name) - self.assertEqual("third_parent_network_1", net3.name) + self.assertEqual("third_parent_network", net3.name) self.assertTrue(net3.second is net.second) - self.assertEqual("my_network_1", net3.first.name) - self.assertEqual("my_network_2", net3.second.name) + self.assertEqual("my_network", net3.first.name) + self.assertEqual("my_network_1", net3.second.name) # "Unavoidable" same-name Layer. The owned name is added first (fixed), then # a shared Network is added with the same name. @@ -900,15 +927,15 @@ class NetworkTest(test.TestCase): net4(one) self.assertStartsWith( - expected_start="fourth_parent_network_1/my_network_1/dense_1/", + expected_start="fourth_parent_network/my_network/dense/", actual=net4.trainable_weights[0].name) self.assertStartsWith( - expected_start="first_parent_network_1/my_network_1/dense_1/", + expected_start="first_parent_network/my_network/dense/", actual=net4.trainable_weights[1].name) - self.assertEqual("fourth_parent_network_1", net4.name) + self.assertEqual("fourth_parent_network", net4.name) self.assertTrue(net4.second is net.first) - self.assertEqual("my_network_1", net4.first.name) - self.assertEqual("my_network_1", net4.second.name) + self.assertEqual("my_network", net4.first.name) + self.assertEqual("my_network", net4.second.name) @test_util.run_in_graph_and_eager_modes() def testRecursiveLayerRenaming(self): @@ -939,28 +966,28 @@ class NetworkTest(test.TestCase): net(one) self.assertStartsWith( - expected_start=("parent_network_1/network_with_layer_children_1/" - "dense_1/"), + expected_start=("parent_network/network_with_layer_children/" + "dense/"), actual=net.trainable_weights[0].name) self.assertStartsWith( - expected_start=("parent_network_1/network_with_layer_children_1/" - "dense_2/"), + expected_start=("parent_network/network_with_layer_children/" + "dense_1/"), actual=net.trainable_weights[1].name) self.assertStartsWith( - expected_start=("parent_network_1/network_with_layer_children_2/" - "dense_1/"), + expected_start=("parent_network/network_with_layer_children_1/" + "dense/"), actual=net.trainable_weights[2].name) self.assertStartsWith( - expected_start=("parent_network_1/network_with_layer_children_2/" - "dense_2/"), + expected_start=("parent_network/network_with_layer_children_1/" + "dense_1/"), actual=net.trainable_weights[3].name) - self.assertEqual("parent_network_1", net.name) - 
self.assertEqual("network_with_layer_children_1", net.first.name) - self.assertEqual("network_with_layer_children_2", net.second.name) - self.assertEqual("dense_1", net.first.first.name) - self.assertEqual("dense_2", net.first.second.name) - self.assertEqual("dense_1", net.second.first.name) - self.assertEqual("dense_2", net.second.second.name) + self.assertEqual("parent_network", net.name) + self.assertEqual("network_with_layer_children", net.first.name) + self.assertEqual("network_with_layer_children_1", net.second.name) + self.assertEqual("dense", net.first.first.name) + self.assertEqual("dense_1", net.first.second.name) + self.assertEqual("dense", net.second.first.name) + self.assertEqual("dense_1", net.second.second.name) @test_util.run_in_graph_and_eager_modes() def testCallInDifferentOrderThanConstruct(self): @@ -994,23 +1021,23 @@ class NetworkTest(test.TestCase): net1(one) self.assertStartsWith( - expected_start="first_network_1/my_network_1/dense_1/", + expected_start="first_network/my_network/dense/", actual=net1.trainable_weights[0].name) self.assertStartsWith( - expected_start="first_network_1/my_network_2/dense_1/", + expected_start="first_network/my_network_1/dense/", actual=net1.trainable_weights[1].name) self.assertStartsWith( - expected_start="first_network_1/my_network_1/dense_1/", + expected_start="first_network/my_network/dense/", actual=net2.trainable_weights[0].name) self.assertStartsWith( - expected_start="second_network_1/my_network_1/dense_1/", + expected_start="second_network/my_network/dense/", actual=net2.trainable_weights[1].name) self.assertTrue(net1.trainable_weights[0] is net2.trainable_weights[0]) - self.assertEqual("first_network_1", net1.name) - self.assertEqual("my_network_1", net1.first.name) - self.assertEqual("my_network_2", net1.second.name) + self.assertEqual("first_network", net1.name) + self.assertEqual("my_network", net1.first.name) + self.assertEqual("my_network_1", net1.second.name) self.assertTrue(net2.first is net1.first) - self.assertEqual("my_network_1", net2.second.name) + self.assertEqual("my_network", net2.second.name) @test_util.run_in_graph_and_eager_modes() def testLayerCallInDifferentOrderThanConstruct(self): @@ -1047,23 +1074,23 @@ class NetworkTest(test.TestCase): net1(one) self.assertStartsWith( - expected_start="first_network_1/dense_1/", + expected_start="first_network/dense/", actual=net1.trainable_weights[0].name) self.assertStartsWith( - expected_start="first_network_1/dense_2/", + expected_start="first_network/dense_1/", actual=net1.trainable_weights[1].name) self.assertStartsWith( - expected_start="first_network_1/dense_1/", + expected_start="first_network/dense/", actual=net2.trainable_weights[0].name) self.assertStartsWith( - expected_start="second_network_1/dense_1/", + expected_start="second_network/dense/", actual=net2.trainable_weights[1].name) self.assertTrue(net1.trainable_weights[0] is net2.trainable_weights[0]) - self.assertEqual("first_network_1", net1.name) - self.assertEqual("dense_1", net1.first.name) - self.assertEqual("dense_2", net1.second.name) + self.assertEqual("first_network", net1.name) + self.assertEqual("dense", net1.first.name) + self.assertEqual("dense_1", net1.second.name) self.assertTrue(net2.first is net1.first) - self.assertEqual("dense_1", net2.second.name) + self.assertEqual("dense", net2.second.name) @test_util.run_in_graph_and_eager_modes() def testLayerAlreadyBuilt(self): @@ -1092,13 +1119,13 @@ class NetworkTest(test.TestCase): # do not match their layer names. 
actual=net.trainable_weights[0].name) self.assertStartsWith( - expected_start="first_network_1/dense_1/", + expected_start="first_network/dense/", actual=net.trainable_weights[1].name) self.assertTrue( net.trainable_weights[0] is shared_layer.trainable_weights[0]) - self.assertEqual("first_network_1", net.name) + self.assertEqual("first_network", net.name) self.assertEqual("dense_3", net.first.name) - self.assertEqual("dense_1", net.second.name) + self.assertEqual("dense", net.second.name) class SequentialTest(test.TestCase): diff --git a/tensorflow/python/layers/base.py b/tensorflow/python/layers/base.py index c71e8382e91..55da959a492 100644 --- a/tensorflow/python/layers/base.py +++ b/tensorflow/python/layers/base.py @@ -402,10 +402,11 @@ class Layer(object): return input_shape def _make_unique_name(self, name_uid_map=None, avoid_names=None, - namespace=''): + namespace='', zero_based=False): base_name = _to_snake_case(self.__class__.__name__) name = _unique_layer_name(base_name, name_uid_map=name_uid_map, - avoid_names=avoid_names, namespace=namespace) + avoid_names=avoid_names, namespace=namespace, + zero_based=zero_based) return (name, base_name) def _set_scope(self, scope=None): @@ -2371,7 +2372,8 @@ def _get_default_graph_uid_map(): return name_uid_map -def _unique_layer_name(name, name_uid_map=None, avoid_names=None, namespace=''): +def _unique_layer_name(name, name_uid_map=None, avoid_names=None, namespace='', + zero_based=False): """Makes a layer name (or arbitrary string) unique within a TensorFlow graph. Arguments: @@ -2383,6 +2385,8 @@ def _unique_layer_name(name, name_uid_map=None, avoid_names=None, namespace=''): namespace: Gets a name which is unique within the (graph, namespace). Layers which are not Networks use a blank namespace and so get graph-global names. + zero_based: If True, name sequences start with no suffix (e.g. "dense", + "dense_1"). If False, naming is one-based ("dense_1", "dense_2"). Returns: Unique string name. @@ -2401,6 +2405,14 @@ def _unique_layer_name(name, name_uid_map=None, avoid_names=None, namespace=''): proposed_name = None while proposed_name is None or proposed_name in avoid_names: name_key = (namespace, name) - name_uid_map[name_key] += 1 - proposed_name = name + '_' + str(name_uid_map[name_key]) + if zero_based: + number = name_uid_map[name_key] + if number: + proposed_name = name + '_' + str(number) + else: + proposed_name = name + name_uid_map[name_key] += 1 + else: + name_uid_map[name_key] += 1 + proposed_name = name + '_' + str(name_uid_map[name_key]) return proposed_name From 9642c81ef1e25094b6e775204f5392d6cf2eb32b Mon Sep 17 00:00:00 2001 From: Eli Bendersky Date: Wed, 15 Nov 2017 14:22:42 -0800 Subject: [PATCH 072/104] Make per-platform test disabling mechanism more flexible by allowing regular expressions. 
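As a minimal illustration of the new behavior (not part of this patch): with this change, a manifest entry disables a test whenever any of its listed regexps fully matches the current platform string, rather than requiring an exact string match. The Python sketch below only mirrors that lookup; the real implementation is the C++ in test_macros.cc further down, and the manifest entry shown is hypothetical.

    import re

    def prepend_disabled_if_indicated(test_case_name, test_name, manifest, platform):
        # Sketch of the new matching rule; not the actual implementation.
        # The manifest maps "TestCase.TestName" to a list of platform regexps.
        disabled_platforms = manifest.get(test_case_name + "." + test_name, [])
        for pattern in disabled_platforms:
            # A full regexp match against the platform string disables the test.
            if re.fullmatch(pattern, platform):
                return "DISABLED_" + test_name
        return test_name

    # Hypothetical manifest entry: disable MyTest.MyTestCase on any platform
    # whose name starts with "GPU".
    manifest = {"MyTest.MyTestCase": ["GPU.*"]}
    print(prepend_disabled_if_indicated("MyTest", "MyTestCase", manifest, "GPU"))
    # -> DISABLED_MyTestCase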
PiperOrigin-RevId: 175879989 --- tensorflow/compiler/xla/tests/build_defs.bzl | 1 + tensorflow/compiler/xla/tests/test_macros.cc | 14 +++++++------- tensorflow/compiler/xla/tests/test_macros.h | 6 ++++-- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/tensorflow/compiler/xla/tests/build_defs.bzl b/tensorflow/compiler/xla/tests/build_defs.bzl index 36d10fff540..f594c609db6 100644 --- a/tensorflow/compiler/xla/tests/build_defs.bzl +++ b/tensorflow/compiler/xla/tests/build_defs.bzl @@ -248,5 +248,6 @@ def generate_backend_test_macros(backends=[]): deps = [ "//tensorflow/compiler/xla:types", "//tensorflow/core:lib", + "//tensorflow/core:regexp_internal", "//tensorflow/core:test", ]) diff --git a/tensorflow/compiler/xla/tests/test_macros.cc b/tensorflow/compiler/xla/tests/test_macros.cc index 173fb1b0008..978a669bcab 100644 --- a/tensorflow/compiler/xla/tests/test_macros.cc +++ b/tensorflow/compiler/xla/tests/test_macros.cc @@ -21,12 +21,13 @@ limitations under the License. #include #include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/regexp.h" namespace xla { namespace { // Mapping from test name; i.e. MyTest.MyTestCase to platforms on which it is -// disabled. +// disabled - a sequence of regexps. using ManifestT = std::unordered_map>; ManifestT ReadManifest() { @@ -66,9 +67,6 @@ ManifestT ReadManifest() { string PrependDisabledIfIndicated(const string& test_case_name, const string& test_name) { - // TODO(leary): this code reads the manifest for every test case instantiated - // in every file. Consider switching to a singleton or using a compile-time - // genrule instead. ManifestT manifest = ReadManifest(); // First try full match: test_case_name.test_name @@ -83,11 +81,13 @@ string PrependDisabledIfIndicated(const string& test_case_name, } } + // Expect a full match vs. one of the platform regexps to disable the test. const std::vector& disabled_platforms = it->second; string platform_string = XLA_PLATFORM; - if (std::find(disabled_platforms.begin(), disabled_platforms.end(), - platform_string) != disabled_platforms.end()) { - return "DISABLED_" + test_name; + for (const auto& s : disabled_platforms) { + if (RE2::FullMatch(/*text=*/platform_string, /*re=*/s)) { + return "DISABLED_" + test_name; + } } // We didn't hit in the disabled manifest entries, so don't disable it. diff --git a/tensorflow/compiler/xla/tests/test_macros.h b/tensorflow/compiler/xla/tests/test_macros.h index bea0b5ef92a..28a2d0198a7 100644 --- a/tensorflow/compiler/xla/tests/test_macros.h +++ b/tensorflow/compiler/xla/tests/test_macros.h @@ -66,8 +66,10 @@ limitations under the License. namespace xla { -// Reads a disabled manifest file (and retains it as a singleton) to resolve -// whether test cases should be disabled on a particular platform. +// Reads a disabled manifest file to resolve whether test cases should be +// disabled on a particular platform. For a test that should be disabled, +// returns DISABLED_ prepended to its name; otherwise returns the test name +// unmodified. string PrependDisabledIfIndicated(const string& test_case_name, const string& test_name); From bf3b0a8c541bf3d1f7ccbd98375ecc3b92d1537f Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 14:24:28 -0800 Subject: [PATCH 073/104] Use optimized reverse_row function to flip uint8 images. In the imagenet preprocessing pipeline, we can always flip the image first then resize it. It's cheaper to flip a small uint8 image.
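As a rough illustration of the ordering described above (not part of the patch itself): in a TF 1.x Python input pipeline, the flip can be applied while the image is still a small uint8 tensor, with the resize only afterwards, so the flip touches far fewer bytes. The function below is a hypothetical sketch; the 224x224 target size is just an example.

    import tensorflow as tf

    def preprocess(image_uint8):
        # Illustrative only. `image_uint8` is assumed to be a [height, width, 3]
        # uint8 image tensor, e.g. the output of tf.image.decode_jpeg.
        flipped = tf.image.random_flip_left_right(image_uint8)  # cheap on uint8
        resized = tf.image.resize_images(flipped, [224, 224])   # float output
        return resized

    image = tf.placeholder(tf.uint8, shape=[None, None, 3])
    output = preprocess(image)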
PiperOrigin-RevId: 175880318 --- tensorflow/core/kernels/reverse_op.cc | 66 +++- tensorflow/core/kernels/reverse_op_test.cc | 332 ++++++++++++++------- 2 files changed, 286 insertions(+), 112 deletions(-) diff --git a/tensorflow/core/kernels/reverse_op.cc b/tensorflow/core/kernels/reverse_op.cc index 4f2afa52579..7ac34d1c623 100644 --- a/tensorflow/core/kernels/reverse_op.cc +++ b/tensorflow/core/kernels/reverse_op.cc @@ -23,6 +23,7 @@ limitations under the License. #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/type_traits.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/bounds_check.h" #include "tensorflow/core/lib/core/status.h" @@ -35,7 +36,7 @@ typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; #ifdef TENSORFLOW_USE_SYCL typedef Eigen::SyclDevice SYCLDevice; -#endif // TENSORFLOW_USE_SYCL +#endif // TENSORFLOW_USE_SYCL namespace { @@ -43,7 +44,7 @@ namespace { // NUM_CHANNELS can be <= 0 to compute it dynamically from // Otherwise, it must equal input.dim_size(2) and is used as a compile-time // constant. -template +template void ReverseRows(OpKernelContext* context, const Tensor& input, Tensor* result) { auto work = [&input, result](int64 start, int64 end) { @@ -53,8 +54,8 @@ void ReverseRows(OpKernelContext* context, const Tensor& input, const int64 row_size = inner_size * middle_size; DCHECK_EQ(input.dim_size(2), inner_size); - const int32* in_ptr = input.bit_casted_tensor().data(); - int32* out_ptr = result->bit_casted_tensor().data(); + const T* in_ptr = input.bit_casted_tensor().data(); + T* out_ptr = result->bit_casted_tensor().data(); in_ptr += start * row_size; out_ptr += start * row_size; @@ -64,7 +65,7 @@ void ReverseRows(OpKernelContext* context, const Tensor& input, int remaining = middle_size; while (remaining > 0) { out_ptr -= inner_size; - memcpy(out_ptr, in_ptr, inner_size * sizeof(float)); + memcpy(out_ptr, in_ptr, inner_size * sizeof(T)); in_ptr += inner_size; --remaining; } @@ -81,6 +82,48 @@ void ReverseRows(OpKernelContext* context, const Tensor& input, std::move(work)); } +template +struct data_type_can_memcpy { + static constexpr bool value = + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value || + std::is_same::value || std::is_same::value; +}; + +template +typename std::enable_if::value>::type +DoHandleReverseCase(OpKernelContext* context, const Tensor& input, + Tensor* result) { + if (sizeof(T) == 1) { + static_assert(sizeof(uint8) == 1, "uint8 must be 1 byte."); + ReverseRows(context, input, result); + } else if (sizeof(T) == 2) { + static_assert(sizeof(uint16) == 2, "uint16 must be 2 bytes"); + ReverseRows(context, input, result); + } else if (sizeof(T) == 4) { + static_assert(sizeof(uint32) == 4, "uint32 must be 4 bytes"); + ReverseRows(context, input, result); + } else if (sizeof(T) == 8) { + static_assert(sizeof(uint64) == 8, "uint64 must be 8 bytes"); + ReverseRows(context, input, result); + } else if (sizeof(T) == 16) { + static_assert(sizeof(complex128) == 16, "complex128 must be 16 bytes"); + ReverseRows(context, input, result); + } else { + context->CtxFailure( + errors::InvalidArgument("%s has unexpected size of %d bytes", + DataTypeString(input.dtype()), 
sizeof(T))); + } +} + +template +typename std::enable_if::value>::type +DoHandleReverseCase(OpKernelContext* context, const Tensor& input, + Tensor* result) {} + } // namespace template @@ -91,15 +134,14 @@ void HandleReverseCase(OpKernelContext* context, // Use optimized reverse if possible. if (NDIMS == 3 && std::is_same::value && - std::is_same::value && (!dims(0) && dims(1) && !dims(2))) { + data_type_can_memcpy::value && (!dims(0) && dims(1) && !dims(2))) { if (input.dim_size(2) == 3) { - ReverseRows<3>(context, input, result); + DoHandleReverseCase(context, input, result); } else { - ReverseRows<-1>(context, input, result); + DoHandleReverseCase(context, input, result); } return; } - typename Eigen::array axes_di; for (int i = 0; i < NDIMS; i++) { axes_di[i] = dims(i); @@ -168,11 +210,11 @@ void HandleReverseV2Case(OpKernelContext* context, // Use optimized reverse if possible. if (NDIMS == 3 && std::is_same::value && - std::is_same::value && (!axes[0] && axes[1] && !axes[2])) { + data_type_can_memcpy::value && (!axes[0] && axes[1] && !axes[2])) { if (input.dim_size(2) == 3) { - ReverseRows<3>(context, input, result); + DoHandleReverseCase(context, input, result); } else { - ReverseRows<-1>(context, input, result); + DoHandleReverseCase(context, input, result); } return; } diff --git a/tensorflow/core/kernels/reverse_op_test.cc b/tensorflow/core/kernels/reverse_op_test.cc index 9829e40fe85..e8285fb0e24 100644 --- a/tensorflow/core/kernels/reverse_op_test.cc +++ b/tensorflow/core/kernels/reverse_op_test.cc @@ -46,69 +46,132 @@ class ReverseOpTest : public OpsTestBase { .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } + + template + void Reverse_0() { + MakeOp(DataTypeToEnum::value); + AddInputFromArray(TensorShape({}), {3}); + AddInputFromArray(TensorShape({}), {true}); + TF_ASSERT_OK(RunOpKernel()); + + Tensor* output = GetOutput(0); + Tensor expected(allocator(), DataTypeToEnum::value, TensorShape({})); + expected.scalar() = expected.scalar().constant(3); + test::ExpectTensorEqual(expected, *output); + } + + template + void Reverse_234() { + MakeOp(DataTypeToEnum::value); + // Feed and run + // [[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + // [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]] + AddInputFromArray(TensorShape({2, 3, 4}), + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}); + AddInputFromArray(TensorShape({3}), {true, false, true}); + + TF_ASSERT_OK(RunOpKernel()); + + // Check the new state of the input + Tensor* params_tensor = GetOutput(0); + Tensor expected(allocator(), DataTypeToEnum::value, + TensorShape({2, 3, 4})); + // Should become + // [[[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]] + // [[3, 2, 1, 0], [7, 6, 5, 4], [11, 10, 9, 8]]] + test::FillValues(&expected, + {15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20, + 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8}); + test::ExpectTensorEqual(expected, *params_tensor); + } + + template + void Reverse_1234() { + MakeOp(DataTypeToEnum::value); + // Feed and run + // [[[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + // [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]] + AddInputFromArray(TensorShape({1, 2, 3, 4}), + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}); + AddInputFromArray(TensorShape({4}), {true, true, false, true}); + + TF_ASSERT_OK(RunOpKernel()); + + // Check the new state of the input + Tensor* params_tensor = GetOutput(0); + Tensor expected(allocator(), DataTypeToEnum::value, + TensorShape({1, 2, 3, 4})); 
+ // Should become + // [[[[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]] + // [[3, 2, 1, 0], [7, 6, 5, 4], [11, 10, 9, 8]]]] + test::FillValues(&expected, + {15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20, + 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8}); + test::ExpectTensorEqual(expected, *params_tensor); + } }; -TEST_F(ReverseOpTest, Reverse_0) { - MakeOp(DT_FLOAT); - AddInputFromArray(TensorShape({}), {3}); - AddInputFromArray(TensorShape({}), {true}); - TF_ASSERT_OK(RunOpKernel()); +TEST_F(ReverseOpTest, Reverse_0_uint8) { Reverse_0(); } - Tensor* output = GetOutput(0); - Tensor expected(allocator(), DT_FLOAT, TensorShape({})); - expected.scalar() = expected.scalar().constant(3.f); - test::ExpectTensorEqual(expected, *output); -} +TEST_F(ReverseOpTest, Reverse_0_int8) { Reverse_0(); } -TEST_F(ReverseOpTest, Reverse_234) { - MakeOp(DT_FLOAT); +TEST_F(ReverseOpTest, Reverse_0_uint16) { Reverse_0(); } - // Feed and run - // [[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] - // [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]] - AddInputFromArray(TensorShape({2, 3, 4}), - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23}); - AddInputFromArray(TensorShape({3}), {true, false, true}); +TEST_F(ReverseOpTest, Reverse_0_int16) { Reverse_0(); } - TF_ASSERT_OK(RunOpKernel()); +TEST_F(ReverseOpTest, Reverse_0_float) { Reverse_0(); } - // Check the new state of the input - Tensor* params_tensor = GetOutput(0); - Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 4})); - // Should become - // [[[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]] - // [[3, 2, 1, 0], [7, 6, 5, 4], [11, 10, 9, 8]]] - test::FillValues( - &expected, {15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20, 3, 2, 1, 0, 7, - 6, 5, 4, 11, 10, 9, 8}); - test::ExpectTensorEqual(expected, *params_tensor); -} +TEST_F(ReverseOpTest, Reverse_0_int32) { Reverse_0(); } -TEST_F(ReverseOpTest, Reverse_1234) { - MakeOp(DT_FLOAT); +TEST_F(ReverseOpTest, Reverse_0_int64) { Reverse_0(); } - // Feed and run - // [[[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] - // [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]]] - AddInputFromArray(TensorShape({1, 2, 3, 4}), - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23}); - AddInputFromArray(TensorShape({4}), {true, true, false, true}); +TEST_F(ReverseOpTest, Reverse_0_double) { Reverse_0(); } - TF_ASSERT_OK(RunOpKernel()); +TEST_F(ReverseOpTest, Reverse_0_complex64) { Reverse_0(); } - // Check the new state of the input - Tensor* params_tensor = GetOutput(0); - Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 3, 4})); - // Should become - // [[[[15, 14, 13, 12], [19, 18, 17, 16], [23, 22, 21, 20]] - // [[3, 2, 1, 0], [7, 6, 5, 4], [11, 10, 9, 8]]]] - test::FillValues( - &expected, {15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20, 3, 2, 1, 0, 7, - 6, 5, 4, 11, 10, 9, 8}); - test::ExpectTensorEqual(expected, *params_tensor); -} +TEST_F(ReverseOpTest, Reverse_0_complex128) { Reverse_0(); } + +TEST_F(ReverseOpTest, Reverse_234_uint8) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_int8) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_uint16) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_int16) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_float) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_int32) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_int64) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_double) { Reverse_234(); } + 
+TEST_F(ReverseOpTest, Reverse_234_complex64) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_234_complex128) { Reverse_234(); } + +TEST_F(ReverseOpTest, Reverse_1234_uint8) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_int8) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_uint16) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_int16) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_float) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_int32) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_int64) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_double) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_complex64) { Reverse_1234(); } + +TEST_F(ReverseOpTest, Reverse_1234_complex128) { Reverse_1234(); } static SessionOptions GetOptions(int intra_threads) { SessionOptions opts; @@ -119,10 +182,11 @@ static SessionOptions GetOptions(int intra_threads) { // Creates a Graph which "reduce"s a 3D float tensor of "num" elements // into a scalar. +template static Graph* Reverse(const TensorShape& shape, int reverse_axis) { Graph* g = new Graph(OpRegistry::Global()); - Tensor data(DT_FLOAT, shape); - data.flat().setRandom(); + Tensor data(DataTypeToEnum::value, shape); + data.flat().setRandom(); Tensor axes(DT_INT32, TensorShape({1})); axes.flat()(0) = reverse_axis; test::graph::Reverse(g, test::graph::Constant(g, data), @@ -130,81 +194,149 @@ static Graph* Reverse(const TensorShape& shape, int reverse_axis) { return g; } +template static void RunReverseRowsBenchmark(int iters, int outer_dim, int middle_dim, int intra_threads, int channels) { SessionOptions opts = GetOptions(intra_threads); TensorShape shape{outer_dim, middle_dim, channels}; const int64 num_items = static_cast(iters) * shape.num_elements(); testing::ItemsProcessed(num_items); - testing::BytesProcessed(num_items * sizeof(float)); + testing::BytesProcessed(num_items * sizeof(T)); testing::UseRealTime(); - test::Benchmark("cpu", Reverse(shape, 1), &opts).Run(iters); + test::Benchmark("cpu", Reverse(shape, 1), &opts).Run(iters); } -static void BM_ReverseRowsOf1Channel_1T(int iters, int outer_dim, - int middle_dim) { - RunReverseRowsBenchmark(iters, outer_dim, middle_dim, 1 /* intra_threads */, - 1 /* channels */); +static void BM_ReverseRowsOf1Channel_1T_float(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 1 /* intra_threads */, 1 /* channels */); } -BENCHMARK(BM_ReverseRowsOf1Channel_1T) +BENCHMARK(BM_ReverseRowsOf1Channel_1T_float) ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); -static void BM_ReverseRowsOf1Channel_4T(int iters, int outer_dim, - int middle_dim) { - RunReverseRowsBenchmark(iters, outer_dim, middle_dim, 4 /* intra_threads */, - 1 /* channels */); +static void BM_ReverseRowsOf1Channel_1T_uint8(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 1 /* intra_threads */, 1 /* channels */); } -BENCHMARK(BM_ReverseRowsOf1Channel_4T) +BENCHMARK(BM_ReverseRowsOf1Channel_1T_uint8) ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); -static void BM_ReverseRowsOf3Channels_1T(int iters, int outer_dim, - int middle_dim) { - RunReverseRowsBenchmark(iters, outer_dim, middle_dim, 1 /* intra_threads */, - 3 /* channels */); +static void BM_ReverseRowsOf1Channel_4T_float(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 4 /* intra_threads */, 1 /* channels 
*/); } -BENCHMARK(BM_ReverseRowsOf3Channels_1T) - ->ArgPair(288, 288) - ->ArgPair(224, 224) - ->ArgPair(1024, 1024) - ->ArgPair(10 * 1024, 1024); - -static void BM_ReverseRowsOf3Channels_4T(int iters, int outer_dim, - int middle_dim) { - RunReverseRowsBenchmark(iters, outer_dim, middle_dim, 4 /* intra_threads */, - 3 /* channels */); -} - -BENCHMARK(BM_ReverseRowsOf3Channels_4T) - ->ArgPair(288, 288) - ->ArgPair(224, 224) - ->ArgPair(1024, 1024) - ->ArgPair(10 * 1024, 1024); - -static void BM_ReverseRowsOf4Channels_1T(int iters, int outer_dim, - int middle_dim) { - RunReverseRowsBenchmark(iters, outer_dim, middle_dim, 1 /* intra_threads */, - 4 /* channels */); -} - -BENCHMARK(BM_ReverseRowsOf4Channels_1T) +BENCHMARK(BM_ReverseRowsOf1Channel_4T_float) ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); -static void BM_ReverseRowsOf4Channels_4T(int iters, int outer_dim, - int middle_dim) { - RunReverseRowsBenchmark(iters, outer_dim, middle_dim, 4 /* intra_threads */, - 4 /* channels */); +static void BM_ReverseRowsOf1Channel_4T_uint8(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 4 /* intra_threads */, 1 /* channels */); } -BENCHMARK(BM_ReverseRowsOf4Channels_4T) +BENCHMARK(BM_ReverseRowsOf1Channel_4T_uint8) + ->ArgPair(288, 288) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf3Channels_1T_float(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 1 /* intra_threads */, 3 /* channels */); +} + +BENCHMARK(BM_ReverseRowsOf3Channels_1T_float) + ->ArgPair(288, 288) + ->ArgPair(30, 30) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf3Channels_1T_uint8(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 1 /* intra_threads */, 3 /* channels */); +} + +BENCHMARK(BM_ReverseRowsOf3Channels_1T_uint8) + ->ArgPair(288, 288) + ->ArgPair(30, 30) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf3Channels_4T_float(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 4 /* intra_threads */, 3 /* channels */); +} + +BENCHMARK(BM_ReverseRowsOf3Channels_4T_float) + ->ArgPair(288, 288) + ->ArgPair(30, 30) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf3Channels_4T_uint8(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 4 /* intra_threads */, 3 /* channels */); +} +BENCHMARK(BM_ReverseRowsOf3Channels_4T_uint8) + ->ArgPair(288, 288) + ->ArgPair(30, 30) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf4Channels_1T_float(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 1 /* intra_threads */, 4 /* channels */); +} + +BENCHMARK(BM_ReverseRowsOf4Channels_1T_float) + ->ArgPair(288, 288) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf4Channels_1T_uint8(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 1 /* intra_threads */, 4 /* channels */); +} + +BENCHMARK(BM_ReverseRowsOf4Channels_1T_uint8) + ->ArgPair(288, 288) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf4Channels_4T_float(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 
4 /* intra_threads */, 4 /* channels */); +} + +BENCHMARK(BM_ReverseRowsOf4Channels_4T_float) + ->ArgPair(288, 288) + ->ArgPair(1024, 1024) + ->ArgPair(10 * 1024, 1024); + +static void BM_ReverseRowsOf4Channels_4T_uint8(int iters, int outer_dim, + int middle_dim) { + RunReverseRowsBenchmark(iters, outer_dim, middle_dim, + 4 /* intra_threads */, 4 /* channels */); +} + +BENCHMARK(BM_ReverseRowsOf4Channels_4T_uint8) ->ArgPair(288, 288) ->ArgPair(1024, 1024) ->ArgPair(10 * 1024, 1024); From 459153ab91ede37a3175a4dee5aa3f38690d3ebb Mon Sep 17 00:00:00 2001 From: Artem Belevich Date: Wed, 15 Nov 2017 14:36:49 -0800 Subject: [PATCH 074/104] Improve ptxas-related error handling. * Don't crash on cubin file cleanup as the file may not have been created. * Log only one error message if ptxas is not found. PiperOrigin-RevId: 175882482 --- .../compiler/xla/service/gpu/gpu_compiler.cc | 27 +++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc b/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc index 6a0eacc66a5..23fb308ec6b 100644 --- a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc +++ b/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc @@ -16,6 +16,7 @@ limitations under the License. #include "tensorflow/compiler/xla/service/gpu/gpu_compiler.h" #include +#include #include #include @@ -258,7 +259,9 @@ StatusOr> CompilePtx(const string& ptx, int cc_major, return InternalError("couldn't get temp CUBIN file name"); } auto cubin_cleaner = tensorflow::gtl::MakeCleanup([&cubin_path] { - TF_CHECK_OK(tensorflow::Env::Default()->DeleteFile(cubin_path)); + // CUBIN file may never be created, so the failure to delete it should not + // produce TF error. + tensorflow::Env::Default()->DeleteFile(cubin_path).IgnoreError(); }); tensorflow::SubProcess ptxas_info_dumper; std::vector ptxas_args = {ptxas_path, ptx_path, "-o", cubin_path, @@ -500,10 +503,24 @@ std::vector GpuCompiler::CompilePtxOrGetCachedResult(const string& ptx, VLOG(2) << "Compiled PTX size:" << ptx.size() << " CUBIN size: " << cache_value->cubin_data.size(); } else { - LOG(WARNING) - << "Failed to compile ptx to cubin. Will attempt to let " - "GPU driver compile the ptx. " - << maybe_cubin.status(); + bool log_warning = true; + if (maybe_cubin.status().code() == + tensorflow::error::Code::NOT_FOUND) { + // Missing ptxas is expected in some environments where CUDA SDK + // binaries are not available. We don't want to spam logs with + // identical warnings in this case. + + // TODO(zhengxq): we should implement a LOG_FIRST_N and LOG_EVERY_N + // for more general usage. + static std::atomic warning_done(false); + log_warning = !warning_done.exchange(true); + } + if (log_warning) { + LOG(WARNING) + << "Failed to compile ptx to cubin. Will attempt to let " + "GPU driver compile the ptx. " + << maybe_cubin.status(); + } } } cache_value->compilation_done = true; From 4634ee62ed5628ac8a1962f9172907f4b7289710 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 16:02:29 -0800 Subject: [PATCH 075/104] Merged commit includes the following changes: 175895735 by gunan: Move IODataType to a new types.proto in preparation for using it in ModelFlags -- 175893178 by fchollet: Fix multiprocessing issue in keras.utils.data_utils. -- 175891377 by jart: Use redundant download URLs and fix protobuf def I uncommented all the GitHub URLs that turned out to have consistent SHAs. The ones that didn't, I left them commented. 
We can't re-mirror those URLs because doing so would cause past revisions of the repository to become broken. So workspace.bzl is going to have to evolve back to not having these comments, over time, as we continue to upgrade stuff. -- 175889431 by apassos: Fixed order for EagerVariableStore. -- 175882680 by A. Unique TensorFlower: Move IODataType to a new types.proto in preparation for using it in ModelFlags -- PiperOrigin-RevId: 175895735 --- .../keras/_impl/keras/utils/data_utils.py | 6 +- tensorflow/python/ops/variable_scope.py | 5 +- tensorflow/workspace.bzl | 78 ++++++++++--------- 3 files changed, 48 insertions(+), 41 deletions(-) diff --git a/tensorflow/python/keras/_impl/keras/utils/data_utils.py b/tensorflow/python/keras/_impl/keras/utils/data_utils.py index 4f335af62e0..1f2e9ac4407 100644 --- a/tensorflow/python/keras/_impl/keras/utils/data_utils.py +++ b/tensorflow/python/keras/_impl/keras/utils/data_utils.py @@ -381,7 +381,7 @@ class Sequence(object): # Global variables to be shared across processes _SHARED_SEQUENCES = {} # We use a Value to provide unique id to different processes. -_SEQUENCE_COUNTER = multiprocessing.Value('i', 0) +_SEQUENCE_COUNTER = None def get_index(uid, i): @@ -477,6 +477,10 @@ class OrderedEnqueuer(SequenceEnqueuer): self.sequence = sequence # Doing Multiprocessing.Value += x is not process-safe. + global _SEQUENCE_COUNTER + if _SEQUENCE_COUNTER is None: + _SEQUENCE_COUNTER = multiprocessing.Value('i', 0) + with _SEQUENCE_COUNTER.get_lock(): self.uid = _SEQUENCE_COUNTER.value _SEQUENCE_COUNTER.value += 1 diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py index 9a0ff755941..2cdf5855030 100644 --- a/tensorflow/python/ops/variable_scope.py +++ b/tensorflow/python/ops/variable_scope.py @@ -1225,11 +1225,12 @@ class EagerVariableStore(object): return with_variable_store(self._store) def variables(self): - return self._store._vars.values() # pylint: disable=protected-access + return sorted(self._store._vars.values(), key=lambda x: x.name) # pylint: disable=protected-access def trainable_variables(self): # pylint: disable=protected-access - return [x for x in self._store._vars.values() if x._trainable] + return sorted([x for x in self._store._vars.values() if x._trainable], + key=lambda x: x.name) # pylint: enable=protected-access diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index 19e1deb95da..cd4ea8a7d00 100644 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -152,7 +152,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "mkl", urls = [ "https://mirror.bazel.build/github.com/01org/mkl-dnn/releases/download/v0.9/mklml_lnx_2018.0.20170720.tgz", - # "https://github.com/01org/mkl-dnn/releases/download/v0.9/mklml_lnx_2018.0.20170720.tgz", + "https://github.com/01org/mkl-dnn/releases/download/v0.9/mklml_lnx_2018.0.20170720.tgz", ], sha256 = "57ba56c4c243f403ff78f417ff854ef50b9eddf4a610a917b7c95e7fa8553a4b", strip_prefix = "mklml_lnx_2018.0.20170720", @@ -211,7 +211,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "libxsmm_archive", urls = [ "https://mirror.bazel.build/github.com/hfp/libxsmm/archive/1.8.1.tar.gz", - # "https://github.com/hfp/libxsmm/archive/1.8.1.tar.gz", + "https://github.com/hfp/libxsmm/archive/1.8.1.tar.gz", ], sha256 = "2ade869c3f42f23b5263c7d594aa3c7e5e61ac6a3afcaf5d6e42899d2a7986ce", strip_prefix = "libxsmm-1.8.1", @@ -238,7 +238,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "com_googlesource_code_re2", urls = [ 
"https://mirror.bazel.build/github.com/google/re2/archive/b94b7cd42e9f02673cd748c1ac1d16db4052514c.tar.gz", - # "https://github.com/google/re2/archive/b94b7cd42e9f02673cd748c1ac1d16db4052514c.tar.gz", + "https://github.com/google/re2/archive/b94b7cd42e9f02673cd748c1ac1d16db4052514c.tar.gz", ], sha256 = "bd63550101e056427c9e7ff12a408c1c8b74e9803f393ca916b2926fc2c4906f", strip_prefix = "re2-b94b7cd42e9f02673cd748c1ac1d16db4052514c", @@ -247,8 +247,8 @@ def tf_workspace(path_prefix="", tf_repo_name=""): native.http_archive( name = "gemmlowp", urls = [ - "https://mirror.bazel.build/github.com/google/gemmlowp/archive/010bb3e71a26ca1d0884a167081d092b43563996.zip" - # "https://github.com/google/gemmlowp/archive/010bb3e71a26ca1d0884a167081d092b43563996.zip", + "https://mirror.bazel.build/github.com/google/gemmlowp/archive/010bb3e71a26ca1d0884a167081d092b43563996.zip", + "https://github.com/google/gemmlowp/archive/010bb3e71a26ca1d0884a167081d092b43563996.zip", ], sha256 = "dd2557072bde12141419cb8320a9c25e6ec41a8ae53c2ac78c076a347bb46d9d", strip_prefix = "gemmlowp-010bb3e71a26ca1d0884a167081d092b43563996", @@ -258,7 +258,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "farmhash_archive", urls = [ "https://mirror.bazel.build/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz", - # "https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz", + "https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz", ], sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0", strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45", @@ -274,7 +274,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "highwayhash", urls = [ "https://mirror.bazel.build/github.com/google/highwayhash/archive/dfcb97ca4fe9277bf9dc1802dd979b071896453b.tar.gz", - # "https://github.com/google/highwayhash/archive/dfcb97ca4fe9277bf9dc1802dd979b071896453b.tar.gz", + "https://github.com/google/highwayhash/archive/dfcb97ca4fe9277bf9dc1802dd979b071896453b.tar.gz", ], sha256 = "0f30a15b1566d93f146c8d149878a06e91d9bb7ec2cfd76906df62a82be4aac9", strip_prefix = "highwayhash-dfcb97ca4fe9277bf9dc1802dd979b071896453b", @@ -296,7 +296,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "jpeg", urls = [ "https://mirror.bazel.build/github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.1.tar.gz", - # "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.1.tar.gz", + "https://github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.1.tar.gz", ], sha256 = "c15a9607892113946379ccea3ca8b85018301b200754f209453ab21674268e77", strip_prefix = "libjpeg-turbo-1.5.1", @@ -308,7 +308,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "png_archive", urls = [ "https://mirror.bazel.build/github.com/glennrp/libpng/archive/v1.2.53.tar.gz", - # "https://github.com/glennrp/libpng/archive/v1.2.53.tar.gz", + "https://github.com/glennrp/libpng/archive/v1.2.53.tar.gz", ], sha256 = "716c59c7dfc808a4c368f8ada526932be72b2fcea11dd85dc9d88b1df1dfe9c2", strip_prefix = "libpng-1.2.53", @@ -351,6 +351,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): native.http_archive( name = "absl_py", urls = [ + "https://mirror.bazel.build/github.com/abseil/abseil-py/archive/231e3870b976c1dc61dce1749138661d21556028.tar.gz", "https://github.com/abseil/abseil-py/archive/231e3870b976c1dc61dce1749138661d21556028.tar.gz", ], sha256 = "8ea2b23bfdb9ae7622f3e5d95236bc600c8d8509a2f38c84732b3145585d4f73", @@ -372,7 +373,7 @@ def 
tf_workspace(path_prefix="", tf_repo_name=""): name = "com_github_andreif_codegen", urls = [ "https://mirror.bazel.build/github.com/andreif/codegen/archive/1.0.tar.gz", - # "https://github.com/andreif/codegen/archive/1.0.tar.gz", + "https://github.com/andreif/codegen/archive/1.0.tar.gz", ], sha256 = "2dadd04a2802de27e0fe5a19b76538f6da9d39ff244036afa00c1bba754de5ee", strip_prefix = "codegen-1.0", @@ -395,12 +396,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""): actual = "@six_archive//:six", ) - # TODO(gunan): Add github mirror back if/when sha256sum issues are resolved. - # See https://github.com/libgit2/libgit2/issues/4343 for contetxt. patched_http_archive( name = "protobuf_archive", urls = [ "https://mirror.bazel.build/github.com/google/protobuf/archive/b04e5cba356212e4e8c66c61bbe0c3a20537c5b9.tar.gz", + "https://github.com/google/protobuf/archive/b04e5cba356212e4e8c66c61bbe0c3a20537c5b9.tar.gz", ], sha256 = "e178a25c52efcb6b05988bdbeace4c0d3f2d2fe5b46696d1d9898875c3803d6a", strip_prefix = "protobuf-b04e5cba356212e4e8c66c61bbe0c3a20537c5b9", @@ -424,31 +424,31 @@ def tf_workspace(path_prefix="", tf_repo_name=""): # We need to import the protobuf library under the names com_google_protobuf # and com_google_protobuf_cc to enable proto_library support in bazel. # Unfortunately there is no way to alias http_archives at the moment. - # TODO(gunan): Add github mirror back if/when sha256sum issues are resolved. native.http_archive( name = "com_google_protobuf", urls = [ - "https://mirror.bazel.build/github.com/google/protobuf/archive/0b059a3d8a8f8aa40dde7bea55edca4ec5dfea66.tar.gz", + "https://mirror.bazel.build/github.com/google/protobuf/archive/b04e5cba356212e4e8c66c61bbe0c3a20537c5b9.tar.gz", + "https://github.com/google/protobuf/archive/b04e5cba356212e4e8c66c61bbe0c3a20537c5b9.tar.gz", ], - sha256 = "6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93", - strip_prefix = "protobuf-0b059a3d8a8f8aa40dde7bea55edca4ec5dfea66", + sha256 = "e178a25c52efcb6b05988bdbeace4c0d3f2d2fe5b46696d1d9898875c3803d6a", + strip_prefix = "protobuf-b04e5cba356212e4e8c66c61bbe0c3a20537c5b9", ) - # TODO(gunan): Add github mirror back if/when sha256sum issues are resolved. 
native.http_archive( name = "com_google_protobuf_cc", urls = [ - "https://mirror.bazel.build/github.com/google/protobuf/archive/0b059a3d8a8f8aa40dde7bea55edca4ec5dfea66.tar.gz", + "https://mirror.bazel.build/github.com/google/protobuf/archive/b04e5cba356212e4e8c66c61bbe0c3a20537c5b9.tar.gz", + "https://github.com/google/protobuf/archive/b04e5cba356212e4e8c66c61bbe0c3a20537c5b9.tar.gz", ], - sha256 = "6d43b9d223ce09e5d4ce8b0060cb8a7513577a35a64c7e3dad10f0703bf3ad93", - strip_prefix = "protobuf-0b059a3d8a8f8aa40dde7bea55edca4ec5dfea66", + sha256 = "e178a25c52efcb6b05988bdbeace4c0d3f2d2fe5b46696d1d9898875c3803d6a", + strip_prefix = "protobuf-b04e5cba356212e4e8c66c61bbe0c3a20537c5b9", ) native.http_archive( name = "nsync", urls = [ "https://mirror.bazel.build/github.com/google/nsync/archive/93815892dddafe9146a5f7e7042281d59d0f4323.tar.gz", - # "https://github.com/google/nsync/archive/93815892dddafe9146a5f7e7042281d59d0f4323.tar.gz", + "https://github.com/google/nsync/archive/93815892dddafe9146a5f7e7042281d59d0f4323.tar.gz", ], sha256 = "e3bd4555415ace511338fc27e595351738eea4e9006f1612b76c82914770716b", strip_prefix = "nsync-93815892dddafe9146a5f7e7042281d59d0f4323", @@ -458,7 +458,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "com_google_googletest", urls = [ "https://mirror.bazel.build/github.com/google/googletest/archive/9816b96a6ddc0430671693df90192bbee57108b6.zip", - # "https://github.com/google/googletest/archive/9816b96a6ddc0430671693df90192bbee57108b6.zip", + "https://github.com/google/googletest/archive/9816b96a6ddc0430671693df90192bbee57108b6.zip", ], sha256 = "9cbca84c4256bed17df2c8f4d00c912c19d247c11c9ba6647cd6dd5b5c996b8d", strip_prefix = "googletest-9816b96a6ddc0430671693df90192bbee57108b6", @@ -468,7 +468,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "com_github_gflags_gflags", urls = [ "https://mirror.bazel.build/github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz", - # "https://github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz", + "https://github.com/gflags/gflags/archive/f8a0efe03aa69b3336d8e228b37d4ccb17324b88.tar.gz", ], sha256 = "4d222fab8f1ede4709cdff417d15a1336f862d7334a81abf76d09c15ecf9acd1", strip_prefix = "gflags-f8a0efe03aa69b3336d8e228b37d4ccb17324b88", @@ -536,11 +536,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""): native.http_archive( name = "grpc", urls = [ - # "https://mirror.bazel.build/github.com/grpc/grpc/archive/54e8f37e537794c2d814c1604c1282125f64f093.tar.gz", + "https://mirror.bazel.build/github.com/grpc/grpc/archive/54e8f37e537794c2d814c1604c1282125f64f093.tar.gz", "https://github.com/grpc/grpc/archive/54e8f37e537794c2d814c1604c1282125f64f093.tar.gz", ], - sha256 = "c2166b6d96daddf72fe45b2c594210c65ca17ec3c1b2e12089159a9529edb5e4", - strip_prefix = "grpc-54e8f37e537794c2d814c1604c1282125f64f093", + sha256 = "c2166b6d96daddf72fe45b2c594210c65ca17ec3c1b2e12089159a9529edb5e4", + strip_prefix = "grpc-54e8f37e537794c2d814c1604c1282125f64f093", ) # gRPC wants the existence of a cares dependence but its contents are not @@ -567,7 +567,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7", urls = [ "https://mirror.bazel.build/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz", - # "https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz", + 
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz", ], strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3", build_file = str(Label("//third_party:linenoise.BUILD")), @@ -591,7 +591,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "lmdb", urls = [ "https://mirror.bazel.build/github.com/LMDB/lmdb/archive/LMDB_0.9.19.tar.gz", - # "https://github.com/LMDB/lmdb/archive/LMDB_0.9.19.tar.gz", + "https://github.com/LMDB/lmdb/archive/LMDB_0.9.19.tar.gz", ], sha256 = "108532fb94c6f227558d45be3f3347b52539f0f58290a7bb31ec06c462d05326", strip_prefix = "lmdb-LMDB_0.9.19/libraries/liblmdb", @@ -602,7 +602,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "jsoncpp_git", urls = [ "https://mirror.bazel.build/github.com/open-source-parsers/jsoncpp/archive/11086dd6a7eba04289944367ca82cea71299ed70.tar.gz", - # "https://github.com/open-source-parsers/jsoncpp/archive/11086dd6a7eba04289944367ca82cea71299ed70.tar.gz", + "https://github.com/open-source-parsers/jsoncpp/archive/11086dd6a7eba04289944367ca82cea71299ed70.tar.gz", ], sha256 = "07d34db40593d257324ec5fb9debc4dc33f29f8fb44e33a2eeb35503e61d0fe2", strip_prefix = "jsoncpp-11086dd6a7eba04289944367ca82cea71299ed70", @@ -618,6 +618,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "boringssl", urls = [ "https://mirror.bazel.build/github.com/google/boringssl/archive/a0fb951d2a26a8ee746b52f3ba81ab011a0af778.tar.gz", + "https://github.com/google/boringssl/archive/a0fb951d2a26a8ee746b52f3ba81ab011a0af778.tar.gz", ], sha256 = "524ba98a56300149696481b4cb9ddebd0c7b7ac9b9f6edee81da2d2d7e5d2bb3", strip_prefix = "boringssl-a0fb951d2a26a8ee746b52f3ba81ab011a0af778", @@ -653,7 +654,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "snappy", urls = [ "https://mirror.bazel.build/github.com/google/snappy/archive/1.1.4.tar.gz", - # "https://github.com/google/snappy/archive/1.1.4.tar.gz", + "https://github.com/google/snappy/archive/1.1.4.tar.gz", ], sha256 = "2f7504c73d85bac842e893340333be8cb8561710642fc9562fccdd9d2c3fcc94", strip_prefix = "snappy-1.1.4", @@ -665,7 +666,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "nccl_archive", urls = [ "https://mirror.bazel.build/github.com/nvidia/nccl/archive/03d856977ecbaac87e598c0c4bafca96761b9ac7.tar.gz", - # "https://github.com/nvidia/nccl/archive/03d856977ecbaac87e598c0c4bafca96761b9ac7.tar.gz", + "https://github.com/nvidia/nccl/archive/03d856977ecbaac87e598c0c4bafca96761b9ac7.tar.gz", ], sha256 = "2ca86fb6179ecbff789cc67c836139c1bbc0324ed8c04643405a30bf26325176", strip_prefix = "nccl-03d856977ecbaac87e598c0c4bafca96761b9ac7", @@ -676,8 +677,8 @@ def tf_workspace(path_prefix="", tf_repo_name=""): temp_workaround_http_archive( name = "aws", urls = [ - "http://bazel-mirror.storage.googleapis.com/github.com/aws/aws-sdk-cpp/archive/1.0.90.tar.gz", - # "https://github.com/aws/aws-sdk-cpp/archive/1.0.90.tar.gz", + "https://mirror.bazel.build/github.com/aws/aws-sdk-cpp/archive/1.0.90.tar.gz", + "https://github.com/aws/aws-sdk-cpp/archive/1.0.90.tar.gz", ], sha256 = "f599b57aec4f03ad696044dd430b2d201864113937353adc346f53ad47991319", strip_prefix = "aws-sdk-cpp-1.0.90", @@ -714,7 +715,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "jemalloc", urls = [ "https://mirror.bazel.build/github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz", - # "https://github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz", + "https://github.com/jemalloc/jemalloc/archive/4.4.0.tar.gz", ], sha256 = 
"3c8f25c02e806c3ce0ab5fb7da1817f89fc9732709024e2a81b6b82f7cc792a8", strip_prefix = "jemalloc-4.4.0", @@ -761,7 +762,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "com_google_pprof", urls = [ "https://mirror.bazel.build/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz", - # "https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz", + "https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz", ], sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4", strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650", @@ -772,7 +773,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "cub_archive", urls = [ "https://mirror.bazel.build/github.com/NVlabs/cub/archive/1.7.4.zip", - # "https://github.com/NVlabs/cub/archive/1.7.4.zip", + "https://github.com/NVlabs/cub/archive/1.7.4.zip", ], sha256 = "20a1a39fd97e5da7f40f5f2e7fd73fd2ea59f9dc4bb8a6c5f228aa543e727e31", strip_prefix = "cub-1.7.4", @@ -799,7 +800,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): name = "bazel_toolchains", urls = [ "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz", - # "https://github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz", ], sha256 = "d58bb2d6c8603f600d522b6104d6192a65339aa26cbba9f11ff5c4b36dedb928", strip_prefix = "bazel-toolchains-af4681c3d19f063f090222ec3d04108c4e0ca255", @@ -832,6 +833,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""): build_file = str(Label("//third_party:tflite_mobilenet.BUILD")), sha256 = "23f814d1c076bdf03715dfb6cab3713aa4fbdf040fd5448c43196bd2e97a4c1b", urls = [ - "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip" + "https://mirror.bazel.build/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip", + "https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_v1_224_android_quant_2017_11_08.zip", ], ) From 0adc3bf00c6b7073be7f15bdb9b556f02c87c4fb Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 16:12:59 -0800 Subject: [PATCH 076/104] Added ability to specify estimation mode in constructor of KfacOptimizer. This functions analogously to the estimation_mode argument to FisherEstimator (and is just passed directly to the FisherEstimator that the optimizer constructs). PiperOrigin-RevId: 175897318 --- tensorflow/contrib/kfac/python/ops/optimizer.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tensorflow/contrib/kfac/python/ops/optimizer.py b/tensorflow/contrib/kfac/python/ops/optimizer.py index bfa15e0948c..88299e495cb 100644 --- a/tensorflow/contrib/kfac/python/ops/optimizer.py +++ b/tensorflow/contrib/kfac/python/ops/optimizer.py @@ -44,7 +44,8 @@ class KfacOptimizer(gradient_descent.GradientDescentOptimizer): momentum=0., momentum_type="regular", norm_constraint=None, - name="KFAC",): + name="KFAC", + estimation_mode="gradients"): """Initializes the KFAC optimizer with the given settings. Args: @@ -72,6 +73,10 @@ class KfacOptimizer(gradient_descent.GradientDescentOptimizer): specified value. May only be used with momentum type 'regular'. (Default: None) name: The name for this optimizer. 
(Default: 'KFAC') + estimation_mode: The type of estimator to use for the Fishers. Can be + 'gradients', 'empirical', 'curvature_propagation', or 'exact'. + (Default: 'gradients'). See the doc-string for FisherEstimator for + more a more detailed description of these options. Raises: ValueError: If the momentum type is unsupported. @@ -86,7 +91,8 @@ class KfacOptimizer(gradient_descent.GradientDescentOptimizer): variables = tf_variables.trainable_variables() self._fisher_est = est.FisherEstimator(variables, cov_ema_decay, damping, - layer_collection) + layer_collection, + estimation_mode=estimation_mode) momentum_type = momentum_type.lower() legal_momentum_types = ["regular", "adam", "qmodel"] From 4f30c8a5c6eddea0d89ea1bc808900c830bd95b5 Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Wed, 15 Nov 2017 16:36:34 -0800 Subject: [PATCH 077/104] Improve docstring on contrib summary module PiperOrigin-RevId: 175900586 --- tensorflow/contrib/summary/summary.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tensorflow/contrib/summary/summary.py b/tensorflow/contrib/summary/summary.py index a73193f4608..f783179f614 100644 --- a/tensorflow/contrib/summary/summary.py +++ b/tensorflow/contrib/summary/summary.py @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== +"""TensorFlow Summary API v2. -"""Contrib summary package. - -The operations in this package are safe to use with eager execution turned or on -off. - +The operations in this package are safe to use with eager execution turned on or +off. It has a more flexible API that allows summaries to be written directly +from ops to places other than event log files, rather than propagating protos +from @{tf.summary.merge_all} to @{tf.summary.FileWriter}. """ from __future__ import absolute_import From 6d313682bb41a33eb53a47b3b8de9618f1278194 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 17:08:22 -0800 Subject: [PATCH 078/104] Add missing Unref of TensorReference in async CheckNumericsOp. PiperOrigin-RevId: 175904805 --- tensorflow/core/kernels/check_numerics_op.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/core/kernels/check_numerics_op.cc b/tensorflow/core/kernels/check_numerics_op.cc index 56cb50d2d18..534527c6bdc 100644 --- a/tensorflow/core/kernels/check_numerics_op.cc +++ b/tensorflow/core/kernels/check_numerics_op.cc @@ -168,10 +168,10 @@ class CheckNumericsOp : public AsyncOpKernel { abnormal_detected_host, context, done]() { ::perftools::gputools::cuda::ScopedActivateExecutorContext scoped_activation{stream->parent()}; - auto abnormal_detected_host_flat = abnormal_detected_host.flat(); int is_nan = abnormal_detected_host_flat(0); int is_inf = abnormal_detected_host_flat(1); + abnormal_detected_ref.Unref(); if (is_nan || is_inf) { string status; LOG(ERROR) << "abnormal_detected_host @" From 94f76e76f277565504b6f80f245152c7ff5f10e1 Mon Sep 17 00:00:00 2001 From: Sanjoy Das Date: Wed, 15 Nov 2017 17:13:09 -0800 Subject: [PATCH 079/104] Expose an Orc JIT memory mapper registry. XLA clients can use this registry to inject client-specific behavior into how Orc JIT's manages virtual memory. 
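As a rough usage sketch (illustrative only, not part of this change): a client links in a translation unit along these lines, using the XLA_REGISTER_ORC_JIT_MEMORY_MAPPER macro added in the new header below. The factory name here is hypothetical, and per that header a factory that yields a null unique_ptr is simply ignored.

// Hypothetical client code registering a custom Orc JIT memory mapper.
#include <memory>

#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h"

namespace {

// Factory for the client-specific mapper. A real client would return a
// subclass of llvm::SectionMemoryManager::MemoryMapper that customizes how
// JIT'd sections are mapped; returning nullptr keeps the default behavior,
// because registering a null mapper is a no-op.
std::unique_ptr<llvm::SectionMemoryManager::MemoryMapper> CreateClientMapper() {
  return nullptr;  // Placeholder: substitute a concrete MemoryMapper here.
}

}  // namespace

// Evaluated at static-initialization time; only one mapper may be registered.
XLA_REGISTER_ORC_JIT_MEMORY_MAPPER(CreateClientMapper());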
PiperOrigin-RevId: 175905401 --- tensorflow/compiler/xla/service/cpu/BUILD | 23 ++++++-- .../xla/service/cpu/orc_jit_memory_mapper.cc | 40 +++++++++++++ .../xla/service/cpu/orc_jit_memory_mapper.h | 56 +++++++++++++++++++ .../xla/service/cpu/simple_orc_jit.cc | 7 ++- tensorflow/compiler/xla/xla.bzl | 2 + tensorflow/workspace.bzl | 8 +-- 6 files changed, 125 insertions(+), 11 deletions(-) create mode 100644 tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc create mode 100644 tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD index 89e8d07200f..78216f2ffb9 100644 --- a/tensorflow/compiler/xla/service/cpu/BUILD +++ b/tensorflow/compiler/xla/service/cpu/BUILD @@ -17,6 +17,7 @@ package_group( load(":build_defs.bzl", "runtime_copts") load("//tensorflow:tensorflow.bzl", "tf_cc_test") load("//tensorflow:tensorflow.bzl", "tf_cc_binary") +load("//tensorflow/compiler/xla:xla.bzl", "ORC_JIT_MEMORY_MAPPER_TARGETS") # Filegroup used to collect source files for dependency checking. filegroup( @@ -157,21 +158,23 @@ cc_library( ":custom_call_target_registry", ":disassembler", ":external_constant_pool", + ":orc_jit_memory_mapper", ":runtime_conv2d", ":runtime_fork_join", ":runtime_matmul", ":runtime_single_threaded_conv2d", ":runtime_single_threaded_matmul", - "//tensorflow/compiler/xla:types", - "//tensorflow/compiler/xla:util", - "//tensorflow/core:lib", - "@llvm//:core", "@llvm//:execution_engine", + "@llvm//:core", "@llvm//:mc", # fixdeps: keep "@llvm//:orc_jit", "@llvm//:support", "@llvm//:target", # fixdeps: keep - ], + "//tensorflow/compiler/xla:types", + "//tensorflow/compiler/xla:util", + "//tensorflow/core:lib", + "//tensorflow/core:lib_internal", + ] + ORC_JIT_MEMORY_MAPPER_TARGETS, ) cc_library( @@ -737,6 +740,16 @@ cc_library( visibility = ["//visibility:public"], ) +cc_library( + name = "orc_jit_memory_mapper", + srcs = ["orc_jit_memory_mapper.cc"], + hdrs = ["orc_jit_memory_mapper.h"], + deps = [ + "//tensorflow/core:lib", + "@llvm//:execution_engine", + ], +) + # ----------------------------------------------------------------------------- filegroup( diff --git a/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc b/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc new file mode 100644 index 00000000000..e624e5cc7eb --- /dev/null +++ b/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc @@ -0,0 +1,40 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h" +#include "tensorflow/core/platform/logging.h" +#include "tensorflow/core/platform/mutex.h" + +namespace xla { +namespace cpu { +namespace orc_jit_memory_mapper { + +static tensorflow::mutex mapper_instance_mutex(tensorflow::LINKER_INITIALIZED); +static llvm::SectionMemoryManager::MemoryMapper* mapper_instance + GUARDED_BY(mapper_instance_mutex) = nullptr; + +llvm::SectionMemoryManager::MemoryMapper* GetInstance() { + tensorflow::mutex_lock lock(mapper_instance_mutex); + return mapper_instance; +} + +Registrar::Registrar( + std::unique_ptr mapper) { + tensorflow::mutex_lock lock(mapper_instance_mutex); + mapper_instance = mapper.release(); +} +} // namespace orc_jit_memory_mapper +} // namespace cpu +} // namespace xla diff --git a/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h b/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h new file mode 100644 index 00000000000..2d29550fd5b --- /dev/null +++ b/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h @@ -0,0 +1,56 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef THIRD_PARTY_TENSORFLOW_COMPILER_XLA_SERVICE_CPU_ORC_JIT_MEMORY_MAPPER_H_ +#define THIRD_PARTY_TENSORFLOW_COMPILER_XLA_SERVICE_CPU_ORC_JIT_MEMORY_MAPPER_H_ + +#include + +#include "llvm/ExecutionEngine/SectionMemoryManager.h" + +namespace xla { +namespace cpu { + +namespace orc_jit_memory_mapper { +// Returns the registered memory mapper if there is one. Returns nullptr if no +// memory mapper is registered. +llvm::SectionMemoryManager::MemoryMapper* GetInstance(); + +class Registrar { + public: + // Registers the `mapper` as a memory mapper. This is a no-op if `mapper` is + // null. Precondition: no other memory mapper has been registered yet. + explicit Registrar( + std::unique_ptr mapper); +}; +} // namespace orc_jit_memory_mapper + +#define XLA_INTERNAL_REGISTER_ORC_JIT_MEMORY_MAPPER(mapper_instance, ctr) \ + static ::xla::cpu::orc_jit_memory_mapper::Registrar \ + XLA_INTERNAL_REGISTER_ORC_JIT_MEMORY_MAPPER_NAME(ctr)(mapper_instance) + +// __COUNTER__ must go through another macro to be properly expanded +#define XLA_INTERNAL_REGISTER_ORC_JIT_MEMORY_MAPPER_NAME(ctr) \ + __orc_jit_memory_mapper_registrar_##ctr + +// Registers the std::unique_ptr +// returned by the `factory` expression. `factory` is allowed to evaluate to +// a null unique_ptr in which case this macro does nothing. 
+#define XLA_REGISTER_ORC_JIT_MEMORY_MAPPER(factory) \ + XLA_INTERNAL_REGISTER_ORC_JIT_MEMORY_MAPPER(factory, __COUNTER__) +} // namespace cpu +} // namespace xla + +#endif // THIRD_PARTY_TENSORFLOW_COMPILER_XLA_SERVICE_CPU_ORC_JIT_MEMORY_MAPPER_H_ diff --git a/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc b/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc index fdf02e5b422..db6c201876b 100644 --- a/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc +++ b/tensorflow/compiler/xla/service/cpu/simple_orc_jit.cc @@ -32,6 +32,7 @@ limitations under the License. #include "tensorflow/compiler/xla/service/cpu/cpu_runtime_neon.h" #include "tensorflow/compiler/xla/service/cpu/cpu_runtime_sse4_1.h" #include "tensorflow/compiler/xla/service/cpu/custom_call_target_registry.h" +#include "tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h" #include "tensorflow/compiler/xla/service/cpu/runtime_conv2d.h" #include "tensorflow/compiler/xla/service/cpu/runtime_fork_join.h" #include "tensorflow/compiler/xla/service/cpu/runtime_matmul.h" @@ -125,8 +126,10 @@ SimpleOrcJIT::SimpleOrcJIT(const llvm::TargetOptions& target_options, /*MAttrs=*/DetectMachineAttributes()))), disassembler_(*target_machine_), data_layout_(target_machine_->createDataLayout()), - object_layer_( - [] { return std::make_shared(); }), + object_layer_([] { + return std::make_shared( + orc_jit_memory_mapper::GetInstance()); + }), compile_layer_( object_layer_, CompilerFunctor(target_machine_.get(), &disassembler_, opt_level, diff --git a/tensorflow/compiler/xla/xla.bzl b/tensorflow/compiler/xla/xla.bzl index 3fa5bcc1df4..6b136d333bb 100644 --- a/tensorflow/compiler/xla/xla.bzl +++ b/tensorflow/compiler/xla/xla.bzl @@ -17,3 +17,5 @@ def xla_proto_library(name, srcs=[], deps=[], visibility=None, testonly=0): protoc="@protobuf_archive//:protoc", testonly=testonly, visibility=visibility,) + +ORC_JIT_MEMORY_MAPPER_TARGETS = [] diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl index cd4ea8a7d00..8e62228c1b7 100644 --- a/tensorflow/workspace.bzl +++ b/tensorflow/workspace.bzl @@ -578,11 +578,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""): temp_workaround_http_archive( name = "llvm", urls = [ - "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/618cf290880ae9cd87b4bbf6c9b1759476f422eb.tar.gz", - "https://github.com/llvm-mirror/llvm/archive/618cf290880ae9cd87b4bbf6c9b1759476f422eb.tar.gz", + "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/823bedeb8e23a095173389fa05680597eba3f569.tar.gz", + "https://github.com/llvm-mirror/llvm/archive/823bedeb8e23a095173389fa05680597eba3f569.tar.gz", ], - sha256 = "ec2e032e58372c614c41b539c0309baa91843c30d7a9c6dee647dcd24be02e3c", - strip_prefix = "llvm-618cf290880ae9cd87b4bbf6c9b1759476f422eb", + sha256 = "93464bc760fd0319ebd0a5831fe477fdc4954f3612a29cc64d7405eaee8e00b2", + strip_prefix = "llvm-823bedeb8e23a095173389fa05680597eba3f569", build_file = str(Label("//third_party/llvm:llvm.BUILD")), repository = tf_repo_name, ) From 05fe77d24a22f7f43362f94ebe1949e58f014e00 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 17:40:19 -0800 Subject: [PATCH 080/104] Fix MarkDown formatting of code block. 
PiperOrigin-RevId: 175908614 --- tensorflow/python/eager/backprop.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py index a2a7f1c0227..25f7ae785e6 100644 --- a/tensorflow/python/eager/backprop.py +++ b/tensorflow/python/eager/backprop.py @@ -305,6 +305,7 @@ def implicit_val_and_grad(f): is not known ahead of time. Example: + ```python dense_layer = tf.layers.Dense(1) def loss(x, y): @@ -378,6 +379,7 @@ def implicit_grad(f): is not known ahead of time. Example: + ```python dense_layer = tf.layers.Dense(1) def loss(x, y): From 1542d977f410eddf2896553fbbd5f697605d57c9 Mon Sep 17 00:00:00 2001 From: Allen Lavoie Date: Wed, 15 Nov 2017 17:44:18 -0800 Subject: [PATCH 081/104] Rename layers.base.Network -> layers.network.GraphNetwork Splits GraphNetwork out into a new file, moves some shared utility functions to layers.utils. Should have no functional changes. PiperOrigin-RevId: 175909000 --- tensorflow/python/BUILD | 36 +- .../keras/_impl/keras/engine/topology.py | 30 +- .../keras/_impl/keras/integration_test.py | 4 +- .../keras/_impl/keras/layers/wrappers.py | 6 +- tensorflow/python/layers/base.py | 951 +---------------- tensorflow/python/layers/base_test.py | 490 --------- tensorflow/python/layers/layers.py | 2 +- tensorflow/python/layers/network.py | 957 ++++++++++++++++++ tensorflow/python/layers/network_test.py | 525 ++++++++++ tensorflow/python/layers/utils.py | 17 + .../api/golden/tensorflow.keras.-model.pbtxt | 2 +- .../golden/tensorflow.keras.-sequential.pbtxt | 2 +- ...tensorflow.keras.layers.-input-layer.pbtxt | 2 +- .../tensorflow.keras.models.-model.pbtxt | 2 +- .../tensorflow.keras.models.-sequential.pbtxt | 2 +- 15 files changed, 1566 insertions(+), 1462 deletions(-) create mode 100644 tensorflow/python/layers/network.py create mode 100644 tensorflow/python/layers/network_test.py diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD index bc034e1902d..970f3ecaff2 100644 --- a/tensorflow/python/BUILD +++ b/tensorflow/python/BUILD @@ -3850,15 +3850,15 @@ py_library( deps = [ ":array_ops", ":control_flow_ops", - ":framework", ":framework_for_generated_wrappers", - ":init_ops", + ":platform", + ":tensor_util", ":util", ":variable_scope", ":variables", + "//tensorflow/python/eager:context", "//tensorflow/python/estimator:util", "//third_party/py/numpy", - "@six_archive//:six", ], ) @@ -3869,12 +3869,14 @@ py_library( "layers/core.py", "layers/layers.py", "layers/maxout.py", + "layers/network.py", "layers/normalization.py", "layers/pooling.py", ], srcs_version = "PY2AND3", deps = [ ":array_ops", + ":array_ops_gen", ":control_flow_ops", ":framework", ":framework_for_generated_wrappers", @@ -3882,12 +3884,18 @@ py_library( ":layers_base", ":math_ops", ":nn", + ":nn_ops", + ":platform", + ":resource_variable_ops", + ":resource_variable_ops_gen", ":standard_ops", + ":state_ops", ":training", ":util", ":variable_scope", ":variables", "//tensorflow/python/eager:context", + "//tensorflow/python/estimator:util", "//third_party/py/numpy", "@six_archive//:six", ], @@ -3900,14 +3908,36 @@ py_test( main = "layers/base_test.py", srcs_version = "PY2AND3", deps = [ + ":array_ops", ":client_testlib", ":framework_for_generated_wrappers", ":framework_test_lib", ":init_ops", ":layers", + ":layers_base", ":math_ops", ":random_ops", ":variable_scope", + "//tensorflow/python/eager:context", + ], +) + +py_test( + name = "layers_network_test", + size = "small", + srcs = ["layers/network_test.py"], + main = 
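As a minimal sketch of the renamed API (assuming the module layout introduced by this change, and mirroring the docstring and integration-test examples touched below):

# Hypothetical usage after the rename: GraphNetwork and Input now live in
# tensorflow/python/layers/network.py instead of layers/base.py.
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import network as network_layers
from tensorflow.python.ops import nn

x = network_layers.Input(shape=(32,))             # entry point into the graph
y = core_layers.Dense(16, activation=nn.relu)(x)
net = network_layers.GraphNetwork(x, y)           # formerly layers.base.Network(x, y)

# The network still exposes the usual Layer-style properties.
weights = net.trainable_weights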
"layers/network_test.py", + srcs_version = "PY2AND3", + deps = [ + ":array_ops", + ":client_testlib", + ":framework_for_generated_wrappers", + ":framework_test_lib", + ":layers", + ":layers_base", + ":sparse_ops", + "//tensorflow/python/eager:context", + "//third_party/py/numpy", ], ) diff --git a/tensorflow/python/keras/_impl/keras/engine/topology.py b/tensorflow/python/keras/_impl/keras/engine/topology.py index 814961bd1d4..4a7bb2e8389 100644 --- a/tensorflow/python/keras/_impl/keras/engine/topology.py +++ b/tensorflow/python/keras/_impl/keras/engine/topology.py @@ -36,6 +36,8 @@ from tensorflow.python.keras._impl.keras.utils import conv_utils from tensorflow.python.keras._impl.keras.utils.io_utils import ask_to_proceed_with_overwrite from tensorflow.python.keras._impl.keras.utils.layer_utils import print_summary as print_layer_summary from tensorflow.python.layers import base as tf_base_layers +from tensorflow.python.layers import network as tf_network +from tensorflow.python.layers import utils as tf_layers_util from tensorflow.python.platform import tf_logging as logging @@ -485,7 +487,7 @@ class Layer(tf_base_layers.Layer): self._activity_regularizer = activity_regularizer -class InputLayer(tf_base_layers.InputLayer, Layer): +class InputLayer(tf_network.InputLayer, Layer): """Layer to be used as an entry point into a graph. It can either wrap an existing tensor (pass an `input_tensor` argument) @@ -636,7 +638,7 @@ def Input( # pylint: disable=invalid-name return outputs -class Network(tf_base_layers.Network, Layer): +class Network(tf_network.GraphNetwork, Layer): """A Network is a directed acyclic graph of layers. It is the topological form of a "model". A Model @@ -681,8 +683,8 @@ class Network(tf_base_layers.Network, Layer): for x in self.inputs: mask = x._keras_mask if hasattr(x, '_keras_mask') else None masks.append(mask) - mask_cache_key = (tf_base_layers._object_list_uid(self.inputs) + '_' + - tf_base_layers._object_list_uid(masks)) + mask_cache_key = (tf_layers_util.object_list_uid(self.inputs) + '_' + + tf_layers_util.object_list_uid(masks)) masks = [] for x in self.outputs: mask = x._keras_mask if hasattr(x, '_keras_mask') else None @@ -798,8 +800,8 @@ class Network(tf_base_layers.Network, Layer): else: kept_nodes = 0 for original_node_index, node in enumerate(layer._inbound_nodes): - node_key = tf_base_layers._make_node_key(layer.name, - original_node_index) + node_key = tf_network._make_node_key(layer.name, + original_node_index) if node_key in self._network_nodes: node_conversion_map[node_key] = kept_nodes kept_nodes += 1 @@ -809,8 +811,8 @@ class Network(tf_base_layers.Network, Layer): layer_config = layer.get_config() filtered_inbound_nodes = [] for original_node_index, node in enumerate(layer._inbound_nodes): - node_key = tf_base_layers._make_node_key(layer.name, - original_node_index) + node_key = tf_network._make_node_key(layer.name, + original_node_index) if node_key in self._network_nodes: # The node is relevant to the model: # add to filtered_inbound_nodes. 
@@ -834,8 +836,8 @@ class Network(tf_base_layers.Network, Layer): inbound_layer = node.inbound_layers[i] node_index = node.node_indices[i] tensor_index = node.tensor_indices[i] - node_key = tf_base_layers._make_node_key(inbound_layer.name, - node_index) + node_key = tf_network._make_node_key(inbound_layer.name, + node_index) new_node_index = node_conversion_map.get(node_key, 0) node_data.append( [inbound_layer.name, new_node_index, tensor_index, kwargs]) @@ -852,8 +854,8 @@ class Network(tf_base_layers.Network, Layer): model_inputs = [] for i in range(len(self._input_layers)): layer, node_index, tensor_index = self._input_coordinates[i] - node_key = tf_base_layers._make_node_key(layer.name, - node_index) + node_key = tf_network._make_node_key(layer.name, + node_index) if node_key not in self._network_nodes: continue new_node_index = node_conversion_map[node_key] @@ -862,8 +864,8 @@ class Network(tf_base_layers.Network, Layer): model_outputs = [] for i in range(len(self._output_layers)): layer, node_index, tensor_index = self._output_coordinates[i] - node_key = tf_base_layers._make_node_key(layer.name, - node_index) + node_key = tf_network._make_node_key(layer.name, + node_index) if node_key not in self._network_nodes: continue new_node_index = node_conversion_map[node_key] diff --git a/tensorflow/python/keras/_impl/keras/integration_test.py b/tensorflow/python/keras/_impl/keras/integration_test.py index 871a8c73298..15c3d14727a 100644 --- a/tensorflow/python/keras/_impl/keras/integration_test.py +++ b/tensorflow/python/keras/_impl/keras/integration_test.py @@ -22,8 +22,8 @@ import numpy as np from tensorflow.python.keras._impl import keras from tensorflow.python.keras._impl.keras import testing_utils -from tensorflow.python.layers import base as tf_base_layers from tensorflow.python.layers import core as tf_core_layers +from tensorflow.python.layers import network as tf_network_layers from tensorflow.python.ops import nn from tensorflow.python.platform import test @@ -275,7 +275,7 @@ class KerasIntegrationTest(test.TestCase): y_train = keras.utils.to_categorical(y_train) y_test = keras.utils.to_categorical(y_test) - inputs = tf_base_layers.Input(shape=(10,)) + inputs = tf_network_layers.Input(shape=(10,)) x = tf_core_layers.Dense(32, activation=nn.relu)(inputs) outputs = tf_core_layers.Dense(2, activation=nn.softmax)(x) model = keras.models.Model(inputs, outputs) diff --git a/tensorflow/python/keras/_impl/keras/layers/wrappers.py b/tensorflow/python/keras/_impl/keras/layers/wrappers.py index 0e82005caad..aefa5a1c020 100644 --- a/tensorflow/python/keras/_impl/keras/layers/wrappers.py +++ b/tensorflow/python/keras/_impl/keras/layers/wrappers.py @@ -26,7 +26,7 @@ from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras.engine import InputSpec from tensorflow.python.keras._impl.keras.engine import Layer from tensorflow.python.keras._impl.keras.utils.generic_utils import has_arg -from tensorflow.python.layers import base as tf_base_layers +from tensorflow.python.layers import utils as tf_layers_util class Wrapper(Layer): @@ -77,7 +77,7 @@ class Wrapper(Layer): # get the updates from the inner layer. inner_inputs = inputs if inputs is not None: - uid = tf_base_layers._object_list_uid(inputs) + uid = tf_layers_util.object_list_uid(inputs) if uid in self._input_map: inner_inputs = self._input_map[uid] @@ -223,7 +223,7 @@ class TimeDistributed(Wrapper): input_length = K.shape(inputs)[1] # Shape: (num_samples * timesteps, ...). 
And track the # transformation in self._input_map. - input_uid = tf_base_layers._object_list_uid(inputs) + input_uid = tf_layers_util.object_list_uid(inputs) inputs = K.reshape(inputs, (-1,) + input_shape[2:]) self._input_map[input_uid] = inputs # (num_samples * timesteps, ...) diff --git a/tensorflow/python/layers/base.py b/tensorflow/python/layers/base.py index 55da959a492..9677db2bce1 100644 --- a/tensorflow/python/layers/base.py +++ b/tensorflow/python/layers/base.py @@ -30,6 +30,7 @@ from tensorflow.python.estimator import util as estimator_util from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape +from tensorflow.python.layers import utils as layers_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables as tf_variables @@ -250,7 +251,7 @@ class Layer(object): if inputs is not None: # We compute an ID that uniquely identifies the list of tensors. # This ID is order-sensitive. - inputs_hash = _object_list_uid(inputs) + inputs_hash = layers_util.object_list_uid(inputs) else: inputs_hash = None if inputs_hash not in self._per_input_updates: @@ -279,7 +280,7 @@ class Layer(object): if not inputs: inputs = None if inputs is not None: - inputs_hash = _object_list_uid(inputs) + inputs_hash = layers_util.object_list_uid(inputs) else: inputs_hash = None return self._per_input_updates.get(inputs_hash, []) @@ -326,7 +327,7 @@ class Layer(object): if inputs is not None: # We compute an ID that uniquely identifies the list of tensors. # This ID is order-sensitive. - inputs_hash = _object_list_uid(inputs) + inputs_hash = layers_util.object_list_uid(inputs) else: inputs_hash = None if inputs_hash not in self._per_input_losses: @@ -357,7 +358,7 @@ class Layer(object): if not inputs: inputs = None if inputs is not None: - inputs_hash = _object_list_uid(inputs) + inputs_hash = layers_util.object_list_uid(inputs) else: inputs_hash = None return self._per_input_losses.get(inputs_hash, []) @@ -1267,9 +1268,9 @@ class Node(object): # Following 2 properties: input and output shapes. # List of shape tuples, shapes of input_tensors. - self.input_shapes = [_static_shape(x) for x in input_tensors] + self.input_shapes = [layers_util.static_shape(x) for x in input_tensors] # List of shape tuples, shapes of output_tensors. - self.output_shapes = [_static_shape(x) for x in output_tensors] + self.output_shapes = [layers_util.static_shape(x) for x in output_tensors] # Optional keyword arguments to layer's `call`. self.arguments = arguments @@ -1327,926 +1328,6 @@ class _DeferredTensor(object): self.dtype.name) -class InputLayer(Layer): - """Layer to be used as an entry point into a Network (a graph of layers). - - It can either wrap an existing tensor (pass an `input_tensor` argument) - or create its a placeholder tensor (pass arguments `input_shape` - as well as `dtype`). - - It is generally recommend to use the functional layer API via `Input`, - (which creates an `InputLayer`) without directly using `InputLayer`. - - Arguments: - input_shape: Shape tuple (not including the batch axis), or `TensorShape` - instance (not including the batch axis). - batch_size: Optional input batch size (integer or None). - dtype: Datatype of the input. - input_tensor: Optional tensor to use as layer input - instead of creating a placeholder. - sparse: Boolean, whether the placeholder created - is meant to be sparse. - name: Name of the layer (string). 
- - Raises: - RuntimeError: If created in Eager mode. - """ - - def __init__(self, - input_shape=None, - batch_size=None, - dtype=dtypes.float32, - input_tensor=None, - sparse=False, - name=None): - super(InputLayer, self).__init__(dtype=dtype, name=name) - self.built = True - self.sparse = sparse - self.batch_size = batch_size - - if isinstance(input_shape, tensor_shape.TensorShape): - input_shape = tuple(input_shape.as_list()) - - if input_tensor is None: - if input_shape is not None: - batch_input_shape = (batch_size,) + tuple(input_shape) - else: - batch_input_shape = None - - if context.in_eager_mode(): - # In eager mode, create a temporary placeholder to call the layer on. - input_tensor = _DeferredTensor( - shape=batch_input_shape, - dtype=dtype, - name=self.name) - else: - # In graph mode, create a graph placeholder to call the layer on. - if sparse: - input_tensor = array_ops.sparse_placeholder( - shape=batch_input_shape, - dtype=dtype, - name=self.name) - else: - input_tensor = array_ops.placeholder( - shape=batch_input_shape, - dtype=dtype, - name=self.name) - - # For compatibility with Keras API. - self.is_placeholder = True - self._batch_input_shape = batch_input_shape - else: - # For compatibility with Keras API. - self.is_placeholder = False - self._batch_input_shape = tuple(input_tensor.get_shape().as_list()) - - # Create an input node to add to self.outbound_node - # and set output_tensors' _keras_history. - input_tensor._keras_history = (self, 0, 0) # pylint: disable=protected-access - Node( - self, - inbound_layers=[], - node_indices=[], - tensor_indices=[], - input_tensors=[input_tensor], - output_tensors=[input_tensor]) - - -def Input( # pylint: disable=invalid-name - shape=None, - batch_size=None, - name=None, - dtype=dtypes.float32, - sparse=False, - tensor=None): - """`Input()` is used to instantiate an input tensor for use with a `Network`. - - For instance, if a, b and c are tensors created via `Input`, - it becomes possible to do: - - `network = Network(inputs=[a, b], outputs=c)` - - Example: - - ```python - # This is a logistic regression - x = tf.layers.Input(shape=(32,)) - y = tf.layers.Dense(16, activation='softmax')(x) - network = tf.layers.Network(x, y) - ``` - - Arguments: - shape: A shape tuple (integer), not including the batch size. - For instance, `shape=(32,)` indicates that the expected input - will be batches of 32-dimensional vectors. - batch_size: Optional input batch size (integer or None). - name: An optional name string for the layer. - Should be unique in a model (do not reuse the same name twice). - It will be autogenerated if it isn't provided. - dtype: The data type expected by the input, as a string - (`float32`, `float64`, `int32`...) - sparse: A boolean specifying whether the placeholder - to be created is sparse. - tensor: Optional existing tensor to wrap into the `Input` layer. - If set, the layer will not create a placeholder tensor. - - Returns: - A tensor: either a new placeholder (with history metadata) or - `tensor` (if passed), with added history metadata. - - Raises: - RuntimeError: If called in Eager mode. - """ - input_layer = InputLayer( - input_shape=shape, - batch_size=batch_size, - name=name, - dtype=dtype, - sparse=sparse, - input_tensor=tensor) - # Return tensor including `_keras_history` metadata. - # Note that in this case train_output and test_output are the same pointer. 
- outputs = input_layer._inbound_nodes[0].output_tensors # pylint: disable=protected-access - if len(outputs) == 1: - return outputs[0] - else: - return outputs - - -class Network(Layer): - """A Network is a directed acyclic graph of layers. - - It is the topological form of a "model". - A Model is simply a Network with added training/evaluation routines. - - A Network instance implements the full Layer API. In particular, a network - can be called on new inputs. - - Example: - - ```python - # This is a logistic regression - x = tf.layers.Input(shape=(32,)) - y = tf.layers.Dense(16, activation='softmax')(x) - network = tf.layers.Network(x, y) - - # It is then possible to call the network on compatible inputs: - z = tf.layers.Input(shape=(32,)) - w = network(z) - - # It is possible to retrieve the same properties as a layer: - weights = network.trainable_weights - ``` - - Arguments: - inputs: Input tensor or list of input tensors. - Must come from `tf.layers.Input`. - output: Output tensor or list of output tensors. Must come from - tf.layers Layers or Keras layers. - name: Optional name of the model (string). - - Attributes: - Network has the same attributes as Layer. On top of it, it also has: - - layers: a list of the children layers of the network, - a list of layer instances, ordered from "earlier in the graph" - to "later in the graph". - - Methods: - Network has the same methods as Layer. On top of it, it also has: - - get_layer: retrieves a child layer by name or index in the graph. - - Raises: - RuntimeError: If created in Eager mode. - """ - - def __init__(self, inputs, outputs, name=None): # pylint: disable=super-init-not-called - if context.in_eager_mode(): - # TODO(fchollet): check that all inputs and outputs are DeferredTensors. - pass - - self._init_set_name(name) - self._activity_regularizer = None - with vs.variable_scope( - None, default_name=self._base_name) as captured_scope: - self._scope = captured_scope - call_fn_args = estimator_util.fn_args(self.call) - self._compute_previous_mask = ('mask' in call_fn_args or - hasattr(self, 'compute_mask')) - self._call_has_scope_arg = 'scope' in call_fn_args - - # This acts just like the `trainable` attribute of any layer instance. - # It does not affect users of the underlying layers, only users of the - # Network instance. - self.trainable = True - # A Network does not create weights of its own, thus it is already built. - self.built = True - # A Network does not create weights of its own, thus has no dtype. - self._dtype = None - # The following are implemented as property functions: - # self.trainable_weights - # self.non_trainable_weights - # self.input_spec - - # Private attributes to implement compatibility with Layer. - self._per_input_losses = {} - self._per_input_updates = {} - self._updates = [] - self._losses = [] - self._scope = None - self._reuse = None - self._graph = ops.get_default_graph() - - # Network-specific properties. - if isinstance(inputs, (list, tuple)): - self.inputs = list(inputs) # Tensor or list of tensors. - else: - self.inputs = [inputs] - if isinstance(outputs, (list, tuple)): - self.outputs = list(outputs) - else: - self.outputs = [outputs] - # All layers in order of horizontal graph traversal. - # Entries are unique. Includes input and output layers. - self.layers = [] - - # Check for redundancy in inputs. - if len(set(self.inputs)) != len(self.inputs): - raise ValueError('The list of inputs passed to the model ' - 'is redundant. ' - 'All inputs should only appear once.' 
- ' Found: ' + str(self.inputs)) - - # # List of initial layers (1 to 1 mapping with self.inputs, - # # hence the same layer might appear twice) - # self._input_layers = [] - # self._input_layers_node_indices = [] - # self._input_layers_tensor_indices = [] - # # list of layers (1 to 1 mapping with self.inputs, - # # hence the same layer might appear twice) - # self._output_layers = [] - # self._output_layers_node_indices = [] - # self._output_layers_tensor_indices = [] - - self._input_layers = [] - self._output_layers = [] - self._input_coordinates = [] - self._output_coordinates = [] - - # This is for performance optimization - # when calling the Network on new inputs. - # every time the Network is called on a set on input tensors, - # we compute the output tensors, - # output masks and output shapes in one pass, - # then cache them here. When any of these outputs is queried later, - # we retrieve it from there instead of recomputing it. - self._output_mask_cache = {} - self._output_tensor_cache = {} - self._output_shape_cache = {} - - # User-provided arguments validation. - for x in self.inputs: - # Check that x has appropriate `_keras_history` metadata. - if not hasattr(x, '_keras_history'): - cls_name = self.__class__.__name__ - raise ValueError('Input tensors to a ' + cls_name + ' ' + - 'must come from `tf.layers.Input`. ' - 'Received: ' + str(x) + - ' (missing previous layer metadata).') - # Check that x is an input tensor. - # pylint: disable=protected-access - layer, node_index, tensor_index = x._keras_history - if len(layer._inbound_nodes) > 1 or ( - layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers): - cls_name = self.__class__.__name__ - logging.warning(cls_name + ' inputs must come from ' - '`tf.layers.Input` (thus holding past layer metadata), ' - 'they cannot be the output of ' - 'a previous non-Input layer. ' - 'Here, a tensor specified as ' - 'input to "' + self.name + '" was not an Input tensor, ' - 'it was generated by layer ' + layer.name + '.\n' - 'Note that input tensors are ' - 'instantiated via `tensor = tf.layers.Input(shape)`.\n' - 'The tensor that caused the issue was: ' + str(x.name)) - # pylint: enable=protected-access - for x in self.outputs: - if not hasattr(x, '_keras_history'): - cls_name = self.__class__.__name__ - raise ValueError('Output tensors to a ' + cls_name + ' must be ' - 'the output of a TensorFlow `Layer` ' - '(thus holding past layer metadata). Found: ' + str(x)) - - # Build self._output_layers: - for x in self.outputs: - layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access - self._output_layers.append(layer) - self._output_coordinates.append((layer, node_index, tensor_index)) - - # Build self._input_layers: - for x in self.inputs: - layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access - # It's supposed to be an input layer, so only one node - # and one tensor output. - assert node_index == 0 - assert tensor_index == 0 - self._input_layers.append(layer) - self._input_coordinates.append((layer, node_index, tensor_index)) - - # Network_nodes: set of nodes included in the graph - # (not all nodes included in the layers - # are relevant to the current graph). 
- network_nodes = set() # ids of all nodes relevant to the Network - nodes_depths = {} # dict {node: depth value} - layers_depths = {} # dict {layer: depth value} - layer_indices = {} # dict {layer: index in traversal} - nodes_in_decreasing_depth = [] - - def build_map_of_graph(tensor, - finished_nodes, - nodes_in_progress, - layer, - node_index, - tensor_index): - """Builds a map of the graph of layers. - - This recursively updates the map `layer_indices`, - the list `nodes_in_decreasing_depth` and the set `network_nodes`. - - Arguments: - tensor: Some tensor in a graph. - finished_nodes: Set of nodes whose subgraphs have been traversed - completely. Useful to prevent duplicated work. - nodes_in_progress: Set of nodes that are currently active on the - recursion stack. Useful to detect cycles. - layer: Layer from which `tensor` comes from. If not provided, - will be obtained from `tensor._keras_history`. - node_index: Node index from which `tensor` comes from. - tensor_index: Tensor_index from which `tensor` comes from. - - Raises: - ValueError: if a cycle is detected. - """ - node = layer._inbound_nodes[node_index] # pylint: disable=protected-access - - # Prevent cycles. - if node in nodes_in_progress: - raise ValueError('The tensor ' + str(tensor) + ' at layer "' + - layer.name + '" is part of a cycle.') - - # Don't repeat work for shared subgraphs - if node in finished_nodes: - return - - node_key = _make_node_key(layer.name, node_index) - # Update network_nodes. - network_nodes.add(node_key) - - # Store the traversal order for layer sorting. - if layer not in layer_indices: - layer_indices[layer] = len(layer_indices) - - nodes_in_progress.add(node) - - # Propagate to all previous tensors connected to this node. - for i in range(len(node.inbound_layers)): - x = node.input_tensors[i] - layer = node.inbound_layers[i] - node_index = node.node_indices[i] - tensor_index = node.tensor_indices[i] - build_map_of_graph(x, finished_nodes, nodes_in_progress, layer, - node_index, tensor_index) - - finished_nodes.add(node) - nodes_in_progress.remove(node) - nodes_in_decreasing_depth.append(node) - - finished_nodes = set() - nodes_in_progress = set() - for x in self.outputs: - layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access - build_map_of_graph(x, finished_nodes, nodes_in_progress, - layer=layer, - node_index=node_index, - tensor_index=tensor_index) - - for node in reversed(nodes_in_decreasing_depth): - # If the depth is not set, the node has no outbound nodes (depth 0). - depth = nodes_depths.setdefault(node, 0) - - # Update the depth of the corresponding layer - previous_depth = layers_depths.get(node.outbound_layer, 0) - # If we've seen this layer before at a higher depth, - # we should use that depth instead of the node depth. - # This is necessary for shared layers that have inputs at different - # depth levels in the graph. - depth = max(depth, previous_depth) - layers_depths[node.outbound_layer] = depth - nodes_depths[node] = depth - - # Update the depth of inbound nodes. - # The "depth" of a node is the max of the depths - # of all layers it is connected to. 
- for i in range(len(node.inbound_layers)): - inbound_layer = node.inbound_layers[i] - node_index = node.node_indices[i] - inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access - previous_depth = nodes_depths.get(inbound_node, 0) - nodes_depths[inbound_node] = max(depth + 1, previous_depth) - - # Build a dict {depth: list of nodes with this depth} - nodes_by_depth = {} - for node, depth in nodes_depths.items(): - if depth not in nodes_by_depth: - nodes_by_depth[depth] = [] - nodes_by_depth[depth].append(node) - - # Build a dict {depth: list of layers with this depth} - layers_by_depth = {} - for layer, depth in layers_depths.items(): - if depth not in layers_by_depth: - layers_by_depth[depth] = [] - layers_by_depth[depth].append(layer) - - # Get sorted list of layer depths. - depth_keys = list(layers_by_depth.keys()) - depth_keys.sort(reverse=True) - - # Set self.layers and self._layers_by_depth. - layers = [] - for depth in depth_keys: - layers_for_depth = layers_by_depth[depth] - # Network.layers needs to have a deterministic order: - # here we order them by traversal order. - layers_for_depth.sort(key=lambda x: layer_indices[x]) - layers.extend(layers_for_depth) - self.layers = layers - self._layers_by_depth = layers_by_depth - - # Get sorted list of node depths. - depth_keys = list(nodes_by_depth.keys()) - depth_keys.sort(reverse=True) - - # Check that all tensors required are computable. - # computable_tensors: all tensors in the graph - # that can be computed from the inputs provided. - computable_tensors = [] - for x in self.inputs: - computable_tensors.append(x) - - layers_with_complete_input = [] # To provide a better error msg. - for depth in depth_keys: - for node in nodes_by_depth[depth]: - layer = node.outbound_layer - if layer: - for x in node.input_tensors: - if x not in computable_tensors: - raise ValueError('Graph disconnected: ' - 'cannot obtain value for tensor ' + str(x) + - ' at layer "' + layer.name + '". ' - 'The following previous layers ' - 'were accessed without issue: ' + - str(layers_with_complete_input)) - for x in node.output_tensors: - computable_tensors.append(x) - layers_with_complete_input.append(layer.name) - - # Keep track of the network's nodes. - self._network_nodes = network_nodes - self._nodes_by_depth = nodes_by_depth - - # Ensure name unicity, which will be crucial for serialization - # (since serialized nodes refer to layers by their name). - all_names = [layer.name for layer in self.layers] - for name in all_names: - if all_names.count(name) != 1: - raise ValueError('The name "' + name + '" is used ' + - str(all_names.count(name)) + ' times in the model. ' - 'All layer names should be unique.') - - # Layer parameters. - # The new network starts with a single inbound node - # for its inputs, and no outbound nodes. - self._outbound_nodes = [] # Will be appended to by future calls to __call__ - self._inbound_nodes = [ - ] # Will be appended to below, and by future calls to __call__ - # Create the node linking internal inputs to internal outputs. - Node( - outbound_layer=self, - inbound_layers=[], - node_indices=[], - tensor_indices=[], - input_tensors=self.inputs, - output_tensors=self.outputs) - - def get_layer(self, name=None, index=None): - """Retrieves a layer based on either its name (unique) or index. - - Indices are based on order of horizontal graph traversal (bottom-up). - - Arguments: - name: String, name of layer. - index: Integer, index of layer. - - Returns: - A layer instance. 
- - Raises: - ValueError: In case of invalid layer name or index. - """ - # TODO(fchollet): We could build a dictionary based on layer names - # since they are constant, but we have not done that yet. - if index is not None: - if len(self.layers) <= index: - raise ValueError('Was asked to retrieve layer at index ' + str(index) + - ' but model only has ' + str(len(self.layers)) + - ' layers.') - else: - return self.layers[index] - else: - if not name: - raise ValueError('Provide either a layer name or layer index.') - for layer in self.layers: - if layer.name == name: - return layer - raise ValueError('No such layer: ' + name) - - @property - def updates(self): - """Retrieve the network's updates. - - Will only include updates that are either - unconditional, or conditional on inputs to this model - (e.g. will not include updates that depend on tensors - that aren't inputs to this model). - - Returns: - A list of update ops. - """ - updates = [] - for layer in self.layers: - if hasattr(layer, 'updates'): - # Collect updates that are dependent on inputs - # that are part of the model. - for node_index, node in enumerate(layer._inbound_nodes): # pylint: disable=protected-access - node_key = _make_node_key(layer.name, node_index) - if node_key in self._network_nodes: - # The model owns this layer node. - inputs = node.input_tensors - updates += layer.get_updates_for(inputs) - # Collect unconditional updates. - updates += layer.get_updates_for(None) - return updates - - @property - def losses(self): - """Retrieve the network's losses. - - Will only include losses that are either - unconditional, or conditional on inputs to this model - (e.g. will not include losses that depend on tensors - that aren't inputs to this model). - - Returns: - A list of loss tensors. - """ - losses = [] - # Retrieve losses for all internal layers. - for layer in self.layers: - if hasattr(layer, 'losses'): - # Collect losses that are dependent on inputs - # that are part of the model. - for node_index, node in enumerate(layer._inbound_nodes): # pylint: disable=protected-access - node_key = _make_node_key(layer.name, node_index) - if node_key in self._network_nodes: - # The model owns this layer node. - inputs = node.input_tensors - losses += layer.get_losses_for(inputs) - # Collect unconditional losses. - losses += layer.get_losses_for(None) - # Add any potential unconditional model-level loss. - losses += self.get_losses_for(None) - return losses - - @property - def trainable_weights(self): - if not self.trainable: - return [] - weights = [] - for layer in self.layers: - weights += layer.trainable_weights - return weights - - @property - def non_trainable_weights(self): - weights = [] - for layer in self.layers: - weights += layer.non_trainable_weights - if not self.trainable: - trainable_weights = [] - for layer in self.layers: - trainable_weights += layer.trainable_weights - return trainable_weights + weights - return weights - - @property - def input_spec(self): - """Gets the network's input specs. - - Returns: - A list of `InputSpec` instances (one per input to the model) - or a single instance if the model has only one input. - """ - specs = [] - for layer in self._input_layers: - if layer.input_spec is None: - specs.append(None) - else: - if not isinstance(layer.input_spec, list): - raise TypeError('Layer ' + layer.name + - ' has an input_spec attribute that ' - 'is not a list. We expect a list. 
' - 'Found input_spec = ' + str(layer.input_spec)) - specs += layer.input_spec - if len(specs) == 1: - return specs[0] - return specs - - def call(self, inputs, mask=None): - """Call the model on new inputs. - - In this case `call` just reapplies - all ops in the graph to the new inputs - (e.g. build a new computational graph from the provided inputs). - - Arguments: - inputs: A tensor or list of tensors. - mask: A mask or list of masks. A mask can be - either a tensor or None (no mask). - - Returns: - A tensor if there is a single output, or - a list of tensors if there are more than one outputs. - """ - inputs = nest.flatten(inputs) - if mask is None: - masks = [None for _ in range(len(inputs))] - else: - masks = nest.flatten(mask) - - if context.in_graph_mode(): - # Try to retrieve cached outputs if the layer has already been called - # on these exact inputs. - cache_key = _object_list_uid(inputs) + '_' + _object_list_uid(masks) - if cache_key in self._output_tensor_cache: - # Cache hit. - return self._output_tensor_cache[cache_key] - # Actually apply the network graph to the new inputs. - outputs, _ = self._run_internal_graph(inputs, masks) - return outputs - - def _compute_output_shape(self, input_shape): - if isinstance(input_shape, list): - input_shapes = [] - for shape in input_shape: - if shape is not None: - input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list())) - else: - input_shapes.append(None) - else: - if input_shape is not None: - input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())] - else: - input_shapes = [None] - - if len(input_shapes) != len(self._input_layers): - raise ValueError('Invalid input_shape argument ' + str(input_shape) + - ': model has ' + str(len(self._input_layers)) + - ' tensor inputs.') - - cache_key = _object_list_uid(input_shapes) - if cache_key not in self._output_shape_cache: - # Cache miss. We have to run the network graph manually (recursive calls - # to `_compute_output_shape`). - layers_to_output_shapes = {} - for i in range(len(input_shapes)): - layer = self._input_layers[i] - input_shape = input_shapes[i] - # It's an input layer: then `_compute_output_shape` is identity, - # and there is only one node and one tensor output. - shape_key = layer.name + '_0_0' - layers_to_output_shapes[shape_key] = input_shape - - depth_keys = list(self._nodes_by_depth.keys()) - depth_keys.sort(reverse=True) - # Iterate over nodes, by depth level. - if len(depth_keys) > 1: - for depth in depth_keys: - nodes = self._nodes_by_depth[depth] - for node in nodes: - # This is always a single layer, never a list. - layer = node.outbound_layer - if layer in self._input_layers: - # We've already covered the input layers - # a few lines above. - continue - # Potentially redundant list, - # same size as node.input_tensors. 
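The manual shape walk that starts here keys every intermediate result by a `'<layer name>_<node index>_<tensor index>'` string. A minimal sketch of that bookkeeping, with invented layer names and shapes:

```python
layers_to_output_shapes = {}

# Input layers have a single node (index 0) with a single output tensor
# (index 0), hence the '_0_0' suffix.
layers_to_output_shapes['input_1_0_0'] = (None, 32)

# A downstream layer reads its input shape back under the same scheme ...
inbound_shape = layers_to_output_shapes['input_1' + '_%s_%s' % (0, 0)]
# ... and publishes its own output shape under its own coordinates.
layers_to_output_shapes['dense_1' + '_%s_%s' % (0, 0)] = (inbound_shape[0], 16)

print(layers_to_output_shapes['dense_1_0_0'])  # (None, 16)
```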
- input_shapes = [] - for j in range(len(node.inbound_layers)): - inbound_layer = node.inbound_layers[j] - node_index = node.node_indices[j] - tensor_index = node.tensor_indices[j] - shape_key = inbound_layer.name + '_%s_%s' % (node_index, - tensor_index) - input_shape = layers_to_output_shapes[shape_key] - input_shapes.append(input_shape) - - if len(input_shapes) == 1: - output_shape = layer._compute_output_shape(input_shapes[0]) # pylint: disable=protected-access - else: - output_shape = layer._compute_output_shape(input_shapes) # pylint: disable=protected-access - if isinstance(output_shape, list): - output_shapes = [ - tuple(tensor_shape.TensorShape(shape).as_list()) - for shape in output_shape - ] - else: - output_shapes = [ - tuple(tensor_shape.TensorShape(output_shape).as_list()) - ] - - node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access - for j in range(len(output_shapes)): - shape_key = layer.name + '_%s_%s' % (node_index, j) - layers_to_output_shapes[shape_key] = output_shapes[j] - - # Read final output shapes from layers_to_output_shapes. - output_shapes = [] - for i in range(len(self._output_layers)): - layer, node_index, tensor_index = self._output_coordinates[i] - shape_key = layer.name + '_%s_%s' % (node_index, tensor_index) - output_shapes.append(layers_to_output_shapes[shape_key]) - - # Store in cache. - self._output_shape_cache[cache_key] = output_shapes - else: - # Cache hit. - output_shapes = self._output_shape_cache[cache_key] - - if isinstance(output_shapes, list): - if len(output_shapes) == 1: - return tensor_shape.TensorShape(output_shapes[0]) - else: - return [tensor_shape.TensorShape(shape) for shape in output_shapes] - else: - return tensor_shape.TensorShape(output_shapes) - - def _run_internal_graph(self, inputs, masks=None): - """Computes output tensors for new inputs. - - # Note: - - Expects `inputs` to be a list (potentially with 1 element). - - Can be run on non-Keras tensors. - - Arguments: - inputs: List of tensors - masks: List of masks (tensors or None). - - Returns: - Three lists: output_tensors, output_masks, output_shapes - """ - # Note: masking support is relevant mainly for Keras. - # It cannot be factored out without having the fully reimplement the - # network calling logic on the Keras side. We choose to incorporate it - # in Network because 1) it may be useful to fully support in tf.layers in - # the future and 2) Keras is a major user of Network. - # If you don't use masking, it does not interfere with regular behavior - # at all and you can ignore it. - if masks is None: - masks = [None for _ in range(len(inputs))] - - # Dictionary mapping reference tensors to tuples - # (computed tensor, compute mask) - # we assume a 1:1 mapping from tensor to mask - # TODO(fchollet): raise exception when a `.compute_mask()` call - # does not return a list the same size as `call` - tensor_map = {} - for x, y, mask in zip(self.inputs, inputs, masks): - tensor_map[str(id(x))] = (y, mask) - - depth_keys = list(self._nodes_by_depth.keys()) - depth_keys.sort(reverse=True) - for depth in depth_keys: - nodes = self._nodes_by_depth[depth] - for node in nodes: - # This is always a single layer, never a list. - layer = node.outbound_layer - - reference_input_tensors = node.input_tensors - reference_output_tensors = node.output_tensors - - # If all previous input tensors are available in tensor_map, - # then call node.inbound_layer on them. - computed_data = [] # List of tuples (input, mask). 
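A condensed, non-TensorFlow sketch of the readiness rule used in this loop: `tensor_map` maps reference tensors to their computed `(tensor, mask)` pairs, and a node only fires once every one of its reference inputs has an entry (plain string keys stand in for the real `str(id(x))` keys).

```python
tensor_map = {'ref_a': ('new_a', None)}          # (computed tensor, mask)

reference_input_tensors = ['ref_a', 'ref_b']     # what the node expects
computed_data = [tensor_map[x] for x in reference_input_tensors
                 if x in tensor_map]

if len(computed_data) == len(reference_input_tensors):
  print('all inputs ready; the layer would be called here')
else:
  print('node skipped for now; %d of %d inputs available'
        % (len(computed_data), len(reference_input_tensors)))
```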
- for x in reference_input_tensors: - if str(id(x)) in tensor_map: - computed_data.append(tensor_map[str(id(x))]) - - if len(computed_data) == len(reference_input_tensors): - # Call layer (reapplying ops to new inputs). - with ops.name_scope(layer.name): - if node.arguments: - kwargs = node.arguments - else: - kwargs = {} - if len(computed_data) == 1: - computed_tensor, computed_mask = computed_data[0] - # Ensure mask propagation if applicable. - if 'mask' in estimator_util.fn_args(layer.call): - if 'mask' not in kwargs: - kwargs['mask'] = computed_mask - - output_tensors = nest.flatten( - layer.call(computed_tensor, **kwargs)) - if hasattr(layer, 'compute_mask'): - output_masks = nest.flatten( - layer.compute_mask(computed_tensor, computed_mask)) - else: - output_masks = [None for _ in range(len(output_tensors))] - computed_tensors = [computed_tensor] - computed_masks = [computed_mask] - else: - computed_tensors = [x[0] for x in computed_data] - computed_masks = [x[1] for x in computed_data] - if 'mask' in estimator_util.fn_args(layer.call): - if 'mask' not in kwargs: - kwargs['mask'] = computed_masks - output_tensors = nest.flatten( - layer.call(computed_tensors, **kwargs)) - if hasattr(layer, 'compute_mask'): - output_masks = nest.flatten( - layer.compute_mask(computed_tensors, computed_masks)) - else: - output_masks = [None for _ in range(len(output_tensors))] - - # Apply activity regularizer if any: - if layer.activity_regularizer is not None: - regularization_losses = [ - layer.activity_regularizer(x) for x in computed_tensors - ] - layer.add_loss(regularization_losses, computed_tensors) - - if context.in_graph_mode(): - # Update model updates and losses: - # Keep track of updates that depend on the inputs - # (e.g. BN updates). - self.add_update(layer.get_updates_for(computed_tensors), inputs) - # Keep track of unconditional updates (e.g. a counter). - self.add_update(layer.get_updates_for(None), None) - # Keep track of losses that depend on the inputs - # (e.g. activity regularizers). - self.add_loss(layer.get_losses_for(computed_tensors), inputs) - # Keep track of unconditional losses - # (e.g. weight regularizers). - self.add_loss(layer.get_losses_for(None), None) - - # Update tensor_map. - for x, y, mask in zip(reference_output_tensors, output_tensors, - output_masks): - tensor_map[str(id(x))] = (y, mask) - - output_tensors = [] - output_masks = [] - output_shapes = [] - for x in self.outputs: - assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x) - tensor, mask = tensor_map[str(id(x))] - output_shapes.append(_static_shape(x)) - output_tensors.append(tensor) - output_masks.append(mask) - - if len(output_tensors) == 1: - output_tensors = output_tensors[0] - if output_shapes is not None: - output_shapes = output_shapes[0] - if output_masks is not None: - output_masks = output_masks[0] - - if context.in_graph_mode(): - # Update cache; - # keys are based on ids on input tensors and inputs masks. 
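The cache keys mentioned in this comment are built from the ids of the input and mask objects. A simplified version of the `_object_list_uid` helper removed further down (the real helper flattens its argument with `nest.flatten` rather than the list check used here):

```python
def object_list_uid(object_list):
  if not isinstance(object_list, (list, tuple)):
    object_list = [object_list]
  return ', '.join(str(abs(id(x))) for x in object_list)

inputs = [object(), object()]
masks = [None, None]
cache_key = object_list_uid(inputs) + '_' + object_list_uid(masks)
print(cache_key)  # id-based string; the exact value is process dependent
```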
- cache_key = _object_list_uid(inputs) + '_' + _object_list_uid(masks) - self._output_tensor_cache[cache_key] = output_tensors - if output_masks is not None: - self._output_mask_cache[cache_key] = output_masks - if output_shapes is not None: - input_shapes = [_static_shape(x) for x in inputs] - cache_key = _object_list_uid(input_shapes) - self._output_shape_cache[cache_key] = output_shapes - - return output_tensors, output_masks - - def _is_tensor_or_tensor_list(v): v = nest.flatten(v) if v and isinstance(v[0], ops.Tensor): @@ -2297,24 +1378,6 @@ def _add_elements_to_collection(elements, collection_list): collection.append(element) -def _object_list_uid(object_list): - object_list = nest.flatten(object_list) - return ', '.join([str(abs(id(x))) for x in object_list]) - - -def _make_node_key(layer_name, node_index): - return layer_name + '_ib-' + str(node_index) - - -def _static_shape(x): - if x is None: - return None - try: - return tuple(x.get_shape().as_list()) - except ValueError: - return None - - def _is_all_none(iterable_or_element): if not isinstance(iterable_or_element, (list, tuple)): iterable = [iterable_or_element] diff --git a/tensorflow/python/layers/base_test.py b/tensorflow/python/layers/base_test.py index 509ad5a7afb..1eea20deefe 100644 --- a/tensorflow/python/layers/base_test.py +++ b/tensorflow/python/layers/base_test.py @@ -20,8 +20,6 @@ from __future__ import print_function import copy -import numpy as np - from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes @@ -33,7 +31,6 @@ from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops -from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test @@ -431,115 +428,6 @@ class BaseLayerTest(test.TestCase): layer.apply(array_ops.placeholder('int32')) layer.apply(array_ops.placeholder('int32', shape=(2, 3))) - def test_get_updates_for(self): - a = base_layers.Input(shape=(2,)) - dense_layer = core_layers.Dense(1) - dense_layer.add_update(0, inputs=a) - dense_layer.add_update(1, inputs=None) - - self.assertEqual(dense_layer.get_updates_for(a), [0]) - self.assertEqual(dense_layer.get_updates_for(None), [1]) - - def test_get_losses_for(self): - a = base_layers.Input(shape=(2,)) - dense_layer = core_layers.Dense(1) - dense_layer.add_loss(0, inputs=a) - dense_layer.add_loss(1, inputs=None) - - self.assertEqual(dense_layer.get_losses_for(a), [0]) - self.assertEqual(dense_layer.get_losses_for(None), [1]) - - def testTopologicalAttributes(self): - # test layer attributes / methods related to cross-layer connectivity. 
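The deleted `test_get_updates_for` / `test_get_losses_for` cases boil down to the conditional vs. unconditional split that the network-level `updates` and `losses` properties rely on. A condensed repeat of that usage, mirroring the removed test as it stood in `base_test.py` (same `base_layers` / `core_layers` import aliases assumed):

```python
from tensorflow.python.layers import base as base_layers
from tensorflow.python.layers import core as core_layers

a = base_layers.Input(shape=(2,))
dense_layer = core_layers.Dense(1)
dense_layer.add_update(0, inputs=a)        # conditional on `a`
dense_layer.add_update(1, inputs=None)     # unconditional

assert dense_layer.get_updates_for(a) == [0]
assert dense_layer.get_updates_for(None) == [1]
```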
- a = base_layers.Input(shape=(32,), name='input_a') - b = base_layers.Input(shape=(32,), name='input_b') - - # test input, output, input_shape, output_shape - test_layer = core_layers.Dense(16, name='test_layer') - a_test = test_layer(a) - self.assertEqual(test_layer.input, a) - self.assertEqual(test_layer.output, a_test) - self.assertEqual(test_layer.input_shape, (None, 32)) - self.assertEqual(test_layer.output_shape, (None, 16)) - - # test `get_*_at` methods - dense = core_layers.Dense(16, name='dense_1') - a_2 = dense(a) - b_2 = dense(b) - - self.assertEqual(dense.get_input_at(0), a) - self.assertEqual(dense.get_input_at(1), b) - self.assertEqual(dense.get_output_at(0), a_2) - self.assertEqual(dense.get_output_at(1), b_2) - self.assertEqual(dense.get_input_shape_at(0), (None, 32)) - self.assertEqual(dense.get_input_shape_at(1), (None, 32)) - self.assertEqual(dense.get_output_shape_at(0), (None, 16)) - self.assertEqual(dense.get_output_shape_at(1), (None, 16)) - - # Test invalid value for attribute retrieval. - with self.assertRaises(ValueError): - dense.get_input_at(2) - with self.assertRaises(AttributeError): - new_dense = core_layers.Dense(16) - _ = new_dense.input - with self.assertRaises(AttributeError): - new_dense = core_layers.Dense(16) - _ = new_dense.output - with self.assertRaises(AttributeError): - new_dense = core_layers.Dense(16) - _ = new_dense.output_shape - with self.assertRaises(AttributeError): - new_dense = core_layers.Dense(16) - _ = new_dense.input_shape - with self.assertRaises(AttributeError): - new_dense = core_layers.Dense(16) - a = base_layers.Input(shape=(3, 32)) - a = base_layers.Input(shape=(5, 32)) - a_2 = dense(a) - b_2 = dense(b) - _ = new_dense.input_shape - with self.assertRaises(AttributeError): - new_dense = core_layers.Dense(16) - a = base_layers.Input(shape=(3, 32)) - a = base_layers.Input(shape=(5, 32)) - a_2 = dense(a) - b_2 = dense(b) - _ = new_dense.output_shape - - def testTopologicalAttributesMultiOutputLayer(self): - - class PowersLayer(base_layers.Layer): - - def call(self, inputs): - return [inputs**2, inputs**3] - - x = base_layers.Input(shape=(32,)) - test_layer = PowersLayer() - p1, p2 = test_layer(x) # pylint: disable=not-callable - - self.assertEqual(test_layer.input, x) - self.assertEqual(test_layer.output, [p1, p2]) - self.assertEqual(test_layer.input_shape, (None, 32)) - self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)]) - - def testTopologicalAttributesMultiInputLayer(self): - - class AddLayer(base_layers.Layer): - - def call(self, inputs): - assert len(inputs) == 2 - return inputs[0] + inputs[1] - - a = base_layers.Input(shape=(32,)) - b = base_layers.Input(shape=(32,)) - test_layer = AddLayer() - y = test_layer([a, b]) # pylint: disable=not-callable - - self.assertEqual(test_layer.input, [a, b]) - self.assertEqual(test_layer.output, y) - self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)]) - self.assertEqual(test_layer.output_shape, (None, 32)) - @test_util.run_in_graph_and_eager_modes() def test_count_params(self): dense = core_layers.Dense(16) @@ -582,383 +470,5 @@ class BaseLayerTest(test.TestCase): self.assertEqual(len(layer.get_losses_for(x)), 1) -class NetworkTest(test.TestCase): - - def testBasicNetwork(self): - # minimum viable network - x = base_layers.Input(shape=(32,)) - dense = core_layers.Dense(2) - y = dense(x) - network = base_layers.Network(x, y, name='dense_network') - - # test basic attributes - self.assertEqual(network.name, 'dense_network') - 
self.assertEqual(len(network.layers), 2) # InputLayer + Dense - self.assertEqual(network.layers[1], dense) - self.assertEqual(network.weights, dense.weights) - self.assertEqual(network.trainable_weights, dense.trainable_weights) - self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights) - - # test callability on Input - x_2 = base_layers.Input(shape=(32,)) - y_2 = network(x_2) - self.assertEqual(y_2.get_shape().as_list(), [None, 2]) - - # test callability on regular tensor - x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32)) - y_2 = network(x_2) - self.assertEqual(y_2.get_shape().as_list(), [None, 2]) - - # test network `trainable` attribute - network.trainable = False - self.assertEqual(network.weights, dense.weights) - self.assertEqual(network.trainable_weights, []) - self.assertEqual(network.non_trainable_weights, - dense.trainable_weights + dense.non_trainable_weights) - - def test_node_construction(self): - # test graph topology construction basics - a = base_layers.Input(shape=(32,), name='input_a') - b = base_layers.Input(shape=(32,), name='input_b') - - self.assertEqual(a.get_shape().as_list(), [None, 32]) - a_layer, a_node_index, a_tensor_index = a._keras_history - b_layer, _, _ = b._keras_history - self.assertEqual(len(a_layer._inbound_nodes), 1) - self.assertEqual(a_tensor_index, 0) - node = a_layer._inbound_nodes[a_node_index] - self.assertEqual(node.outbound_layer, a_layer) - - self.assertEqual(node.inbound_layers, []) - self.assertEqual(node.input_tensors, [a]) - self.assertEqual(node.input_shapes, [(None, 32)]) - self.assertEqual(node.output_tensors, [a]) - self.assertEqual(node.output_shapes, [(None, 32)]) - - dense = core_layers.Dense(16, name='dense_1') - dense(a) - dense(b) - - self.assertEqual(len(dense._inbound_nodes), 2) - self.assertEqual(len(dense._outbound_nodes), 0) - self.assertEqual(dense._inbound_nodes[0].inbound_layers, [a_layer]) - self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense) - self.assertEqual(dense._inbound_nodes[1].inbound_layers, [b_layer]) - self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense) - self.assertEqual(dense._inbound_nodes[0].input_tensors, [a]) - self.assertEqual(dense._inbound_nodes[1].input_tensors, [b]) - - # Test config - config_0 = dense._inbound_nodes[0].get_config() - self.assertEqual(config_0['outbound_layer'], dense.name) - - def testMultiInputNetwork(self): - a = base_layers.Input(shape=(32,), name='input_a') - b = base_layers.Input(shape=(32,), name='input_b') - - class AddLayer(base_layers.Layer): - - def call(self, inputs): - assert len(inputs) == 2 - return inputs[0] + inputs[1] - - c = AddLayer()([a, b]) # pylint: disable=not-callable - network = base_layers.Network([a, b], c) - self.assertEqual(len(network.layers), 3) # 2 * InputLayer + AddLayer - - # Test callability. - a2 = base_layers.Input(shape=(32,)) - b2 = base_layers.Input(shape=(32,)) - c2 = network([a2, b2]) - self.assertEqual(c2.get_shape().as_list(), [None, 32]) - - def testMultiOutputNetwork(self): - x = base_layers.Input(shape=(32,)) - y1 = core_layers.Dense(2)(x) - y2 = core_layers.Dense(3)(x) - network = base_layers.Network(x, [y1, y2]) - - self.assertEqual(len(network.layers), 3) # InputLayer + 2 * Dense - - # Test callability. 
- x2 = base_layers.Input(shape=(32,)) - outputs = network(x2) - - self.assertEqual(type(outputs), list) - self.assertEqual(len(outputs), 2) - self.assertEqual(outputs[0].get_shape().as_list(), [None, 2]) - self.assertEqual(outputs[1].get_shape().as_list(), [None, 3]) - - def testMultiInputMultiOutputNetworkSharedLayer(self): - a = base_layers.Input(shape=(32,), name='input_a') - b = base_layers.Input(shape=(32,), name='input_b') - - dense = core_layers.Dense(2) - - y1 = dense(a) - y2 = dense(b) - network = base_layers.Network([a, b], [y1, y2]) - self.assertEqual(len(network.layers), 3) # 2 * InputLayer + Dense - - # Test callability. - a2 = base_layers.Input(shape=(32,)) - b2 = base_layers.Input(shape=(32,)) - outputs = network([a2, b2]) - - self.assertEqual(type(outputs), list) - self.assertEqual(len(outputs), 2) - self.assertEqual(outputs[0].get_shape().as_list(), [None, 2]) - self.assertEqual(outputs[1].get_shape().as_list(), [None, 2]) - - def testCrossDataFlows(self): - # Test the ability to have multi-output layers with outputs that get routed - # to separate layers - - class PowersLayer(base_layers.Layer): - - def call(self, inputs): - return [inputs**2, inputs**3] - - x = base_layers.Input(shape=(32,)) - p1, p2 = PowersLayer()(x) # pylint: disable=not-callable - y1 = core_layers.Dense(2)(p1) - y2 = core_layers.Dense(3)(p2) - network = base_layers.Network(x, [y1, y2]) - - self.assertEqual(len(network.layers), 4) # InputLayer + 2 * Dense + PLayer - - # Test callability. - x2 = base_layers.Input(shape=(32,)) - outputs = network(x2) - - self.assertEqual(type(outputs), list) - self.assertEqual(len(outputs), 2) - self.assertEqual(outputs[0].get_shape().as_list(), [None, 2]) - self.assertEqual(outputs[1].get_shape().as_list(), [None, 3]) - - def testNetworkAttributes(self): - x = base_layers.Input(shape=(32,)) - z = core_layers.Dense(2, kernel_regularizer=lambda x: 0.01 * (x**2))(x) - dense = core_layers.Dense(2, name='dense') - dense.add_update(1) - y = dense(z) - net = base_layers.Network(x, y) - - # losses - self.assertEqual(len(net.losses), 1) - - # updates - self.assertEqual(len(net.updates), 1) - - # get_layer - self.assertEqual(net.get_layer('dense'), dense) - self.assertEqual(net.get_layer(index=2), dense) - with self.assertRaises(ValueError): - net.get_layer('dense_unknown') - with self.assertRaises(ValueError): - net.get_layer() - with self.assertRaises(ValueError): - net.get_layer(index=4) - - # input, output - self.assertEqual(net.input, x) - self.assertEqual(net.output, y) - - # input_shape, output_shape - self.assertEqual(net.input_shape, (None, 32)) - self.assertEqual(net.output_shape, (None, 2)) - - # get_*_at - self.assertEqual(net.get_input_at(0), x) - self.assertEqual(net.get_output_at(0), y) - - # _compute_output_shape - self.assertEqual(net._compute_output_shape((3, 32)).as_list(), [3, 2]) - - def testInvalidNetworks(self): - # redundant inputs - x = base_layers.Input(shape=(32,)) - y = core_layers.Dense(2)(x) - with self.assertRaises(ValueError): - base_layers.Network([x, x], y) - - # inputs that don't come from Input - x = array_ops.placeholder(dtype='float32', shape=(None, 32)) - y = core_layers.Dense(2)(x) - with self.assertRaises(ValueError): - base_layers.Network(x, y) - - # inputs that don't come from Input but have a layer history - x = base_layers.Input(shape=(32,)) - x = core_layers.Dense(32)(x) - y = core_layers.Dense(2)(x) - with self.assertRaises(ValueError): - base_layers.Network(x, y) - - # outputs that don't come from layers - x = 
base_layers.Input(shape=(32,)) - y = core_layers.Dense(2)(x) - y = 2 * y - with self.assertRaises(ValueError): - base_layers.Network(x, y) - - # disconnected graphs - x1 = base_layers.Input(shape=(32,)) - x2 = base_layers.Input(shape=(32,)) - y = core_layers.Dense(2)(x1) - with self.assertRaises(ValueError): - base_layers.Network(x2, y) - - # redundant layer names - x = base_layers.Input(shape=(32,)) - z = core_layers.Dense(2, name='dense')(x) - y = core_layers.Dense(2, name='dense')(z) - with self.assertRaises(ValueError): - base_layers.Network(x, y) - - def testInputTensorWrapping(self): - x = array_ops.placeholder(dtype='float32', shape=(None, 32)) - x = base_layers.Input(tensor=x) - y = core_layers.Dense(2)(x) - base_layers.Network(x, y) - - def testExplicitBatchSize(self): - x = base_layers.Input(shape=(32,), batch_size=3) - y = core_layers.Dense(2)(x) - self.assertEqual(y.get_shape().as_list(), [3, 2]) - - def testNetworkRecursion(self): - # test the ability of networks to be used as layers inside networks. - a = base_layers.Input(shape=(32,)) - b = core_layers.Dense(2)(a) - net = base_layers.Network(a, b) - - c = base_layers.Input(shape=(32,)) - d = net(c) - - recursive_net = base_layers.Network(c, d) - self.assertEqual(len(recursive_net.layers), 2) - self.assertEqual(recursive_net.layers[1], net) - self.assertEqual(len(recursive_net.weights), 2) - - # test callability - x = array_ops.placeholder(dtype='float32', shape=(None, 32)) - y = recursive_net(x) - self.assertEqual(y.get_shape().as_list(), [None, 2]) - - def testSparseInput(self): - - class SparseSoftmax(base_layers.Layer): - - def call(self, inputs): - return sparse_ops.sparse_softmax(inputs) - - x = base_layers.Input(shape=(32,), sparse=True) - y = SparseSoftmax()(x) # pylint: disable=not-callable - network = base_layers.Network(x, y) - - self.assertEqual(len(network.layers), 2) - self.assertEqual(network.layers[0].sparse, True) - - @test_util.run_in_graph_and_eager_modes() - def testMaskingSingleInput(self): - - class MaskedLayer(base_layers.Layer): - - def call(self, inputs, mask=None): - if mask is not None: - return inputs * mask - return inputs - - def compute_mask(self, inputs, mask=None): - return array_ops.ones_like(inputs) - - if context.in_graph_mode(): - x = base_layers.Input(shape=(32,)) - y = MaskedLayer()(x) # pylint: disable=not-callable - network = base_layers.Network(x, y) - - # test callability on Input - x_2 = base_layers.Input(shape=(32,)) - y_2 = network(x_2) - self.assertEqual(y_2.get_shape().as_list(), [None, 32]) - - # test callability on regular tensor - x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32)) - y_2 = network(x_2) - self.assertEqual(y_2.get_shape().as_list(), [None, 32]) - else: - a = constant_op.constant([2] * 32) - mask = constant_op.constant([0, 1] * 16) - a._keras_mask = mask - b = MaskedLayer().apply(a) - self.assertTrue(hasattr(b, '_keras_mask')) - self.assertAllEqual(self.evaluate(array_ops.ones_like(mask)), - self.evaluate(getattr(b, '_keras_mask'))) - self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b)) - - -class DeferredModeTest(test.TestCase): - - def testDeferredTensorAttributes(self): - x = base_layers._DeferredTensor(shape=(None, 2), dtype='float32', name='x') - self.assertEqual(str(x), - 'DeferredTensor(\'x\', shape=(?, 2), dtype=float32)') - self.assertEqual(repr(x), - '<_DeferredTensor \'x\' shape=(?, 2) dtype=float32>') - - @test_util.run_in_graph_and_eager_modes() - def testSimpleNetworkBuilding(self): - inputs = base_layers.Input(shape=(32,)) - 
if context.in_eager_mode(): - self.assertIsInstance(inputs, base_layers._DeferredTensor) - self.assertEqual(inputs.dtype.name, 'float32') - self.assertEqual(inputs.shape.as_list(), [None, 32]) - - x = core_layers.Dense(2)(inputs) - if context.in_eager_mode(): - self.assertIsInstance(x, base_layers._DeferredTensor) - self.assertEqual(x.dtype.name, 'float32') - self.assertEqual(x.shape.as_list(), [None, 2]) - - outputs = core_layers.Dense(4)(x) - network = base_layers.Network(inputs, outputs) - self.assertIsInstance(network, base_layers.Network) - - if context.in_eager_mode(): - # It should be possible to call such a network on EagerTensors. - inputs = constant_op.constant( - np.random.random((10, 32)).astype('float32')) - outputs = network(inputs) - self.assertEqual(outputs.shape.as_list(), [10, 4]) - - @test_util.run_in_graph_and_eager_modes() - def testMultiIONetworkbuilding(self): - input_a = base_layers.Input(shape=(32,)) - input_b = base_layers.Input(shape=(16,)) - a = core_layers.Dense(16)(input_a) - - class AddLayer(base_layers.Layer): - - def call(self, inputs): - return inputs[0] + inputs[1] - - def _compute_output_shape(self, input_shape): - return input_shape[0] - - c = AddLayer()([a, input_b]) # pylint: disable=not-callable - c = core_layers.Dense(2)(c) - - network = base_layers.Network([input_a, input_b], [a, c]) - if context.in_eager_mode(): - a_val = constant_op.constant( - np.random.random((10, 32)).astype('float32')) - b_val = constant_op.constant( - np.random.random((10, 16)).astype('float32')) - outputs = network([a_val, b_val]) - self.assertEqual(len(outputs), 2) - self.assertEqual(outputs[0].shape.as_list(), [10, 16]) - self.assertEqual(outputs[1].shape.as_list(), [10, 2]) - if __name__ == '__main__': test.main() diff --git a/tensorflow/python/layers/layers.py b/tensorflow/python/layers/layers.py index d3f532e79c1..0a52b1e8d92 100644 --- a/tensorflow/python/layers/layers.py +++ b/tensorflow/python/layers/layers.py @@ -65,8 +65,8 @@ from tensorflow.python.util.all_util import remove_undocumented # Base objects. from tensorflow.python.layers.base import Layer -from tensorflow.python.layers.base import Input from tensorflow.python.layers.base import InputSpec +from tensorflow.python.layers.network import Input # Core layers. from tensorflow.python.layers.core import Dense diff --git a/tensorflow/python/layers/network.py b/tensorflow/python/layers/network.py new file mode 100644 index 00000000000..9a33a5c7269 --- /dev/null +++ b/tensorflow/python/layers/network.py @@ -0,0 +1,957 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================= +"""Contains Network, a composition of layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +from tensorflow.python.eager import context +from tensorflow.python.estimator import util as estimator_util +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.framework import tensor_shape +from tensorflow.python.layers import base +from tensorflow.python.layers import utils as layers_util +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import variable_scope as vs +from tensorflow.python.platform import tf_logging as logging +from tensorflow.python.util import nest + + +class InputLayer(base.Layer): + """Layer to be used as an entry point into a Network (a graph of layers). + + It can either wrap an existing tensor (pass an `input_tensor` argument) + or create its a placeholder tensor (pass arguments `input_shape` + as well as `dtype`). + + It is generally recommend to use the functional layer API via `Input`, + (which creates an `InputLayer`) without directly using `InputLayer`. + + Arguments: + input_shape: Shape tuple (not including the batch axis), or `TensorShape` + instance (not including the batch axis). + batch_size: Optional input batch size (integer or None). + dtype: Datatype of the input. + input_tensor: Optional tensor to use as layer input + instead of creating a placeholder. + sparse: Boolean, whether the placeholder created + is meant to be sparse. + name: Name of the layer (string). + + Raises: + RuntimeError: If created in Eager mode. + """ + + def __init__(self, + input_shape=None, + batch_size=None, + dtype=dtypes.float32, + input_tensor=None, + sparse=False, + name=None): + super(InputLayer, self).__init__(dtype=dtype, name=name) + self.built = True + self.sparse = sparse + self.batch_size = batch_size + + if isinstance(input_shape, tensor_shape.TensorShape): + input_shape = tuple(input_shape.as_list()) + + if input_tensor is None: + if input_shape is not None: + batch_input_shape = (batch_size,) + tuple(input_shape) + else: + batch_input_shape = None + + if context.in_eager_mode(): + # In eager mode, create a temporary placeholder to call the layer on. + input_tensor = base._DeferredTensor( # pylint: disable=protected-access + shape=batch_input_shape, + dtype=dtype, + name=self.name) + else: + # In graph mode, create a graph placeholder to call the layer on. + if sparse: + input_tensor = array_ops.sparse_placeholder( + shape=batch_input_shape, + dtype=dtype, + name=self.name) + else: + input_tensor = array_ops.placeholder( + shape=batch_input_shape, + dtype=dtype, + name=self.name) + + # For compatibility with Keras API. + self.is_placeholder = True + self._batch_input_shape = batch_input_shape + else: + # For compatibility with Keras API. + self.is_placeholder = False + self._batch_input_shape = tuple(input_tensor.get_shape().as_list()) + + # Create an input node to add to self.outbound_node + # and set output_tensors' _keras_history. 
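For context on the two code paths above, a short usage sketch (graph mode assumed; `Input` is the helper defined later in this file and re-exported through `tf.layers` by the `layers.py` change in this patch): either let `InputLayer` create a placeholder from a shape, or wrap an existing tensor so that it picks up `_keras_history` metadata.

```python
from tensorflow.python.layers.network import Input
from tensorflow.python.ops import array_ops

x1 = Input(shape=(32,))                                    # fresh placeholder
existing = array_ops.placeholder(dtype='float32', shape=(None, 32))
x2 = Input(tensor=existing)         # wraps `existing`; no placeholder created
```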
+ input_tensor._keras_history = (self, 0, 0) # pylint: disable=protected-access + base.Node( + self, + inbound_layers=[], + node_indices=[], + tensor_indices=[], + input_tensors=[input_tensor], + output_tensors=[input_tensor]) + + +def Input( # pylint: disable=invalid-name + shape=None, + batch_size=None, + name=None, + dtype=dtypes.float32, + sparse=False, + tensor=None): + """`Input()` is used to instantiate an input tensor for use with a `Network`. + + For instance, if a, b and c are tensors created via `Input`, + it becomes possible to do: + + `network = Network(inputs=[a, b], outputs=c)` + + Example: + + ```python + # This is a logistic regression + x = tf.layers.Input(shape=(32,)) + y = tf.layers.Dense(16, activation='softmax')(x) + network = tf.layers.Network(x, y) + ``` + + Arguments: + shape: A shape tuple (integer), not including the batch size. + For instance, `shape=(32,)` indicates that the expected input + will be batches of 32-dimensional vectors. + batch_size: Optional input batch size (integer or None). + name: An optional name string for the layer. + Should be unique in a model (do not reuse the same name twice). + It will be autogenerated if it isn't provided. + dtype: The data type expected by the input, as a string + (`float32`, `float64`, `int32`...) + sparse: A boolean specifying whether the placeholder + to be created is sparse. + tensor: Optional existing tensor to wrap into the `Input` layer. + If set, the layer will not create a placeholder tensor. + + Returns: + A tensor: either a new placeholder (with history metadata) or + `tensor` (if passed), with added history metadata. + + Raises: + RuntimeError: If called in Eager mode. + """ + input_layer = InputLayer( + input_shape=shape, + batch_size=batch_size, + name=name, + dtype=dtype, + sparse=sparse, + input_tensor=tensor) + # Return tensor including `_keras_history` metadata. + # Note that in this case train_output and test_output are the same pointer. + outputs = input_layer._inbound_nodes[0].output_tensors # pylint: disable=protected-access + if len(outputs) == 1: + return outputs[0] + else: + return outputs + + +class GraphNetwork(base.Layer): + """A GraphNetwork is a directed acyclic graph of layers. + + It is the topological form of a "model". + A Model is simply a GraphNetwork with added training/evaluation routines. + + A GraphNetwork instance implements the full Layer API. In particular, a + GraphNetwork can be called on new inputs. + + Example: + + ```python + # This is a logistic regression + x = tf.layers.Input(shape=(32,)) + y = tf.layers.Dense(16, activation='softmax')(x) + network = tf.layers.GraphNetwork(x, y) + + # It is then possible to call the network on compatible inputs: + z = tf.layers.Input(shape=(32,)) + w = network(z) + + # It is possible to retrieve the same properties as a layer: + weights = network.trainable_weights + ``` + + Arguments: + inputs: Input tensor or list of input tensors. + Must come from `tf.layers.Input`. + output: Output tensor or list of output tensors. Must come from + tf.layers Layers or Keras layers. + name: Optional name of the model (string). + + Attributes: + GraphNetwork has the same attributes as Layer. On top of it, it also has: + - layers: a list of the children layers of the network, + a list of layer instances, ordered from "earlier in the graph" + to "later in the graph". + + Methods: + GraphNetwork has the same methods as Layer. On top of it, it also has: + - get_layer: retrieves a child layer by name or index in the graph. 
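Since `inputs` and `outputs` may also be lists, here is a condensed multi-input variant of the docstring example, modeled on the removed `testMultiInputNetwork`; the import paths and the `AddLayer` helper are assumptions for illustration, not part of the patch.

```python
from tensorflow.python.layers import base
from tensorflow.python.layers.network import GraphNetwork, Input

class AddLayer(base.Layer):

  def call(self, inputs):
    return inputs[0] + inputs[1]

a = Input(shape=(32,), name='input_a')
b = Input(shape=(32,), name='input_b')
c = AddLayer()([a, b])                 # pylint: disable=not-callable
network = GraphNetwork([a, b], c)      # layers: 2 InputLayers + AddLayer

# A GraphNetwork is itself a layer, so it can be reapplied to new inputs.
c2 = network([Input(shape=(32,)), Input(shape=(32,))])
```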
+ + Raises: + RuntimeError: If created in Eager mode. + """ + + def __init__(self, inputs, outputs, name=None): # pylint: disable=super-init-not-called + if context.in_eager_mode(): + # TODO(fchollet): check that all inputs and outputs are DeferredTensors. + pass + + self._init_set_name(name) + self._activity_regularizer = None + with vs.variable_scope( + None, default_name=self._base_name) as captured_scope: + self._scope = captured_scope + call_fn_args = estimator_util.fn_args(self.call) + self._compute_previous_mask = ('mask' in call_fn_args or + hasattr(self, 'compute_mask')) + self._call_has_scope_arg = 'scope' in call_fn_args + + # This acts just like the `trainable` attribute of any layer instance. + # It does not affect users of the underlying layers, only users of the + # GraphNetwork instance. + self.trainable = True + # A GraphNetwork does not create weights of its own, thus it is already + # built. + self.built = True + # A GraphNetwork does not create weights of its own, thus has no dtype. + self._dtype = None + # The following are implemented as property functions: + # self.trainable_weights + # self.non_trainable_weights + # self.input_spec + + # Private attributes to implement compatibility with Layer. + self._per_input_losses = {} + self._per_input_updates = {} + self._updates = [] + self._losses = [] + self._scope = None + self._reuse = None + self._graph = ops.get_default_graph() + + # GraphNetwork-specific properties. + if isinstance(inputs, (list, tuple)): + self.inputs = list(inputs) # Tensor or list of tensors. + else: + self.inputs = [inputs] + if isinstance(outputs, (list, tuple)): + self.outputs = list(outputs) + else: + self.outputs = [outputs] + # All layers in order of horizontal graph traversal. + # Entries are unique. Includes input and output layers. + self.layers = [] + + # Check for redundancy in inputs. + if len(set(self.inputs)) != len(self.inputs): + raise ValueError('The list of inputs passed to the model ' + 'is redundant. ' + 'All inputs should only appear once.' + ' Found: ' + str(self.inputs)) + + # # List of initial layers (1 to 1 mapping with self.inputs, + # # hence the same layer might appear twice) + # self._input_layers = [] + # self._input_layers_node_indices = [] + # self._input_layers_tensor_indices = [] + # # list of layers (1 to 1 mapping with self.inputs, + # # hence the same layer might appear twice) + # self._output_layers = [] + # self._output_layers_node_indices = [] + # self._output_layers_tensor_indices = [] + + self._input_layers = [] + self._output_layers = [] + self._input_coordinates = [] + self._output_coordinates = [] + + # This is for performance optimization when calling the GraphNetwork on new + # inputs. Every time the GraphNetwork is called on a set on input tensors, + # we compute the output tensors, output masks and output shapes in one pass, + # then cache them here. When any of these outputs is queried later, we + # retrieve it from there instead of recomputing it. + self._output_mask_cache = {} + self._output_tensor_cache = {} + self._output_shape_cache = {} + + # User-provided arguments validation. + for x in self.inputs: + # Check that x has appropriate `_keras_history` metadata. + if not hasattr(x, '_keras_history'): + cls_name = self.__class__.__name__ + raise ValueError('Input tensors to a ' + cls_name + ' ' + + 'must come from `tf.layers.Input`. ' + 'Received: ' + str(x) + + ' (missing previous layer metadata).') + # Check that x is an input tensor. 
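The `_keras_history` tuple being checked here is the only metadata the constructor needs from its input tensors. A quick illustration of what it unpacks to for a tensor created by `Input` (graph mode assumed):

```python
x = Input(shape=(32,))
layer, node_index, tensor_index = x._keras_history  # pylint: disable=protected-access
# For an Input tensor this is (the InputLayer instance, 0, 0), which is
# exactly what the asserts on node_index and tensor_index below rely on.
print(type(layer).__name__, node_index, tensor_index)  # InputLayer 0 0
```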
+ # pylint: disable=protected-access + layer, node_index, tensor_index = x._keras_history + if len(layer._inbound_nodes) > 1 or ( + layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers): + cls_name = self.__class__.__name__ + logging.warning(cls_name + ' inputs must come from ' + '`tf.layers.Input` (thus holding past layer metadata), ' + 'they cannot be the output of ' + 'a previous non-Input layer. ' + 'Here, a tensor specified as ' + 'input to "' + self.name + '" was not an Input tensor, ' + 'it was generated by layer ' + layer.name + '.\n' + 'Note that input tensors are ' + 'instantiated via `tensor = tf.layers.Input(shape)`.\n' + 'The tensor that caused the issue was: ' + str(x.name)) + # pylint: enable=protected-access + for x in self.outputs: + if not hasattr(x, '_keras_history'): + cls_name = self.__class__.__name__ + raise ValueError('Output tensors to a ' + cls_name + ' must be ' + 'the output of a TensorFlow `Layer` ' + '(thus holding past layer metadata). Found: ' + str(x)) + + # Build self._output_layers: + for x in self.outputs: + layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access + self._output_layers.append(layer) + self._output_coordinates.append((layer, node_index, tensor_index)) + + # Build self._input_layers: + for x in self.inputs: + layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access + # It's supposed to be an input layer, so only one node + # and one tensor output. + assert node_index == 0 + assert tensor_index == 0 + self._input_layers.append(layer) + self._input_coordinates.append((layer, node_index, tensor_index)) + + # Network_nodes: set of nodes included in the graph + # (not all nodes included in the layers + # are relevant to the current graph). + network_nodes = set() # ids of all nodes relevant to the GraphNetwork + nodes_depths = {} # dict {node: depth value} + layers_depths = {} # dict {layer: depth value} + layer_indices = {} # dict {layer: index in traversal} + nodes_in_decreasing_depth = [] + + def build_map_of_graph(tensor, + finished_nodes, + nodes_in_progress, + layer, + node_index, + tensor_index): + """Builds a map of the graph of layers. + + This recursively updates the map `layer_indices`, + the list `nodes_in_decreasing_depth` and the set `network_nodes`. + + Arguments: + tensor: Some tensor in a graph. + finished_nodes: Set of nodes whose subgraphs have been traversed + completely. Useful to prevent duplicated work. + nodes_in_progress: Set of nodes that are currently active on the + recursion stack. Useful to detect cycles. + layer: Layer from which `tensor` comes from. If not provided, + will be obtained from `tensor._keras_history`. + node_index: Node index from which `tensor` comes from. + tensor_index: Tensor_index from which `tensor` comes from. + + Raises: + ValueError: if a cycle is detected. + """ + node = layer._inbound_nodes[node_index] # pylint: disable=protected-access + + # Prevent cycles. + if node in nodes_in_progress: + raise ValueError('The tensor ' + str(tensor) + ' at layer "' + + layer.name + '" is part of a cycle.') + + # Don't repeat work for shared subgraphs + if node in finished_nodes: + return + + node_key = _make_node_key(layer.name, node_index) + # Update network_nodes. + network_nodes.add(node_key) + + # Store the traversal order for layer sorting. + if layer not in layer_indices: + layer_indices[layer] = len(layer_indices) + + nodes_in_progress.add(node) + + # Propagate to all previous tensors connected to this node. 
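Each visited `(layer, node_index)` pair is recorded under a string key; `_make_node_key` at the bottom of this file builds it as `'<layer name>_ib-<node index>'`, so a layer shared across two inputs contributes two distinct entries. A tiny sketch with an invented layer name:

```python
def _make_node_key(layer_name, node_index):
  return layer_name + '_ib-' + str(node_index)

network_nodes = set()
network_nodes.add(_make_node_key('dense_1', 0))  # first call of dense_1
network_nodes.add(_make_node_key('dense_1', 1))  # same layer, second call

print(sorted(network_nodes))  # ['dense_1_ib-0', 'dense_1_ib-1']
```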
+ for i in range(len(node.inbound_layers)): + x = node.input_tensors[i] + layer = node.inbound_layers[i] + node_index = node.node_indices[i] + tensor_index = node.tensor_indices[i] + build_map_of_graph(x, finished_nodes, nodes_in_progress, layer, + node_index, tensor_index) + + finished_nodes.add(node) + nodes_in_progress.remove(node) + nodes_in_decreasing_depth.append(node) + + finished_nodes = set() + nodes_in_progress = set() + for x in self.outputs: + layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access + build_map_of_graph(x, finished_nodes, nodes_in_progress, + layer=layer, + node_index=node_index, + tensor_index=tensor_index) + + for node in reversed(nodes_in_decreasing_depth): + # If the depth is not set, the node has no outbound nodes (depth 0). + depth = nodes_depths.setdefault(node, 0) + + # Update the depth of the corresponding layer + previous_depth = layers_depths.get(node.outbound_layer, 0) + # If we've seen this layer before at a higher depth, + # we should use that depth instead of the node depth. + # This is necessary for shared layers that have inputs at different + # depth levels in the graph. + depth = max(depth, previous_depth) + layers_depths[node.outbound_layer] = depth + nodes_depths[node] = depth + + # Update the depth of inbound nodes. + # The "depth" of a node is the max of the depths + # of all layers it is connected to. + for i in range(len(node.inbound_layers)): + inbound_layer = node.inbound_layers[i] + node_index = node.node_indices[i] + inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access + previous_depth = nodes_depths.get(inbound_node, 0) + nodes_depths[inbound_node] = max(depth + 1, previous_depth) + + # Build a dict {depth: list of nodes with this depth} + nodes_by_depth = {} + for node, depth in nodes_depths.items(): + if depth not in nodes_by_depth: + nodes_by_depth[depth] = [] + nodes_by_depth[depth].append(node) + + # Build a dict {depth: list of layers with this depth} + layers_by_depth = {} + for layer, depth in layers_depths.items(): + if depth not in layers_by_depth: + layers_by_depth[depth] = [] + layers_by_depth[depth].append(layer) + + # Get sorted list of layer depths. + depth_keys = list(layers_by_depth.keys()) + depth_keys.sort(reverse=True) + + # Set self.layers and self._layers_by_depth. + layers = [] + for depth in depth_keys: + layers_for_depth = layers_by_depth[depth] + # GraphNetwork.layers needs to have a deterministic order: + # here we order them by traversal order. + layers_for_depth.sort(key=lambda x: layer_indices[x]) + layers.extend(layers_for_depth) + self.layers = layers + self._layers_by_depth = layers_by_depth + + # Get sorted list of node depths. + depth_keys = list(nodes_by_depth.keys()) + depth_keys.sort(reverse=True) + + # Check that all tensors required are computable. + # computable_tensors: all tensors in the graph + # that can be computed from the inputs provided. + computable_tensors = [] + for x in self.inputs: + computable_tensors.append(x) + + layers_with_complete_input = [] # To provide a better error msg. + for depth in depth_keys: + for node in nodes_by_depth[depth]: + layer = node.outbound_layer + if layer: + for x in node.input_tensors: + if x not in computable_tensors: + raise ValueError('Graph disconnected: ' + 'cannot obtain value for tensor ' + str(x) + + ' at layer "' + layer.name + '". 
' + 'The following previous layers ' + 'were accessed without issue: ' + + str(layers_with_complete_input)) + for x in node.output_tensors: + computable_tensors.append(x) + layers_with_complete_input.append(layer.name) + + # Keep track of the network's nodes. + self._network_nodes = network_nodes + self._nodes_by_depth = nodes_by_depth + + # Ensure name unicity, which will be crucial for serialization + # (since serialized nodes refer to layers by their name). + all_names = [layer.name for layer in self.layers] + for name in all_names: + if all_names.count(name) != 1: + raise ValueError('The name "' + name + '" is used ' + + str(all_names.count(name)) + ' times in the model. ' + 'All layer names should be unique.') + + # Layer parameters. + # The new network starts with a single inbound node + # for its inputs, and no outbound nodes. + self._outbound_nodes = [] # Will be appended to by future calls to __call__ + self._inbound_nodes = [ + ] # Will be appended to below, and by future calls to __call__ + # Create the node linking internal inputs to internal outputs. + base.Node( + outbound_layer=self, + inbound_layers=[], + node_indices=[], + tensor_indices=[], + input_tensors=self.inputs, + output_tensors=self.outputs) + + def get_layer(self, name=None, index=None): + """Retrieves a layer based on either its name (unique) or index. + + Indices are based on order of horizontal graph traversal (bottom-up). + + Arguments: + name: String, name of layer. + index: Integer, index of layer. + + Returns: + A layer instance. + + Raises: + ValueError: In case of invalid layer name or index. + """ + # TODO(fchollet): We could build a dictionary based on layer names + # since they are constant, but we have not done that yet. + if index is not None: + if len(self.layers) <= index: + raise ValueError('Was asked to retrieve layer at index ' + str(index) + + ' but model only has ' + str(len(self.layers)) + + ' layers.') + else: + return self.layers[index] + else: + if not name: + raise ValueError('Provide either a layer name or layer index.') + for layer in self.layers: + if layer.name == name: + return layer + raise ValueError('No such layer: ' + name) + + @property + def updates(self): + """Retrieve the network's updates. + + Will only include updates that are either + unconditional, or conditional on inputs to this model + (e.g. will not include updates that depend on tensors + that aren't inputs to this model). + + Returns: + A list of update ops. + """ + updates = [] + for layer in self.layers: + if hasattr(layer, 'updates'): + # Collect updates that are dependent on inputs + # that are part of the model. + for node_index, node in enumerate(layer._inbound_nodes): # pylint: disable=protected-access + node_key = _make_node_key(layer.name, node_index) + if node_key in self._network_nodes: + # The model owns this layer node. + inputs = node.input_tensors + updates += layer.get_updates_for(inputs) + # Collect unconditional updates. + updates += layer.get_updates_for(None) + return updates + + @property + def losses(self): + """Retrieve the network's losses. + + Will only include losses that are either + unconditional, or conditional on inputs to this model + (e.g. will not include losses that depend on tensors + that aren't inputs to this model). + + Returns: + A list of loss tensors. + """ + losses = [] + # Retrieve losses for all internal layers. + for layer in self.layers: + if hasattr(layer, 'losses'): + # Collect losses that are dependent on inputs + # that are part of the model. 
+ for node_index, node in enumerate(layer._inbound_nodes): # pylint: disable=protected-access + node_key = _make_node_key(layer.name, node_index) + if node_key in self._network_nodes: + # The model owns this layer node. + inputs = node.input_tensors + losses += layer.get_losses_for(inputs) + # Collect unconditional losses. + losses += layer.get_losses_for(None) + # Add any potential unconditional model-level loss. + losses += self.get_losses_for(None) + return losses + + @property + def trainable_weights(self): + if not self.trainable: + return [] + weights = [] + for layer in self.layers: + weights += layer.trainable_weights + return weights + + @property + def non_trainable_weights(self): + weights = [] + for layer in self.layers: + weights += layer.non_trainable_weights + if not self.trainable: + trainable_weights = [] + for layer in self.layers: + trainable_weights += layer.trainable_weights + return trainable_weights + weights + return weights + + @property + def input_spec(self): + """Gets the network's input specs. + + Returns: + A list of `InputSpec` instances (one per input to the model) + or a single instance if the model has only one input. + """ + specs = [] + for layer in self._input_layers: + if layer.input_spec is None: + specs.append(None) + else: + if not isinstance(layer.input_spec, list): + raise TypeError('Layer ' + layer.name + + ' has an input_spec attribute that ' + 'is not a list. We expect a list. ' + 'Found input_spec = ' + str(layer.input_spec)) + specs += layer.input_spec + if len(specs) == 1: + return specs[0] + return specs + + def call(self, inputs, mask=None): + """Call the model on new inputs. + + In this case `call` just reapplies + all ops in the graph to the new inputs + (e.g. build a new computational graph from the provided inputs). + + Arguments: + inputs: A tensor or list of tensors. + mask: A mask or list of masks. A mask can be + either a tensor or None (no mask). + + Returns: + A tensor if there is a single output, or + a list of tensors if there are more than one outputs. + """ + inputs = nest.flatten(inputs) + if mask is None: + masks = [None for _ in range(len(inputs))] + else: + masks = nest.flatten(mask) + + if context.in_graph_mode(): + # Try to retrieve cached outputs if the layer has already been called + # on these exact inputs. + cache_key = (layers_util.object_list_uid(inputs) + + '_' + layers_util.object_list_uid(masks)) + if cache_key in self._output_tensor_cache: + # Cache hit. + return self._output_tensor_cache[cache_key] + # Actually apply the network graph to the new inputs. + outputs, _ = self._run_internal_graph(inputs, masks) + return outputs + + def _compute_output_shape(self, input_shape): + if isinstance(input_shape, list): + input_shapes = [] + for shape in input_shape: + if shape is not None: + input_shapes.append(tuple(tensor_shape.TensorShape(shape).as_list())) + else: + input_shapes.append(None) + else: + if input_shape is not None: + input_shapes = [tuple(tensor_shape.TensorShape(input_shape).as_list())] + else: + input_shapes = [None] + + if len(input_shapes) != len(self._input_layers): + raise ValueError('Invalid input_shape argument ' + str(input_shape) + + ': model has ' + str(len(self._input_layers)) + + ' tensor inputs.') + + cache_key = layers_util.object_list_uid(input_shapes) + if cache_key not in self._output_shape_cache: + # Cache miss. We have to run the network graph manually (recursive calls + # to `_compute_output_shape`). 
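A plain-Python sketch of how the `trainable_weights` / `non_trainable_weights` properties above partition variables when the network-level `trainable` flag is flipped; layer weights are stand-in strings here, not the patch's code.

```python
class ToyNetwork(object):

  def __init__(self, layer_weights, trainable=True):
    self.trainable = trainable
    # Each entry is (trainable weights, non-trainable weights) of one layer.
    self._layer_weights = layer_weights

  @property
  def trainable_weights(self):
    if not self.trainable:
      return []
    return [w for t, _ in self._layer_weights for w in t]

  @property
  def non_trainable_weights(self):
    weights = [w for _, nt in self._layer_weights for w in nt]
    if not self.trainable:
      # A frozen network reports every weight as non-trainable.
      return [w for t, _ in self._layer_weights for w in t] + weights
    return weights

net = ToyNetwork([(['kernel', 'bias'], [])], trainable=False)
print(net.trainable_weights)      # []
print(net.non_trainable_weights)  # ['kernel', 'bias']
```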
+ layers_to_output_shapes = {} + for i in range(len(input_shapes)): + layer = self._input_layers[i] + input_shape = input_shapes[i] + # It's an input layer: then `_compute_output_shape` is identity, + # and there is only one node and one tensor output. + shape_key = layer.name + '_0_0' + layers_to_output_shapes[shape_key] = input_shape + + depth_keys = list(self._nodes_by_depth.keys()) + depth_keys.sort(reverse=True) + # Iterate over nodes, by depth level. + if len(depth_keys) > 1: + for depth in depth_keys: + nodes = self._nodes_by_depth[depth] + for node in nodes: + # This is always a single layer, never a list. + layer = node.outbound_layer + if layer in self._input_layers: + # We've already covered the input layers + # a few lines above. + continue + # Potentially redundant list, + # same size as node.input_tensors. + input_shapes = [] + for j in range(len(node.inbound_layers)): + inbound_layer = node.inbound_layers[j] + node_index = node.node_indices[j] + tensor_index = node.tensor_indices[j] + shape_key = inbound_layer.name + '_%s_%s' % (node_index, + tensor_index) + input_shape = layers_to_output_shapes[shape_key] + input_shapes.append(input_shape) + + if len(input_shapes) == 1: + output_shape = layer._compute_output_shape(input_shapes[0]) # pylint: disable=protected-access + else: + output_shape = layer._compute_output_shape(input_shapes) # pylint: disable=protected-access + if isinstance(output_shape, list): + output_shapes = [ + tuple(tensor_shape.TensorShape(shape).as_list()) + for shape in output_shape + ] + else: + output_shapes = [ + tuple(tensor_shape.TensorShape(output_shape).as_list()) + ] + + node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access + for j in range(len(output_shapes)): + shape_key = layer.name + '_%s_%s' % (node_index, j) + layers_to_output_shapes[shape_key] = output_shapes[j] + + # Read final output shapes from layers_to_output_shapes. + output_shapes = [] + for i in range(len(self._output_layers)): + layer, node_index, tensor_index = self._output_coordinates[i] + shape_key = layer.name + '_%s_%s' % (node_index, tensor_index) + output_shapes.append(layers_to_output_shapes[shape_key]) + + # Store in cache. + self._output_shape_cache[cache_key] = output_shapes + else: + # Cache hit. + output_shapes = self._output_shape_cache[cache_key] + + if isinstance(output_shapes, list): + if len(output_shapes) == 1: + return tensor_shape.TensorShape(output_shapes[0]) + else: + return [tensor_shape.TensorShape(shape) for shape in output_shapes] + else: + return tensor_shape.TensorShape(output_shapes) + + def _run_internal_graph(self, inputs, masks=None): + """Computes output tensors for new inputs. + + # Note: + - Expects `inputs` to be a list (potentially with 1 element). + - Can be run on non-Keras tensors. + + Arguments: + inputs: List of tensors + masks: List of masks (tensors or None). + + Returns: + Three lists: output_tensors, output_masks, output_shapes + """ + # Note: masking support is relevant mainly for Keras. + # It cannot be factored out without having the fully reimplement the network + # calling logic on the Keras side. We choose to incorporate it in + # GraphNetwork because 1) it may be useful to fully support in tf.layers in + # the future and 2) Keras is a major user of GraphNetwork. If you don't + # use masking, it does not interfere with regular behavior at all and you + # can ignore it. 
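To make the masking note above concrete, a toy rendering of the plumbing used in the node loop below: the computed mask is only forwarded when the layer's `call` signature accepts a `mask` argument (the real code checks this via `estimator_util.fn_args`; plain `inspect` stands in here, Python 3 assumed), and output masks fall back to `None` when a layer has no `compute_mask`.

```python
import inspect

class MaskedToyLayer(object):

  def call(self, inputs, mask=None):
    # Keep only the elements whose mask entry is truthy.
    return inputs if mask is None else [i for i, m in zip(inputs, mask) if m]

  def compute_mask(self, inputs, mask=None):
    return [True] * len(inputs)

layer = MaskedToyLayer()
kwargs = {}
computed_mask = [True, False, True]
if 'mask' in inspect.getfullargspec(layer.call).args:
  kwargs['mask'] = computed_mask

outputs = layer.call([1, 2, 3], **kwargs)
output_mask = (layer.compute_mask([1, 2, 3], computed_mask)
               if hasattr(layer, 'compute_mask') else None)
print(outputs, output_mask)  # [1, 3] [True, True, True]
```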
+ if masks is None: + masks = [None for _ in range(len(inputs))] + + # Dictionary mapping reference tensors to tuples + # (computed tensor, compute mask) + # we assume a 1:1 mapping from tensor to mask + # TODO(fchollet): raise exception when a `.compute_mask()` call + # does not return a list the same size as `call` + tensor_map = {} + for x, y, mask in zip(self.inputs, inputs, masks): + tensor_map[str(id(x))] = (y, mask) + + depth_keys = list(self._nodes_by_depth.keys()) + depth_keys.sort(reverse=True) + for depth in depth_keys: + nodes = self._nodes_by_depth[depth] + for node in nodes: + # This is always a single layer, never a list. + layer = node.outbound_layer + + reference_input_tensors = node.input_tensors + reference_output_tensors = node.output_tensors + + # If all previous input tensors are available in tensor_map, + # then call node.inbound_layer on them. + computed_data = [] # List of tuples (input, mask). + for x in reference_input_tensors: + if str(id(x)) in tensor_map: + computed_data.append(tensor_map[str(id(x))]) + + if len(computed_data) == len(reference_input_tensors): + # Call layer (reapplying ops to new inputs). + with ops.name_scope(layer.name): + if node.arguments: + kwargs = node.arguments + else: + kwargs = {} + if len(computed_data) == 1: + computed_tensor, computed_mask = computed_data[0] + # Ensure mask propagation if applicable. + if 'mask' in estimator_util.fn_args(layer.call): + if 'mask' not in kwargs: + kwargs['mask'] = computed_mask + + output_tensors = nest.flatten( + layer.call(computed_tensor, **kwargs)) + if hasattr(layer, 'compute_mask'): + output_masks = nest.flatten( + layer.compute_mask(computed_tensor, computed_mask)) + else: + output_masks = [None for _ in range(len(output_tensors))] + computed_tensors = [computed_tensor] + computed_masks = [computed_mask] + else: + computed_tensors = [x[0] for x in computed_data] + computed_masks = [x[1] for x in computed_data] + if 'mask' in estimator_util.fn_args(layer.call): + if 'mask' not in kwargs: + kwargs['mask'] = computed_masks + output_tensors = nest.flatten( + layer.call(computed_tensors, **kwargs)) + if hasattr(layer, 'compute_mask'): + output_masks = nest.flatten( + layer.compute_mask(computed_tensors, computed_masks)) + else: + output_masks = [None for _ in range(len(output_tensors))] + + # Apply activity regularizer if any: + if layer.activity_regularizer is not None: + regularization_losses = [ + layer.activity_regularizer(x) for x in computed_tensors + ] + layer.add_loss(regularization_losses, computed_tensors) + + if context.in_graph_mode(): + # Update model updates and losses: + # Keep track of updates that depend on the inputs + # (e.g. BN updates). + self.add_update(layer.get_updates_for(computed_tensors), inputs) + # Keep track of unconditional updates (e.g. a counter). + self.add_update(layer.get_updates_for(None), None) + # Keep track of losses that depend on the inputs + # (e.g. activity regularizers). + self.add_loss(layer.get_losses_for(computed_tensors), inputs) + # Keep track of unconditional losses + # (e.g. weight regularizers). + self.add_loss(layer.get_losses_for(None), None) + + # Update tensor_map. 
+ for x, y, mask in zip(reference_output_tensors, output_tensors, + output_masks): + tensor_map[str(id(x))] = (y, mask) + + output_tensors = [] + output_masks = [] + output_shapes = [] + for x in self.outputs: + assert str(id(x)) in tensor_map, 'Could not compute output ' + str(x) + tensor, mask = tensor_map[str(id(x))] + output_shapes.append(layers_util.static_shape(x)) + output_tensors.append(tensor) + output_masks.append(mask) + + if len(output_tensors) == 1: + output_tensors = output_tensors[0] + if output_shapes is not None: + output_shapes = output_shapes[0] + if output_masks is not None: + output_masks = output_masks[0] + + if context.in_graph_mode(): + # Update cache; + # keys are based on ids on input tensors and inputs masks. + cache_key = (layers_util.object_list_uid(inputs) + + '_' + layers_util.object_list_uid(masks)) + self._output_tensor_cache[cache_key] = output_tensors + if output_masks is not None: + self._output_mask_cache[cache_key] = output_masks + if output_shapes is not None: + input_shapes = [layers_util.static_shape(x) for x in inputs] + cache_key = layers_util.object_list_uid(input_shapes) + self._output_shape_cache[cache_key] = output_shapes + + return output_tensors, output_masks + + +def _make_node_key(layer_name, node_index): + return layer_name + '_ib-' + str(node_index) diff --git a/tensorflow/python/layers/network_test.py b/tensorflow/python/layers/network_test.py new file mode 100644 index 00000000000..af7813e2642 --- /dev/null +++ b/tensorflow/python/layers/network_test.py @@ -0,0 +1,525 @@ +# Copyright 2015 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Tests for tf.layers.network.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensorflow.python.eager import context +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import test_util +from tensorflow.python.layers import base as base_layers +from tensorflow.python.layers import core as core_layers +from tensorflow.python.layers import network as network_layers +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import sparse_ops +from tensorflow.python.platform import test + + +class BaseLayerCompatibilityTest(test.TestCase): + + def test_get_updates_for(self): + a = network_layers.Input(shape=(2,)) + dense_layer = core_layers.Dense(1) + dense_layer.add_update(0, inputs=a) + dense_layer.add_update(1, inputs=None) + + self.assertEqual(dense_layer.get_updates_for(a), [0]) + self.assertEqual(dense_layer.get_updates_for(None), [1]) + + def test_get_losses_for(self): + a = network_layers.Input(shape=(2,)) + dense_layer = core_layers.Dense(1) + dense_layer.add_loss(0, inputs=a) + dense_layer.add_loss(1, inputs=None) + + self.assertEqual(dense_layer.get_losses_for(a), [0]) + self.assertEqual(dense_layer.get_losses_for(None), [1]) + + def testTopologicalAttributes(self): + # test layer attributes / methods related to cross-layer connectivity. + a = network_layers.Input(shape=(32,), name='input_a') + b = network_layers.Input(shape=(32,), name='input_b') + + # test input, output, input_shape, output_shape + test_layer = core_layers.Dense(16, name='test_layer') + a_test = test_layer(a) + self.assertEqual(test_layer.input, a) + self.assertEqual(test_layer.output, a_test) + self.assertEqual(test_layer.input_shape, (None, 32)) + self.assertEqual(test_layer.output_shape, (None, 16)) + + # test `get_*_at` methods + dense = core_layers.Dense(16, name='dense_1') + a_2 = dense(a) + b_2 = dense(b) + + self.assertEqual(dense.get_input_at(0), a) + self.assertEqual(dense.get_input_at(1), b) + self.assertEqual(dense.get_output_at(0), a_2) + self.assertEqual(dense.get_output_at(1), b_2) + self.assertEqual(dense.get_input_shape_at(0), (None, 32)) + self.assertEqual(dense.get_input_shape_at(1), (None, 32)) + self.assertEqual(dense.get_output_shape_at(0), (None, 16)) + self.assertEqual(dense.get_output_shape_at(1), (None, 16)) + + # Test invalid value for attribute retrieval. 
+ with self.assertRaises(ValueError): + dense.get_input_at(2) + with self.assertRaises(AttributeError): + new_dense = core_layers.Dense(16) + _ = new_dense.input + with self.assertRaises(AttributeError): + new_dense = core_layers.Dense(16) + _ = new_dense.output + with self.assertRaises(AttributeError): + new_dense = core_layers.Dense(16) + _ = new_dense.output_shape + with self.assertRaises(AttributeError): + new_dense = core_layers.Dense(16) + _ = new_dense.input_shape + with self.assertRaises(AttributeError): + new_dense = core_layers.Dense(16) + a = network_layers.Input(shape=(3, 32)) + a = network_layers.Input(shape=(5, 32)) + a_2 = dense(a) + b_2 = dense(b) + _ = new_dense.input_shape + with self.assertRaises(AttributeError): + new_dense = core_layers.Dense(16) + a = network_layers.Input(shape=(3, 32)) + a = network_layers.Input(shape=(5, 32)) + a_2 = dense(a) + b_2 = dense(b) + _ = new_dense.output_shape + + def testTopologicalAttributesMultiOutputLayer(self): + + class PowersLayer(base_layers.Layer): + + def call(self, inputs): + return [inputs**2, inputs**3] + + x = network_layers.Input(shape=(32,)) + test_layer = PowersLayer() + p1, p2 = test_layer(x) # pylint: disable=not-callable + + self.assertEqual(test_layer.input, x) + self.assertEqual(test_layer.output, [p1, p2]) + self.assertEqual(test_layer.input_shape, (None, 32)) + self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)]) + + def testTopologicalAttributesMultiInputLayer(self): + + class AddLayer(base_layers.Layer): + + def call(self, inputs): + assert len(inputs) == 2 + return inputs[0] + inputs[1] + + a = network_layers.Input(shape=(32,)) + b = network_layers.Input(shape=(32,)) + test_layer = AddLayer() + y = test_layer([a, b]) # pylint: disable=not-callable + + self.assertEqual(test_layer.input, [a, b]) + self.assertEqual(test_layer.output, y) + self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)]) + self.assertEqual(test_layer.output_shape, (None, 32)) + + +class NetworkTest(test.TestCase): + + def testBasicNetwork(self): + # minimum viable network + x = network_layers.Input(shape=(32,)) + dense = core_layers.Dense(2) + y = dense(x) + network = network_layers.GraphNetwork(x, y, name='dense_network') + + # test basic attributes + self.assertEqual(network.name, 'dense_network') + self.assertEqual(len(network.layers), 2) # InputLayer + Dense + self.assertEqual(network.layers[1], dense) + self.assertEqual(network.weights, dense.weights) + self.assertEqual(network.trainable_weights, dense.trainable_weights) + self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights) + + # test callability on Input + x_2 = network_layers.Input(shape=(32,)) + y_2 = network(x_2) + self.assertEqual(y_2.get_shape().as_list(), [None, 2]) + + # test callability on regular tensor + x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32)) + y_2 = network(x_2) + self.assertEqual(y_2.get_shape().as_list(), [None, 2]) + + # test network `trainable` attribute + network.trainable = False + self.assertEqual(network.weights, dense.weights) + self.assertEqual(network.trainable_weights, []) + self.assertEqual(network.non_trainable_weights, + dense.trainable_weights + dense.non_trainable_weights) + + def test_node_construction(self): + # test graph topology construction basics + a = network_layers.Input(shape=(32,), name='input_a') + b = network_layers.Input(shape=(32,), name='input_b') + + self.assertEqual(a.get_shape().as_list(), [None, 32]) + a_layer, a_node_index, a_tensor_index = a._keras_history + 
b_layer, _, _ = b._keras_history + self.assertEqual(len(a_layer._inbound_nodes), 1) + self.assertEqual(a_tensor_index, 0) + node = a_layer._inbound_nodes[a_node_index] + self.assertEqual(node.outbound_layer, a_layer) + + self.assertEqual(node.inbound_layers, []) + self.assertEqual(node.input_tensors, [a]) + self.assertEqual(node.input_shapes, [(None, 32)]) + self.assertEqual(node.output_tensors, [a]) + self.assertEqual(node.output_shapes, [(None, 32)]) + + dense = core_layers.Dense(16, name='dense_1') + dense(a) + dense(b) + + self.assertEqual(len(dense._inbound_nodes), 2) + self.assertEqual(len(dense._outbound_nodes), 0) + self.assertEqual(dense._inbound_nodes[0].inbound_layers, [a_layer]) + self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense) + self.assertEqual(dense._inbound_nodes[1].inbound_layers, [b_layer]) + self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense) + self.assertEqual(dense._inbound_nodes[0].input_tensors, [a]) + self.assertEqual(dense._inbound_nodes[1].input_tensors, [b]) + + # Test config + config_0 = dense._inbound_nodes[0].get_config() + self.assertEqual(config_0['outbound_layer'], dense.name) + + def testMultiInputNetwork(self): + a = network_layers.Input(shape=(32,), name='input_a') + b = network_layers.Input(shape=(32,), name='input_b') + + class AddLayer(base_layers.Layer): + + def call(self, inputs): + assert len(inputs) == 2 + return inputs[0] + inputs[1] + + c = AddLayer()([a, b]) # pylint: disable=not-callable + network = network_layers.GraphNetwork([a, b], c) + self.assertEqual(len(network.layers), 3) # 2 * InputLayer + AddLayer + + # Test callability. + a2 = network_layers.Input(shape=(32,)) + b2 = network_layers.Input(shape=(32,)) + c2 = network([a2, b2]) + self.assertEqual(c2.get_shape().as_list(), [None, 32]) + + def testMultiOutputNetwork(self): + x = network_layers.Input(shape=(32,)) + y1 = core_layers.Dense(2)(x) + y2 = core_layers.Dense(3)(x) + network = network_layers.GraphNetwork(x, [y1, y2]) + + self.assertEqual(len(network.layers), 3) # InputLayer + 2 * Dense + + # Test callability. + x2 = network_layers.Input(shape=(32,)) + outputs = network(x2) + + self.assertEqual(type(outputs), list) + self.assertEqual(len(outputs), 2) + self.assertEqual(outputs[0].get_shape().as_list(), [None, 2]) + self.assertEqual(outputs[1].get_shape().as_list(), [None, 3]) + + def testMultiInputMultiOutputNetworkSharedLayer(self): + a = network_layers.Input(shape=(32,), name='input_a') + b = network_layers.Input(shape=(32,), name='input_b') + + dense = core_layers.Dense(2) + + y1 = dense(a) + y2 = dense(b) + network = network_layers.GraphNetwork([a, b], [y1, y2]) + self.assertEqual(len(network.layers), 3) # 2 * InputLayer + Dense + + # Test callability. 
+ a2 = network_layers.Input(shape=(32,)) + b2 = network_layers.Input(shape=(32,)) + outputs = network([a2, b2]) + + self.assertEqual(type(outputs), list) + self.assertEqual(len(outputs), 2) + self.assertEqual(outputs[0].get_shape().as_list(), [None, 2]) + self.assertEqual(outputs[1].get_shape().as_list(), [None, 2]) + + def testCrossDataFlows(self): + # Test the ability to have multi-output layers with outputs that get routed + # to separate layers + + class PowersLayer(base_layers.Layer): + + def call(self, inputs): + return [inputs**2, inputs**3] + + x = network_layers.Input(shape=(32,)) + p1, p2 = PowersLayer()(x) # pylint: disable=not-callable + y1 = core_layers.Dense(2)(p1) + y2 = core_layers.Dense(3)(p2) + network = network_layers.GraphNetwork(x, [y1, y2]) + + self.assertEqual(len(network.layers), 4) # InputLayer + 2 * Dense + PLayer + + # Test callability. + x2 = network_layers.Input(shape=(32,)) + outputs = network(x2) + + self.assertEqual(type(outputs), list) + self.assertEqual(len(outputs), 2) + self.assertEqual(outputs[0].get_shape().as_list(), [None, 2]) + self.assertEqual(outputs[1].get_shape().as_list(), [None, 3]) + + def testNetworkAttributes(self): + x = network_layers.Input(shape=(32,)) + z = core_layers.Dense(2, kernel_regularizer=lambda x: 0.01 * (x**2))(x) + dense = core_layers.Dense(2, name='dense') + dense.add_update(1) + y = dense(z) + net = network_layers.GraphNetwork(x, y) + + # losses + self.assertEqual(len(net.losses), 1) + + # updates + self.assertEqual(len(net.updates), 1) + + # get_layer + self.assertEqual(net.get_layer('dense'), dense) + self.assertEqual(net.get_layer(index=2), dense) + with self.assertRaises(ValueError): + net.get_layer('dense_unknown') + with self.assertRaises(ValueError): + net.get_layer() + with self.assertRaises(ValueError): + net.get_layer(index=4) + + # input, output + self.assertEqual(net.input, x) + self.assertEqual(net.output, y) + + # input_shape, output_shape + self.assertEqual(net.input_shape, (None, 32)) + self.assertEqual(net.output_shape, (None, 2)) + + # get_*_at + self.assertEqual(net.get_input_at(0), x) + self.assertEqual(net.get_output_at(0), y) + + # _compute_output_shape + self.assertEqual(net._compute_output_shape((3, 32)).as_list(), [3, 2]) + + def testInvalidNetworks(self): + # redundant inputs + x = network_layers.Input(shape=(32,)) + y = core_layers.Dense(2)(x) + with self.assertRaises(ValueError): + network_layers.GraphNetwork([x, x], y) + + # inputs that don't come from Input + x = array_ops.placeholder(dtype='float32', shape=(None, 32)) + y = core_layers.Dense(2)(x) + with self.assertRaises(ValueError): + network_layers.GraphNetwork(x, y) + + # inputs that don't come from Input but have a layer history + x = network_layers.Input(shape=(32,)) + x = core_layers.Dense(32)(x) + y = core_layers.Dense(2)(x) + with self.assertRaises(ValueError): + network_layers.GraphNetwork(x, y) + + # outputs that don't come from layers + x = network_layers.Input(shape=(32,)) + y = core_layers.Dense(2)(x) + y = 2 * y + with self.assertRaises(ValueError): + network_layers.GraphNetwork(x, y) + + # disconnected graphs + x1 = network_layers.Input(shape=(32,)) + x2 = network_layers.Input(shape=(32,)) + y = core_layers.Dense(2)(x1) + with self.assertRaises(ValueError): + network_layers.GraphNetwork(x2, y) + + # redundant layer names + x = network_layers.Input(shape=(32,)) + z = core_layers.Dense(2, name='dense')(x) + y = core_layers.Dense(2, name='dense')(z) + with self.assertRaises(ValueError): + network_layers.GraphNetwork(x, y) + + def 
testInputTensorWrapping(self): + x = array_ops.placeholder(dtype='float32', shape=(None, 32)) + x = network_layers.Input(tensor=x) + y = core_layers.Dense(2)(x) + network_layers.GraphNetwork(x, y) + + def testExplicitBatchSize(self): + x = network_layers.Input(shape=(32,), batch_size=3) + y = core_layers.Dense(2)(x) + self.assertEqual(y.get_shape().as_list(), [3, 2]) + + def testNetworkRecursion(self): + # test the ability of networks to be used as layers inside networks. + a = network_layers.Input(shape=(32,)) + b = core_layers.Dense(2)(a) + net = network_layers.GraphNetwork(a, b) + + c = network_layers.Input(shape=(32,)) + d = net(c) + + recursive_net = network_layers.GraphNetwork(c, d) + self.assertEqual(len(recursive_net.layers), 2) + self.assertEqual(recursive_net.layers[1], net) + self.assertEqual(len(recursive_net.weights), 2) + + # test callability + x = array_ops.placeholder(dtype='float32', shape=(None, 32)) + y = recursive_net(x) + self.assertEqual(y.get_shape().as_list(), [None, 2]) + + def testSparseInput(self): + + class SparseSoftmax(base_layers.Layer): + + def call(self, inputs): + return sparse_ops.sparse_softmax(inputs) + + x = network_layers.Input(shape=(32,), sparse=True) + y = SparseSoftmax()(x) # pylint: disable=not-callable + network = network_layers.GraphNetwork(x, y) + + self.assertEqual(len(network.layers), 2) + self.assertEqual(network.layers[0].sparse, True) + + @test_util.run_in_graph_and_eager_modes() + def testMaskingSingleInput(self): + + class MaskedLayer(base_layers.Layer): + + def call(self, inputs, mask=None): + if mask is not None: + return inputs * mask + return inputs + + def compute_mask(self, inputs, mask=None): + return array_ops.ones_like(inputs) + + if context.in_graph_mode(): + x = network_layers.Input(shape=(32,)) + y = MaskedLayer()(x) # pylint: disable=not-callable + network = network_layers.GraphNetwork(x, y) + + # test callability on Input + x_2 = network_layers.Input(shape=(32,)) + y_2 = network(x_2) + self.assertEqual(y_2.get_shape().as_list(), [None, 32]) + + # test callability on regular tensor + x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32)) + y_2 = network(x_2) + self.assertEqual(y_2.get_shape().as_list(), [None, 32]) + else: + a = constant_op.constant([2] * 32) + mask = constant_op.constant([0, 1] * 16) + a._keras_mask = mask + b = MaskedLayer().apply(a) + self.assertTrue(hasattr(b, '_keras_mask')) + self.assertAllEqual(self.evaluate(array_ops.ones_like(mask)), + self.evaluate(getattr(b, '_keras_mask'))) + self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b)) + + +class DeferredModeTest(test.TestCase): + + def testDeferredTensorAttributes(self): + x = base_layers._DeferredTensor(shape=(None, 2), dtype='float32', name='x') + self.assertEqual(str(x), + 'DeferredTensor(\'x\', shape=(?, 2), dtype=float32)') + self.assertEqual(repr(x), + '<_DeferredTensor \'x\' shape=(?, 2) dtype=float32>') + + @test_util.run_in_graph_and_eager_modes() + def testSimpleNetworkBuilding(self): + inputs = network_layers.Input(shape=(32,)) + if context.in_eager_mode(): + self.assertIsInstance(inputs, base_layers._DeferredTensor) + self.assertEqual(inputs.dtype.name, 'float32') + self.assertEqual(inputs.shape.as_list(), [None, 32]) + + x = core_layers.Dense(2)(inputs) + if context.in_eager_mode(): + self.assertIsInstance(x, base_layers._DeferredTensor) + self.assertEqual(x.dtype.name, 'float32') + self.assertEqual(x.shape.as_list(), [None, 2]) + + outputs = core_layers.Dense(4)(x) + network = network_layers.GraphNetwork(inputs, outputs) 
+ self.assertIsInstance(network, network_layers.GraphNetwork) + + if context.in_eager_mode(): + # It should be possible to call such a network on EagerTensors. + inputs = constant_op.constant( + np.random.random((10, 32)).astype('float32')) + outputs = network(inputs) + self.assertEqual(outputs.shape.as_list(), [10, 4]) + + @test_util.run_in_graph_and_eager_modes() + def testMultiIONetworkbuilding(self): + input_a = network_layers.Input(shape=(32,)) + input_b = network_layers.Input(shape=(16,)) + a = core_layers.Dense(16)(input_a) + + class AddLayer(base_layers.Layer): + + def call(self, inputs): + return inputs[0] + inputs[1] + + def _compute_output_shape(self, input_shape): + return input_shape[0] + + c = AddLayer()([a, input_b]) # pylint: disable=not-callable + c = core_layers.Dense(2)(c) + + network = network_layers.GraphNetwork([input_a, input_b], [a, c]) + if context.in_eager_mode(): + a_val = constant_op.constant( + np.random.random((10, 32)).astype('float32')) + b_val = constant_op.constant( + np.random.random((10, 16)).astype('float32')) + outputs = network([a_val, b_val]) + self.assertEqual(len(outputs), 2) + self.assertEqual(outputs[0].shape.as_list(), [10, 16]) + self.assertEqual(outputs[1].shape.as_list(), [10, 2]) + +if __name__ == '__main__': + test.main() diff --git a/tensorflow/python/layers/utils.py b/tensorflow/python/layers/utils.py index 7c71d3c952c..766a6800d44 100644 --- a/tensorflow/python/layers/utils.py +++ b/tensorflow/python/layers/utils.py @@ -24,6 +24,7 @@ from tensorflow.python.ops import variables from tensorflow.python.ops import control_flow_ops from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util +from tensorflow.python.util import nest def convert_data_format(data_format, ndim): @@ -232,3 +233,19 @@ def constant_value(pred): else: raise TypeError('`pred` must be a Tensor, a Variable, or a Python bool.') return pred_value + + +def object_list_uid(object_list): + """Creates a single string from object ids.""" + object_list = nest.flatten(object_list) + return ', '.join([str(abs(id(x))) for x in object_list]) + + +def static_shape(x): + """Get the static shape of a Tensor, or None if it is unavailable.""" + if x is None: + return None + try: + return tuple(x.get_shape().as_list()) + except ValueError: + return None diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt index 64352508b58..07b8d900da5 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.-model.pbtxt @@ -2,7 +2,7 @@ path: "tensorflow.keras.Model" tf_class { is_instance: "" is_instance: "" - is_instance: "" + is_instance: "" is_instance: "" is_instance: "" is_instance: "" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt index f69800b918c..546bac44e4c 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt @@ -3,7 +3,7 @@ tf_class { is_instance: "" is_instance: "" is_instance: "" - is_instance: "" + is_instance: "" is_instance: "" is_instance: "" is_instance: "" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt index b2df5fba8fd..49841237cef 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt +++ 
b/tensorflow/tools/api/golden/tensorflow.keras.layers.-input-layer.pbtxt @@ -1,7 +1,7 @@ path: "tensorflow.keras.layers.InputLayer" tf_class { is_instance: "" - is_instance: "" + is_instance: "" is_instance: "" is_instance: "" is_instance: "" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt index 8916925b3ba..4e522813a5a 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-model.pbtxt @@ -2,7 +2,7 @@ path: "tensorflow.keras.models.Model" tf_class { is_instance: "" is_instance: "" - is_instance: "" + is_instance: "" is_instance: "" is_instance: "" is_instance: "" diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt index 8397b373f42..ddbb358c84c 100644 --- a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt +++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt @@ -3,7 +3,7 @@ tf_class { is_instance: "" is_instance: "" is_instance: "" - is_instance: "" + is_instance: "" is_instance: "" is_instance: "" is_instance: "" From 7d126c49aea63a283386cd73d04ab1bed5eae2f0 Mon Sep 17 00:00:00 2001 From: Skye Wanderman-Milne Date: Wed, 15 Nov 2017 17:58:51 -0800 Subject: [PATCH 082/104] Refactor Operation.__init__ to create some state after creating _c_op This change moves around the Operation.__init__ logic to create the TF_Operation before initializing _outputs and before adding the op to the control flow context (if any). This is in preparation for creating Operation objects around TF_Operations indirectly created by the C API (e.g. ops created by TF_ImportGraphDef). This also disables running HessianTest with the C API enabled, since it's broken for now (but will be fixed soon). PiperOrigin-RevId: 175910443 --- tensorflow/python/framework/ops.py | 49 ++++++++++++------------- tensorflow/python/ops/gradients_test.py | 4 +- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py index 0e647a27f56..6ac3b862c86 100644 --- a/tensorflow/python/framework/ops.py +++ b/tensorflow/python/framework/ops.py @@ -1526,13 +1526,6 @@ class Operation(object): raise TypeError("input needs to be a Tensor: %s" % a) # Mark that we consume the inputs. a._add_consumer(self) # pylint: disable=protected-access - if output_types is None: - output_types = [] - self._output_types_val = output_types - self._outputs = [ - Tensor(self, i, output_type) - for i, output_type in enumerate(output_types) - ] if input_types is None: input_types = [i.dtype.base_dtype for i in self._inputs] else: @@ -1562,25 +1555,6 @@ class Operation(object): self._original_op = original_op self._op_def = op_def self._traceback = self._graph._extract_stack() # pylint: disable=protected-access - # Define self._c_op before calling self._control_flow_context.AddOp(), since - # that will call methods on this op that check if self._c_op is set. - self._c_op = None - # Add this op to the current control flow context: - self._control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access - if self._control_flow_context is not None: - # TODO(skyewm): consider refactoring this to call self._create_c_op() - # first. This would require updating the TF_Operation's ID (see the - # comment and self._id_value update below). 
The disadvantage of calling - # AddOp() first is that we need to maintain Operation state that is - # accessed by AddOp() in Python, e.g. the input Tensors. - self._control_flow_context.AddOp(self) - # NOTE(keveman): Control flow context's AddOp could be creating new ops and - # setting op.inputs[index] = new_op. Thus the new ops' id could be larger - # than this op's id even though this op depend on them. Therefore, delaying - # assigning id to this op until all ops this could be dependent on are - # created. - self._id_value = self._graph._next_id() # pylint: disable=protected-access - self._recompute_node_def() if self._graph._c_graph: # pylint: disable=protected-access if self._op_def: @@ -1594,6 +1568,29 @@ class Operation(object): self._c_op = _create_c_op(self._graph, self._node_def, grouped_inputs, self._control_inputs) + else: + self._c_op = None + + # Initialize self._outputs + if output_types is None: + output_types = [] + self._output_types_val = output_types + self._outputs = [ + Tensor(self, i, output_type) + for i, output_type in enumerate(output_types) + ] + + # Add this op to the current control flow context: + self._control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access + if self._control_flow_context is not None: + self._control_flow_context.AddOp(self) + # NOTE(keveman): Control flow context's AddOp could be creating new ops and + # setting op.inputs[index] = new_op. Thus the new ops' id could be larger + # than this op's id even though this op depend on them. Therefore, delaying + # assigning id to this op until all ops this could be dependent on are + # created. + self._id_value = self._graph._next_id() # pylint: disable=protected-access + self._recompute_node_def() def _reconstruct_sequence_inputs(self, op_def, inputs, attrs): """Regroups a flat list of input tensors into scalar and sequence inputs. diff --git a/tensorflow/python/ops/gradients_test.py b/tensorflow/python/ops/gradients_test.py index 1211b2e9230..dacc2947fe3 100644 --- a/tensorflow/python/ops/gradients_test.py +++ b/tensorflow/python/ops/gradients_test.py @@ -573,7 +573,9 @@ class HessianVectorProductTest(test_util.TensorFlowTestCase): self.assertAllClose(hess_v_value, hess_v_actual) -@test_util.with_c_api +# TODO(skyewm): reenable C API once +# ControlFlowContext._RemoveExternalControlEdges works with C API enabled +# @test_util.with_c_api class HessianTest(test_util.TensorFlowTestCase): def testHessian1D(self): From b1d89c147d079ab97356b6a677cbf5ee726313d6 Mon Sep 17 00:00:00 2001 From: Brennan Saeta Date: Wed, 15 Nov 2017 18:00:52 -0800 Subject: [PATCH 083/104] Support user-space DNS caching for the GCS filesystem. In some environments, DNS resolution is unreliable. This change adds an optional userspace caching mechanism to radically reduce the amount of DNS queries sent to upstream resolvers. 
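The mechanism the new cache plugs into is libcurl's resolve-override list (CURLOPT_RESOLVE), exposed in this patch through a new HttpRequest::AddResolveOverride hook: a pre-resolved "hostname:port:IP" entry is handed to curl so that no DNS query is issued for that host at request time. Below is a minimal standalone sketch of that mechanism, not part of the patch; the IP literal is a documentation placeholder and error handling is elided.

#include <curl/curl.h>

int main() {
  curl_global_init(CURL_GLOBAL_DEFAULT);
  CURL* curl = curl_easy_init();
  // Entry format is "host:port:address"; in the patch these entries come from
  // a background thread that periodically re-resolves the GCS hostnames.
  curl_slist* resolve_list =
      curl_slist_append(nullptr, "www.googleapis.com:443:203.0.113.10");
  curl_easy_setopt(curl, CURLOPT_RESOLVE, resolve_list);
  curl_easy_setopt(curl, CURLOPT_URL, "https://www.googleapis.com/");
  // With a genuine pre-resolved address, the transfer proceeds without any
  // DNS lookup for www.googleapis.com.
  curl_easy_perform(curl);
  curl_slist_free_all(resolve_list);
  curl_easy_cleanup(curl);
  curl_global_cleanup();
  return 0;
}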
PiperOrigin-RevId: 175910642 --- tensorflow/core/platform/cloud/BUILD | 24 ++++ .../core/platform/cloud/curl_http_request.cc | 17 +++ .../core/platform/cloud/curl_http_request.h | 4 + .../core/platform/cloud/gcs_dns_cache.cc | 135 ++++++++++++++++++ .../core/platform/cloud/gcs_dns_cache.h | 74 ++++++++++ .../core/platform/cloud/gcs_dns_cache_test.cc | 113 +++++++++++++++ .../core/platform/cloud/gcs_file_system.cc | 37 ++++- .../core/platform/cloud/gcs_file_system.h | 2 + tensorflow/core/platform/cloud/http_request.h | 8 ++ 9 files changed, 408 insertions(+), 6 deletions(-) create mode 100644 tensorflow/core/platform/cloud/gcs_dns_cache.cc create mode 100644 tensorflow/core/platform/cloud/gcs_dns_cache.h create mode 100644 tensorflow/core/platform/cloud/gcs_dns_cache_test.cc diff --git a/tensorflow/core/platform/cloud/BUILD b/tensorflow/core/platform/cloud/BUILD index 901fb79d6aa..624145da751 100644 --- a/tensorflow/core/platform/cloud/BUILD +++ b/tensorflow/core/platform/cloud/BUILD @@ -41,6 +41,17 @@ cc_library( deps = ["//tensorflow/core:lib"], ) +cc_library( + name = "gcs_dns_cache", + srcs = ["gcs_dns_cache.cc"], + hdrs = ["gcs_dns_cache.h"], + visibility = ["//tensorflow:__subpackages__"], + deps = [ + ":http_request", + "//tensorflow/core:lib", + ], +) + cc_library( name = "gcs_file_system", srcs = ["gcs_file_system.cc"], @@ -51,6 +62,7 @@ cc_library( ":curl_http_request", ":expiring_lru_cache", ":file_block_cache", + ":gcs_dns_cache", ":google_auth_provider", ":http_request", ":retrying_file_system", @@ -231,6 +243,18 @@ tf_cc_test( ], ) +tf_cc_test( + name = "gcs_dns_cache_test", + size = "small", + srcs = ["gcs_dns_cache_test.cc"], + deps = [ + ":gcs_dns_cache", + "//tensorflow/core:lib", + "//tensorflow/core:test", + "//tensorflow/core:test_main", + ], +) + tf_cc_test( name = "curl_http_request_test", size = "small", diff --git a/tensorflow/core/platform/cloud/curl_http_request.cc b/tensorflow/core/platform/cloud/curl_http_request.cc index e2d935f35eb..d01734ba3a6 100644 --- a/tensorflow/core/platform/cloud/curl_http_request.cc +++ b/tensorflow/core/platform/cloud/curl_http_request.cc @@ -131,6 +131,9 @@ CurlHttpRequest::~CurlHttpRequest() { if (curl_headers_) { libcurl_->curl_slist_free_all(curl_headers_); } + if (resolve_list_) { + libcurl_->curl_slist_free_all(resolve_list_); + } if (put_body_) { fclose(put_body_); } @@ -212,6 +215,17 @@ Status CurlHttpRequest::AddHeader(const string& name, const string& value) { return Status::OK(); } +Status CurlHttpRequest::AddResolveOverride(const string& hostname, int64 port, + const string& ip_addr) { + TF_RETURN_IF_ERROR(CheckInitialized()); + TF_RETURN_IF_ERROR(CheckNotSent()); + // Resolve values are hostname:port:IP.add.ress + resolve_list_ = libcurl_->curl_slist_append( + resolve_list_, + strings::StrCat(hostname, ":", port, ":", ip_addr).c_str()); + return Status::OK(); +} + Status CurlHttpRequest::AddAuthBearerHeader(const string& auth_token) { TF_RETURN_IF_ERROR(CheckInitialized()); TF_RETURN_IF_ERROR(CheckNotSent()); @@ -376,6 +390,9 @@ Status CurlHttpRequest::Send() { if (curl_headers_) { libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTPHEADER, curl_headers_); } + if (resolve_list_) { + libcurl_->curl_easy_setopt(curl_, CURLOPT_RESOLVE, resolve_list_); + } libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERDATA, reinterpret_cast(this)); libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERFUNCTION, diff --git a/tensorflow/core/platform/cloud/curl_http_request.h b/tensorflow/core/platform/cloud/curl_http_request.h index 
c7a555de10c..2396593d6de 100644 --- a/tensorflow/core/platform/cloud/curl_http_request.h +++ b/tensorflow/core/platform/cloud/curl_http_request.h @@ -71,6 +71,9 @@ class CurlHttpRequest : public HttpRequest { /// Sets a request header. Status AddHeader(const string& name, const string& value) override; + Status AddResolveOverride(const string& hostname, int64 port, + const string& ip_addr) override; + /// Sets the 'Authorization' header to the value of 'Bearer ' + auth_token. Status AddAuthBearerHeader(const string& auth_token) override; @@ -146,6 +149,7 @@ class CurlHttpRequest : public HttpRequest { std::vector* response_buffer_ = nullptr; CURL* curl_ = nullptr; curl_slist* curl_headers_ = nullptr; + curl_slist* resolve_list_ = nullptr; std::vector default_response_buffer_; diff --git a/tensorflow/core/platform/cloud/gcs_dns_cache.cc b/tensorflow/core/platform/cloud/gcs_dns_cache.cc new file mode 100644 index 00000000000..63f2da065db --- /dev/null +++ b/tensorflow/core/platform/cloud/gcs_dns_cache.cc @@ -0,0 +1,135 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/platform/cloud/gcs_dns_cache.h" + +#include +#include +#include + +namespace tensorflow { + +namespace { + +constexpr char kStorageHost[] = "storage.googleapis.com"; +constexpr char kWwwHost[] = "www.googleapis.com"; + +} // namespace + +GcsDnsCache::GcsDnsCache(Env* env, int64 refresh_rate_secs) + : env_(env), refresh_rate_secs_(refresh_rate_secs) {} + +Status GcsDnsCache::AnnotateRequest(HttpRequest* request) { + // TODO(saeta): Blacklist failing IP addresses. + mutex_lock l(mu_); + if (!started_) { + DCHECK(!worker_) << "Worker thread already exists!"; + // Perform DNS resolutions to warm the cache. + std::vector www_addresses = ResolveName(kWwwHost); + std::vector storage_addresses = ResolveName(kStorageHost); + www_addresses.swap(www_addresses_); + storage_addresses.swap(storage_addresses_); + + // Note: we opt to use a thread instead of a delayed closure. 
+ worker_.reset(env_->StartThread( + {}, "gcs_dns_worker", std::bind(&GcsDnsCache::WorkerThread, this))); + started_ = true; + } + if (!storage_addresses_.empty()) { + std::uniform_int_distribution<> storage_dist(0, + storage_addresses_.size() - 1); + size_t index = storage_dist(random_); + TF_RETURN_IF_ERROR(request->AddResolveOverride(kStorageHost, 443, + storage_addresses_[index])); + } else { + LOG(WARNING) << "No IP addresses available for " << kStorageHost; + } + if (!www_addresses_.empty()) { + std::uniform_int_distribution<> www_dist(0, www_addresses_.size() - 1); + size_t index = www_dist(random_); + TF_RETURN_IF_ERROR( + request->AddResolveOverride(kWwwHost, 443, www_addresses_[index])); + } else { + LOG(WARNING) << "No IP addresses available for " << kWwwHost; + } + return Status::OK(); +} + +/* static */ std::vector GcsDnsCache::ResolveName(const string& name) { + addrinfo hints; + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; // Only use IPv4 for now. + hints.ai_socktype = SOCK_STREAM; + addrinfo* result = nullptr; + int return_code = getaddrinfo(name.c_str(), nullptr, &hints, &result); + + std::vector output; + if (return_code == 0) { + for (addrinfo* i = result; i != nullptr; i = i->ai_next) { + if (i->ai_family != AF_INET || i->ai_addr->sa_family != AF_INET) { + LOG(WARNING) << "Non-IPv4 address returned. ai_family: " << i->ai_family + << ". sa_family: " << i->ai_addr->sa_family << "."; + continue; + } + char buf[INET_ADDRSTRLEN]; + void* address_ptr = + &(reinterpret_cast(i->ai_addr)->sin_addr); + const char* formatted = nullptr; + if ((formatted = inet_ntop(i->ai_addr->sa_family, address_ptr, buf, + INET_ADDRSTRLEN)) == nullptr) { + LOG(ERROR) << "Error converting response to IP address for " << name + << ": " << strerror(errno); + } else { + output.emplace_back(buf); + } + } + } else { + if (return_code == EAI_SYSTEM) { + LOG(ERROR) << "Error resolving " << name + << " (EAI_SYSTEM): " << strerror(errno); + } else { + LOG(ERROR) << "Error resolving " << name << ": " + << gai_strerror(return_code); + } + } + if (result != nullptr) { + freeaddrinfo(result); + } + return output; +} + +void GcsDnsCache::WorkerThread() { + while (true) { + { + // Don't immediately re-resolve the addresses. + mutex_lock l(mu_); + if (cancelled_) return; + cond_var_.wait_for(l, std::chrono::seconds(refresh_rate_secs_)); + if (cancelled_) return; + } + // Resolve DNS values + std::vector www_addresses = ResolveName(kWwwHost); + std::vector storage_addresses = ResolveName(kStorageHost); + + { + mutex_lock l(mu_); + // Update instance variables. + www_addresses.swap(www_addresses_); + storage_addresses.swap(storage_addresses_); + } + } +} + +} // namespace tensorflow diff --git a/tensorflow/core/platform/cloud/gcs_dns_cache.h b/tensorflow/core/platform/cloud/gcs_dns_cache.h new file mode 100644 index 00000000000..7a4d3847a5a --- /dev/null +++ b/tensorflow/core/platform/cloud/gcs_dns_cache.h @@ -0,0 +1,74 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef THIRD_PARTY_TENSORFLOW_PLATNFORM_CLOUD_DNS_CACHE_H_ +#define THIRD_PARTY_TENSORFLOW_PLATNFORM_CLOUD_DNS_CACHE_H_ + +#include + +#include "tensorflow/core/platform/cloud/http_request.h" +#include "tensorflow/core/platform/env.h" + +namespace tensorflow { +const int64 kDefaultRefreshRateSecs = 60; + +// DnsCache is a userspace DNS cache specialized for the GCS filesystem. +// +// Some environments have unreliable DNS resolvers. DnsCache ameliorates the +// situation by radically reducing the number of DNS requests by performing +// 2 DNS queries per minute (by default) on a background thread. Updated cache +// entries are used to override curl's DNS resolution processes. +class GcsDnsCache { + public: + // Default no-argument constructor. + GcsDnsCache() : GcsDnsCache(kDefaultRefreshRateSecs) {} + + // Constructs a GcsDnsCache with the specified refresh rate. + GcsDnsCache(int64 refresh_rate_secs) + : GcsDnsCache(Env::Default(), refresh_rate_secs) {} + + GcsDnsCache(Env* env, int64 refresh_rate_secs); + + ~GcsDnsCache() { + mutex_lock l(mu_); + cancelled_ = true; + cond_var_.notify_one(); + } + + // Annotate the given HttpRequest with resolve overrides from the cache. + Status AnnotateRequest(HttpRequest* request); + + private: + static std::vector ResolveName(const string& name); + void WorkerThread(); + + // Define a friend class for testing. + friend class GcsDnsCacheTest; + + mutex mu_; + Env* env_; + condition_variable cond_var_; + std::default_random_engine random_ GUARDED_BY(mu_); + bool started_ GUARDED_BY(mu_) = false; + bool cancelled_ GUARDED_BY(mu_) = false; + std::vector www_addresses_ GUARDED_BY(mu_); + std::vector storage_addresses_ GUARDED_BY(mu_); + std::unique_ptr worker_ GUARDED_BY(mu_); // After mutable vars. + const int64 refresh_rate_secs_; +}; + +} // namespace tensorflow + +#endif // THIRD_PARTY_TENSORFLOW_PLATNFORM_CLOUD_DNS_CACHE_H_ diff --git a/tensorflow/core/platform/cloud/gcs_dns_cache_test.cc b/tensorflow/core/platform/cloud/gcs_dns_cache_test.cc new file mode 100644 index 00000000000..cba6caff22e --- /dev/null +++ b/tensorflow/core/platform/cloud/gcs_dns_cache_test.cc @@ -0,0 +1,113 @@ +/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/core/platform/cloud/gcs_dns_cache.h" +#include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/test.h" + +namespace tensorflow { + +class TestHttpRequest : public HttpRequest { + public: + Status Init() override { return Status::OK(); } + Status SetUri(const string& uri) override { return Status::OK(); } + Status SetRange(uint64 start, uint64 end) override { return Status::OK(); } + Status AddHeader(const string& name, const string& value) override { + return Status::OK(); + } + Status AddResolveOverride(const string& hostname, int64 port, + const string& ip_addr) override { + EXPECT_EQ(port, 443) << "Unexpected port set for hostname: " << hostname; + auto itr = resolve_overrides_.find(hostname); + EXPECT_EQ(itr, resolve_overrides_.end()) + << "Hostname " << hostname << "already in map: " << itr->second; + + resolve_overrides_.insert( + std::map::value_type(hostname, ip_addr)); + return Status::OK(); + } + + Status AddAuthBearerHeader(const string& auth_token) override { + return Status::OK(); + } + + Status SetDeleteRequest() override { return Status::OK(); } + + Status SetPutFromFile(const string& body_filepath, size_t offset) override { + return Status::OK(); + } + Status SetPutEmptyBody() override { return Status::OK(); } + + Status SetPostFromBuffer(const char* buffer, size_t size) override { + return Status::OK(); + } + Status SetPostEmptyBody() override { return Status::OK(); } + + Status SetResultBuffer(std::vector* out_buffer) override { + return Status::OK(); + } + + string GetResponseHeader(const string& name) const override { return ""; } + uint64 GetResponseCode() const override { return 0; } + Status Send() override { return Status::OK(); } + string EscapeString(const string& str) override { return ""; } + + std::map resolve_overrides_; +}; + +// Friend class for testing. +// +// It is written this way (as opposed to using FRIEND_TEST) to avoid a +// non-test-time dependency on gunit. +class GcsDnsCacheTest : public ::testing::Test { + protected: + void ResolveNameTest() { + auto response = GcsDnsCache::ResolveName("www.googleapis.com"); + EXPECT_LT(1, response.size()) << str_util::Join(response, ", "); + } + + void AnnotateRequestTest() { + GcsDnsCache d; + { + mutex_lock l(d.mu_); + d.started_ = true; // Avoid creating a thread. + d.www_addresses_ = {"192.168.1.1"}; + d.storage_addresses_ = {"172.134.1.1"}; + } + + TestHttpRequest req; + Status s = d.AnnotateRequest(&req); + EXPECT_TRUE(s.ok()) << s; + EXPECT_EQ("192.168.1.1", req.resolve_overrides_["www.googleapis.com"]); + EXPECT_EQ("172.134.1.1", req.resolve_overrides_["storage.googleapis.com"]); + } + + void SuccessfulCleanupTest() { + // Create a DnsCache object, start the worker thread, ensure it cleans up in + // a timely manner. 
+ GcsDnsCache d; + TestHttpRequest req; + Status s = d.AnnotateRequest(&req); + EXPECT_TRUE(s.ok()) << s; + } +}; + +TEST_F(GcsDnsCacheTest, ResolveName) { ResolveNameTest(); } + +TEST_F(GcsDnsCacheTest, AnnotateRequest) { AnnotateRequestTest(); } + +TEST_F(GcsDnsCacheTest, SuccessfulCleanup) { SuccessfulCleanupTest(); } + +} // namespace tensorflow diff --git a/tensorflow/core/platform/cloud/gcs_file_system.cc b/tensorflow/core/platform/cloud/gcs_file_system.cc index 17fe704b79a..9287de7237d 100644 --- a/tensorflow/core/platform/cloud/gcs_file_system.cc +++ b/tensorflow/core/platform/cloud/gcs_file_system.cc @@ -89,6 +89,10 @@ constexpr char kMatchingPathsCacheMaxEntries[] = constexpr size_t kMatchingPathsCacheDefaultMaxEntries = 1024; // The file statistics returned by Stat() for directories. const FileStatistics DIRECTORY_STAT(0, 0, true); +// Some environments exhibit unreliable DNS resolution. Set this environment +// variable to a positive integer describing the frequency used to refresh the +// userspace DNS cache. +constexpr char kResolveCacheSecs[] = "GCS_RESOLVE_REFRESH_SECS"; Status GetTmpFilename(string* filename) { if (!filename) { @@ -434,8 +438,8 @@ class GcsWritableFile : public WritableFile { std::unique_ptr request(http_request_factory_->Create()); TF_RETURN_IF_ERROR(request->Init()); TF_RETURN_IF_ERROR(request->SetUri(strings::StrCat( - kGcsUploadUriBase, "b/", bucket_, "/o?uploadType=resumable&name=", - request->EscapeString(object_)))); + kGcsUploadUriBase, "b/", bucket_, + "/o?uploadType=resumable&name=", request->EscapeString(object_)))); TF_RETURN_IF_ERROR(request->AddAuthBearerHeader(auth_token)); TF_RETURN_IF_ERROR(request->AddHeader("X-Upload-Content-Length", std::to_string(file_size))); @@ -624,6 +628,12 @@ GcsFileSystem::GcsFileSystem() } matching_paths_cache_.reset(new ExpiringLRUCache>( matching_paths_cache_max_age, matching_paths_cache_max_entries)); + + int64 resolve_frequency_secs; + if (GetEnvVar(kResolveCacheSecs, strings::safe_strto64, + &resolve_frequency_secs)) { + dns_cache_.reset(new GcsDnsCache(resolve_frequency_secs)); + } } GcsFileSystem::GcsFileSystem( @@ -678,6 +688,11 @@ Status GcsFileSystem::LoadBufferFromGCS(const string& filename, size_t offset, TF_RETURN_IF_ERROR(request->AddAuthBearerHeader(auth_token)); TF_RETURN_IF_ERROR(request->SetRange(offset, offset + n - 1)); TF_RETURN_IF_ERROR(request->SetResultBuffer(out)); + + if (dns_cache_) { + TF_RETURN_IF_ERROR(dns_cache_->AnnotateRequest(request.get())); + } + TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when reading gs://", bucket, "/", object); return Status::OK(); @@ -821,6 +836,11 @@ Status GcsFileSystem::StatForObject(const string& fname, const string& bucket, "?fields=size%2Cupdated"))); TF_RETURN_IF_ERROR(request->AddAuthBearerHeader(auth_token)); TF_RETURN_IF_ERROR(request->SetResultBuffer(&output_buffer)); + + if (dns_cache_) { + TF_RETURN_IF_ERROR(dns_cache_->AnnotateRequest(request.get())); + } + TF_RETURN_WITH_CONTEXT_IF_ERROR( request->Send(), " when reading metadata of gs://", bucket, "/", object); @@ -959,12 +979,12 @@ Status GcsFileSystem::GetChildrenBounded(const string& dirname, uri = strings::StrCat(uri, "&delimiter=%2F"); } if (!object_prefix.empty()) { - uri = strings::StrCat(uri, "&prefix=", - request->EscapeString(object_prefix)); + uri = strings::StrCat(uri, + "&prefix=", request->EscapeString(object_prefix)); } if (!nextPageToken.empty()) { - uri = strings::StrCat(uri, "&pageToken=", - request->EscapeString(nextPageToken)); + uri = strings::StrCat( + uri, 
"&pageToken=", request->EscapeString(nextPageToken)); } if (max_results - retrieved_results < kGetChildrenDefaultPageSize) { uri = @@ -973,6 +993,11 @@ Status GcsFileSystem::GetChildrenBounded(const string& dirname, TF_RETURN_IF_ERROR(request->SetUri(uri)); TF_RETURN_IF_ERROR(request->AddAuthBearerHeader(auth_token)); TF_RETURN_IF_ERROR(request->SetResultBuffer(&output_buffer)); + + if (dns_cache_) { + TF_RETURN_IF_ERROR(dns_cache_->AnnotateRequest(request.get())); + } + TF_RETURN_WITH_CONTEXT_IF_ERROR(request->Send(), " when reading ", dirname); Json::Value root; StringPiece response_piece = diff --git a/tensorflow/core/platform/cloud/gcs_file_system.h b/tensorflow/core/platform/cloud/gcs_file_system.h index 36a1d42fdef..4b4853c838a 100644 --- a/tensorflow/core/platform/cloud/gcs_file_system.h +++ b/tensorflow/core/platform/cloud/gcs_file_system.h @@ -22,6 +22,7 @@ limitations under the License. #include "tensorflow/core/platform/cloud/auth_provider.h" #include "tensorflow/core/platform/cloud/expiring_lru_cache.h" #include "tensorflow/core/platform/cloud/file_block_cache.h" +#include "tensorflow/core/platform/cloud/gcs_dns_cache.h" #include "tensorflow/core/platform/cloud/http_request.h" #include "tensorflow/core/platform/cloud/retrying_file_system.h" #include "tensorflow/core/platform/file_system.h" @@ -141,6 +142,7 @@ class GcsFileSystem : public FileSystem { std::unique_ptr auth_provider_; std::unique_ptr http_request_factory_; std::unique_ptr file_block_cache_; + std::unique_ptr dns_cache_; using StatCache = ExpiringLRUCache; std::unique_ptr stat_cache_; diff --git a/tensorflow/core/platform/cloud/http_request.h b/tensorflow/core/platform/cloud/http_request.h index 8182b63d5b2..02d9e9054ad 100644 --- a/tensorflow/core/platform/cloud/http_request.h +++ b/tensorflow/core/platform/cloud/http_request.h @@ -64,6 +64,14 @@ class HttpRequest { /// Sets a request header. virtual Status AddHeader(const string& name, const string& value) = 0; + /// Sets a DNS resolve mapping (to skip DNS resolution). + /// + /// Note: because GCS is available over HTTPS, we cannot replace the hostname + /// in the URI with an IP address, as that will cause the certificate check + /// to fail. + virtual Status AddResolveOverride(const string& hostname, int64 port, + const string& ip_addr) = 0; + /// Sets the 'Authorization' header to the value of 'Bearer ' + auth_token. virtual Status AddAuthBearerHeader(const string& auth_token) = 0; From 7d3e728369ed0ee8c982202ea64547488ed9aa1a Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Wed, 15 Nov 2017 18:01:52 -0800 Subject: [PATCH 084/104] Don't assert when processing invalid dimensions PiperOrigin-RevId: 175910804 --- tensorflow/core/grappler/costs/graph_properties.cc | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/grappler/costs/graph_properties.cc b/tensorflow/core/grappler/costs/graph_properties.cc index 26b85217718..d33d86df3a9 100644 --- a/tensorflow/core/grappler/costs/graph_properties.cc +++ b/tensorflow/core/grappler/costs/graph_properties.cc @@ -91,8 +91,15 @@ struct Processor { *result = -counter; counter++; } else { - CHECK_LE(0, InferenceContext::Value(d)); - *result = InferenceContext::Value(d); + int64 val = InferenceContext::Value(d); + if (val >= 0) { + *result = val; + } else { + // A shape inference function generated an invalid dimension handle. + // Use a symbolic dimension to encode this. 
+ *result = -counter; + counter++; + } } } From 8c16cf3ff96ccc17a5953c62bf23616472f7cffc Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 18:05:34 -0800 Subject: [PATCH 085/104] Hlo parser: support infeed and outfeed. PiperOrigin-RevId: 175911331 --- .../compiler/xla/service/hlo_instruction.cc | 9 ++++++- .../compiler/xla/tools/parser/hlo_parser.cc | 26 ++++++++++++++++--- .../xla/tools/parser/hlo_parser_test.cc | 14 ++++++++++ 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc index d3096231dca..045abdac8b7 100644 --- a/tensorflow/compiler/xla/service/hlo_instruction.cc +++ b/tensorflow/compiler/xla/service/hlo_instruction.cc @@ -43,6 +43,7 @@ limitations under the License. namespace xla { +using tensorflow::str_util::CEscape; using ::tensorflow::str_util::Join; using ::tensorflow::strings::StrAppend; using ::tensorflow::strings::StrCat; @@ -1965,6 +1966,13 @@ std::vector HloInstruction::ExtraAttributesToString() const { }), "}")); } + if (opcode() == HloOpcode::kInfeed && !infeed_config_.empty()) { + extra.push_back(StrCat("infeed_config=\"", CEscape(infeed_config_), "\"")); + } + if (opcode() == HloOpcode::kOutfeed && !outfeed_config_.empty()) { + extra.push_back( + StrCat("outfeed_config=\"", CEscape(outfeed_config_), "\"")); + } return extra; } @@ -2920,7 +2928,6 @@ string PaddingConfigToString(const PaddingConfig& padding) { string OpMetadataToString(const OpMetadata& metadata) { std::vector result; - using tensorflow::str_util::CEscape; if (!metadata.op_type().empty()) { result.push_back(StrCat("op_type=\"", CEscape(metadata.op_type()), "\"")); } diff --git a/tensorflow/compiler/xla/tools/parser/hlo_parser.cc b/tensorflow/compiler/xla/tools/parser/hlo_parser.cc index 3e3406e658f..a65e5a856f7 100644 --- a/tensorflow/compiler/xla/tools/parser/hlo_parser.cc +++ b/tensorflow/compiler/xla/tools/parser/hlo_parser.cc @@ -776,11 +776,31 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder, shape, *fusion_kind, operands, *fusion_computation)); break; } + case HloOpcode::kInfeed: { + optional config; + attrs["infeed_config"] = {/*required=*/false, AttrTy::kString, &config}; + if (!ParseOperands(&operands, /*expected_size=*/0) || + !ParseAttributes(attrs)) { + return false; + } + instruction = builder->AddInstruction( + HloInstruction::CreateInfeed(shape, config ? *config : "")); + break; + } + case HloOpcode::kOutfeed: { + optional config; + attrs["outfeed_config"] = {/*required=*/false, AttrTy::kString, &config}; + if (!ParseOperands(&operands, /*expected_size=*/1) || + !ParseAttributes(attrs)) { + return false; + } + instruction = builder->AddInstruction(HloInstruction::CreateOutfeed( + shape, operands[0], config ? 
*config : "")); + break; + } case HloOpcode::kCustomCall: case HloOpcode::kReducePrecision: case HloOpcode::kRng: - case HloOpcode::kInfeed: - case HloOpcode::kOutfeed: case HloOpcode::kTrace: return TokenError(StrCat("parsing not yet implemented for op: ", HloOpcodeString(opcode))); @@ -805,7 +825,7 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder, instruction->set_metadata(*metadata); } return AddInstruction(name, instruction); -} +} // NOLINT(readability/fn_size) // ::= '{' (single_sharding | tuple_sharding) '}' // diff --git a/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc b/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc index 29ae3296ca2..0ebc0ca44bb 100644 --- a/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc +++ b/tensorflow/compiler/xla/tools/parser/hlo_parser_test.cc @@ -560,6 +560,20 @@ ENTRY %fusion.v3 () -> f32[3,2,1,1] { ROOT %fusion = f32[3,2,1,1]{3,2,1,0} fusion(f32[3,2,1,1]{3,2,1,0} %constant, f32[2]{0} %constant.1), kind=kLoop, calls=%fused_computation } +)" +}, +// infeed/outfeed +{ +"InfeedOutfeed", +R"(HloModule outfeed_module: + +ENTRY %InfeedToOutfeed () -> (u32[3], pred[]) { + %infeed = (u32[3]{0}, pred[]) infeed() + %outfeed = () outfeed((u32[3]{0}, pred[]) %infeed) + ROOT %infeed.1 = (u32[3]{0}, pred[]) infeed() + %outfeed.1 = () outfeed((u32[3]{0}, pred[]) %infeed.1) +} + )" } }); From 0dfcc34c954513ff26d20729712baade9dda93ed Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 18:11:54 -0800 Subject: [PATCH 086/104] Cast offsets into int64 to allow for cases where tag_indices is of type int64 (e.g. the output of a call to tf.lookup.*). PiperOrigin-RevId: 175912063 --- .../crf/python/kernel_tests/crf_test.py | 26 +++++++++---------- tensorflow/contrib/crf/python/ops/crf.py | 9 ++++--- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/tensorflow/contrib/crf/python/kernel_tests/crf_test.py b/tensorflow/contrib/crf/python/kernel_tests/crf_test.py index 9174c5eb989..964ec754413 100644 --- a/tensorflow/contrib/crf/python/kernel_tests/crf_test.py +++ b/tensorflow/contrib/crf/python/kernel_tests/crf_test.py @@ -23,7 +23,6 @@ import itertools import numpy as np from tensorflow.contrib.crf.python.ops import crf -from tensorflow.python.framework import dtypes from tensorflow.python.framework import constant_op from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops @@ -58,18 +57,19 @@ class CrfTest(test.TestCase): def testCrfUnaryScore(self): inputs = np.array( [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32) - tag_indices = np.array([1, 2, 1, 0], dtype=np.int32) - sequence_lengths = np.array(3, dtype=np.int32) - with self.test_session() as sess: - unary_score = crf.crf_unary_score( - tag_indices=array_ops.expand_dims(tag_indices, 0), - sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), - inputs=array_ops.expand_dims(inputs, 0)) - unary_score = array_ops.squeeze(unary_score, [0]) - tf_unary_score = sess.run(unary_score) - expected_unary_score = sum(inputs[i][tag_indices[i]] - for i in range(sequence_lengths)) - self.assertAllClose(tf_unary_score, expected_unary_score) + for dtype in (np.int32, np.int64): + tag_indices = np.array([1, 2, 1, 0], dtype=dtype) + sequence_lengths = np.array(3, dtype=np.int32) + with self.test_session() as sess: + unary_score = crf.crf_unary_score( + tag_indices=array_ops.expand_dims(tag_indices, 0), + sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), + inputs=array_ops.expand_dims(inputs, 0)) 
+ unary_score = array_ops.squeeze(unary_score, [0]) + tf_unary_score = sess.run(unary_score) + expected_unary_score = sum(inputs[i][tag_indices[i]] + for i in range(sequence_lengths)) + self.assertAllClose(tf_unary_score, expected_unary_score) def testCrfBinaryScore(self): tag_indices = np.array([1, 2, 1, 0], dtype=np.int32) diff --git a/tensorflow/contrib/crf/python/ops/crf.py b/tensorflow/contrib/crf/python/ops/crf.py index 7166e38b283..4282be5ec8c 100644 --- a/tensorflow/contrib/crf/python/ops/crf.py +++ b/tensorflow/contrib/crf/python/ops/crf.py @@ -193,6 +193,9 @@ def crf_unary_score(tag_indices, sequence_lengths, inputs): offsets = array_ops.expand_dims( math_ops.range(batch_size) * max_seq_len * num_tags, 1) offsets += array_ops.expand_dims(math_ops.range(max_seq_len) * num_tags, 0) + # Use int32 or int64 based on tag_indices' dtype. + if tag_indices.dtype == dtypes.int64: + offsets = math_ops.to_int64(offsets) flattened_tag_indices = array_ops.reshape(offsets + tag_indices, [-1]) unary_scores = array_ops.reshape( @@ -305,7 +308,7 @@ def viterbi_decode(score, transition_params): Returns: viterbi: A [seq_len] list of integers containing the highest scoring tag - indicies. + indices. viterbi_score: A float containing the score for the Viterbi sequence. """ trellis = np.zeros_like(score) @@ -385,7 +388,7 @@ class CrfDecodeBackwardRnnCell(rnn_cell.RNNCell): """Initialize the CrfDecodeBackwardRnnCell. Args: - num_tags + num_tags: The number of tags. """ self._num_tags = num_tags @@ -434,7 +437,7 @@ def crf_decode(potentials, transition_params, sequence_length): Returns: decode_tags: A [batch_size, max_seq_len] tensor, with dtype tf.int32. - Contains the highest scoring tag indicies. + Contains the highest scoring tag indices. best_score: A [batch_size] tensor, containing the score of decode_tags. """ # For simplicity, in shape comments, denote: From 2800d6e92b57caeb68cdda24c58eeffb57219b53 Mon Sep 17 00:00:00 2001 From: Max Galkin Date: Wed, 15 Nov 2017 18:18:50 -0800 Subject: [PATCH 087/104] Minor change in VirtualScheduler logging: there's sometimes a difference between device total uptime and the sum of per-op computation time, because uptime includes waiting for channel communications. 
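The new log line below reports that difference as a utilization percentage: the summed per-op compute time divided by the device's wall time, which stays under 100% whenever the device sits idle waiting on channels. A minimal self-contained sketch of that arithmetic (illustrative values and names only, not the actual VirtualScheduler code):

#include <cstdint>
#include <iostream>

int main() {
  // Assumed sample values; in the scheduler these come from summing
  // per-op execution times and from the device's simulated current time.
  const int64_t total_compute_time_ns = 750000;  // sum of per-op costs
  const int64_t wall_time_ns = 1000000;          // uptime, incl. channel waits
  int utilization = 0;
  if (wall_time_ns > 0) {
    utilization = static_cast<int>(total_compute_time_ns * 100 / wall_time_ns);
  }
  std::cout << "utilization = " << utilization << "%\n";  // prints 75%
  return 0;
}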
PiperOrigin-RevId: 175912780 --- .../core/grappler/costs/virtual_scheduler.cc | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tensorflow/core/grappler/costs/virtual_scheduler.cc b/tensorflow/core/grappler/costs/virtual_scheduler.cc index 2ab3a9144c8..0bb98d37930 100644 --- a/tensorflow/core/grappler/costs/virtual_scheduler.cc +++ b/tensorflow/core/grappler/costs/virtual_scheduler.cc @@ -677,10 +677,10 @@ Costs VirtualScheduler::Summary() const { critical_path_costs.estimated_max_memory_per_device[name] = max_memory_usage; + const Costs::NanoSeconds wall_time_ns = state.GetCurrTime(); VLOG(1) << "Device = " << name << ", num_nodes = " << state.nodes_executed.size() - << ", execution_time = " << state.GetCurrTime().count() - << ", memory usage: " + << ", wall_time_ns = " << wall_time_ns.count() << ", memory usage: " << "persistent = " << strings::HumanReadableNumBytes(persistent_memory_usage) << ", peak = " @@ -698,9 +698,11 @@ Costs VirtualScheduler::Summary() const { op_to_memory[node->op()] += CalculateOutputSize(node_map_.at(node).output_properties, port); } + Costs::NanoSeconds total_compute_time_ns; for (const auto& op_cost_pair : state.op_to_cost) { const auto& op = op_cost_pair.first; const auto& cost = op_cost_pair.second.execution_time.count(); + total_compute_time_ns += op_cost_pair.second.execution_time; int64 op_mem_usage = 0; auto it = op_to_memory.find(op); if (it != op_to_memory.end()) { @@ -718,6 +720,15 @@ Costs VirtualScheduler::Summary() const { << (persisent_ops.count(op) > 0 ? ": persistent op)" : ")"); } } + + int utilization = 0; + if (wall_time_ns.count() > 0) { + utilization = total_compute_time_ns.count() * 100 / wall_time_ns.count(); + } + VLOG(1) << "Device = " << name + << ", total_compute_time_ns = " << total_compute_time_ns.count() + << ", utilization = " << utilization << "%"; + if (critical_path_costs.execution_time <= state.GetCurrTime()) { critical_path_costs = state.device_costs; } From d8af56e3b4cd0ac5096e32c3eee2d2cfb4d4137d Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 18:21:58 -0800 Subject: [PATCH 088/104] Fix data race in tensorflow/core/kernels/map_stage_op.cc, and use tf locking/formatting conventions. - Fix data race in which a routine mysteriously releases a lock which was held when it was called, and which the caller expects will still be held afterwards.(!) See the changes to the routines notify_inserters_if_bounded() and notify_removers(). The data race was in put() on current_bytes_ near the end, after the call to put_complete(). put_complete() calls notify_removers(), which released the lock, thus causing the race. Any reader of the code would have expected that the lock would have been held at that point, but it wasn't. The same pattern in notify_inserters_if_bounded() didn't cause a race in the current code, but is a pitfall for future maintainers, who would not mornally expect something called notify_inserters_if_bounded() to mean "and in some circumstances, unlock". Even in the absense of this bug, and the weird, uncommented spec for those routines, it is almost always a bad idea to release the lock before calling a condition-variable wakeup. First, it complicates the code, and makes it hard to reason about object deletion if the deletion is predicated by one of the signalled conditions. 
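(A minimal sketch of the shape this change moves toward, using plain std::mutex and std::condition_variable rather than the tensorflow wrappers adopted in the diff, with illustrative names: waiters re-check their predicate in a while-loop, and notifiers signal while still holding the lock, so no caller ever finds a lock silently dropped on its behalf.)

#include <condition_variable>
#include <cstddef>
#include <mutex>

class BoundedCounter {
 public:
  explicit BoundedCounter(std::size_t capacity) : capacity_(capacity) {}

  void Add() {
    std::unique_lock<std::mutex> lock(mu_);
    // While-loop wait: re-check the condition after every wakeup.
    while (count_ >= capacity_) {
      not_full_.wait(lock);
    }
    ++count_;
    // Notify while still holding the lock; the unlock happens only when
    // `lock` goes out of scope, where any reader of Add() expects it.
    not_empty_.notify_all();
  }

  void Remove() {
    std::unique_lock<std::mutex> lock(mu_);
    while (count_ == 0) {
      not_empty_.wait(lock);
    }
    --count_;
    not_full_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable not_full_;
  std::condition_variable not_empty_;
  const std::size_t capacity_;
  std::size_t count_ = 0;
};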
Second, it's an _unnecessary_ complication: a well-written condition variable implementation will requeue the woken thread(s) on the mutex queue, so the expensive part of the wakeup will be deferred until the waking thread unlocks anyway. - Avoid the C++11 unique_lock mechanism that allowed the bug to happen, and instead use tensorflow::mutex, tensorflow::mutex_lock, and tensorflow::condition_variable. - Use while-loops instead of lambdas for condition-variable waits, because while-loops are much easier to read. - Fix the routines would_exceed_memory_limit() and is_capacity_full() to include their own validity check, so it's not needed at each of their call sites. This improves the readability at the call sites. - Use annotalysis locking annotations so that other violations can be found statically by the compiler. - Reformat with clang-format. PiperOrigin-RevId: 175913085 --- tensorflow/core/kernels/map_stage_op.cc | 242 ++++++++++++------------ 1 file changed, 123 insertions(+), 119 deletions(-) diff --git a/tensorflow/core/kernels/map_stage_op.cc b/tensorflow/core/kernels/map_stage_op.cc index 7b5a464b722..bdc3b5778f0 100644 --- a/tensorflow/core/kernels/map_stage_op.cc +++ b/tensorflow/core/kernels/map_stage_op.cc @@ -29,6 +29,7 @@ limitations under the License. #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/mutex.h" +#include "tensorflow/core/platform/thread_annotations.h" namespace tensorflow { namespace { @@ -36,16 +37,14 @@ namespace { // Partial Ordering Comparator for Tensor keys containing scalar int64's struct KeyTensorLess { bool operator()(const Tensor& lhs, const Tensor& rhs) const { - return std::less{}(lhs.scalar()(), - rhs.scalar()()); + return std::less{}(lhs.scalar()(), rhs.scalar()()); } }; // Key Equality operator for Tensor keys containing scalar int64's struct KeyTensorEqual { bool operator()(const Tensor& lhs, const Tensor& rhs) const { - return std::equal_to{}(lhs.scalar()(), - rhs.scalar()()); + return std::equal_to{}(lhs.scalar()(), rhs.scalar()()); } }; @@ -93,24 +92,23 @@ class StagingMap : public ResourceBase { private: // Private variables - DataTypeVector dtypes_; - std::size_t capacity_; - std::size_t memory_limit_; - std::size_t current_bytes_; - std::mutex mu_; - std::condition_variable not_empty_; - std::condition_variable full_; - IncompleteType incomplete_; - MapType map_; + DataTypeVector dtypes_ GUARDED_BY(mu_); + std::size_t capacity_ GUARDED_BY(mu_); + std::size_t memory_limit_ GUARDED_BY(mu_); + std::size_t current_bytes_ GUARDED_BY(mu_); + tensorflow::mutex mu_; + tensorflow::condition_variable not_empty_; + tensorflow::condition_variable full_; + IncompleteType incomplete_ GUARDED_BY(mu_); + MapType map_ GUARDED_BY(mu_); private: // private methods // If map is configured for bounded capacity, notify // waiting inserters that space is now available - void notify_inserters_if_bounded(std::unique_lock* lock) { + void notify_inserters_if_bounded() EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (has_capacity() || has_memory_limit()) { - lock->unlock(); // Notify all inserters. The removal of an element // may make memory available for many inserters // to insert new elements @@ -120,23 +118,29 @@ class StagingMap : public ResourceBase { // Notify all removers waiting to extract values // that data is now available - void notify_removers(std::unique_lock* lock) { - lock->unlock(); + void notify_removers() { // Notify all removers. 
This is because they are // waiting for specific keys to appear in the map // so we don't know which one to wake up. not_empty_.notify_all(); } - bool has_capacity() const { return capacity_ > 0; } - - bool has_memory_limit() const { return memory_limit_ > 0; } - - bool would_exceed_memory_limit(std::size_t bytes) const { - return bytes + current_bytes_ > memory_limit_; + bool has_capacity() const EXCLUSIVE_LOCKS_REQUIRED(mu_) { + return capacity_ > 0; } - bool is_capacity_full() const { return map_.size() >= capacity_; } + bool has_memory_limit() const EXCLUSIVE_LOCKS_REQUIRED(mu_) { + return memory_limit_ > 0; + } + + bool would_exceed_memory_limit(std::size_t bytes) const + EXCLUSIVE_LOCKS_REQUIRED(mu_) { + return has_memory_limit() && bytes + current_bytes_ > memory_limit_; + } + + bool is_capacity_full() const EXCLUSIVE_LOCKS_REQUIRED(mu_) { + return has_capacity() && map_.size() >= capacity_; + } // Get number of bytes in the tuple std::size_t get_tuple_bytes(const Tuple& tuple) { @@ -157,7 +161,8 @@ class StagingMap : public ResourceBase { } // Check that the index is within bounds - Status check_index(const Tensor& key, std::size_t index) { + Status check_index(const Tensor& key, std::size_t index) + EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (index >= dtypes_.size()) { return Status(errors::InvalidArgument( "Index '", index, "' for key '", key.scalar()(), @@ -169,7 +174,7 @@ class StagingMap : public ResourceBase { Status copy_or_move_tensors(OptionalTuple* map_tuple, const Tensor& key, const Tensor& indices, Tuple* output, - bool copy = false) { + bool copy = false) EXCLUSIVE_LOCKS_REQUIRED(mu_) { auto findices = indices.flat(); // Return values at specified indices @@ -201,11 +206,12 @@ class StagingMap : public ResourceBase { // Check that the optional value at the specified index // is uninitialized Status check_index_uninitialized(const Tensor& key, std::size_t index, - const OptionalTuple& tuple) { + const OptionalTuple& tuple) + EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (tuple[index].has_value()) { - return Status(errors::InvalidArgument("The tensor for index '", - index, "' for key '", key.scalar()(), - "' was already initialized '", dtypes_.size(), "'.")); + return Status(errors::InvalidArgument( + "The tensor for index '", index, "' for key '", key.scalar()(), + "' was already initialized '", dtypes_.size(), "'.")); } return Status::OK(); @@ -228,7 +234,7 @@ class StagingMap : public ResourceBase { } // Check bytes are within memory limits memory limits - Status check_memory_limit(std::size_t bytes) { + Status check_memory_limit(std::size_t bytes) EXCLUSIVE_LOCKS_REQUIRED(mu_) { if (has_memory_limit() && bytes > memory_limit_) { return Status(errors::ResourceExhausted( "Attempted to insert tensors with combined size of '", bytes, @@ -241,8 +247,8 @@ class StagingMap : public ResourceBase { // Insert incomplete data into the Barrier Status put_incomplete(const KeyType& key, const Tensor& indices, - OptionalTuple* tuple, - std::unique_lock* lock) { + OptionalTuple* tuple, tensorflow::mutex_lock* lock) + EXCLUSIVE_LOCKS_REQUIRED(mu_) { auto findices = indices.flat(); // Search for the key in our incomplete set @@ -252,11 +258,9 @@ class StagingMap : public ResourceBase { std::size_t tuple_bytes = get_tuple_bytes(*tuple); TF_RETURN_IF_ERROR(check_memory_limit(tuple_bytes)); - if (has_memory_limit()) { - full_.wait(*lock, [tuple_bytes, this]() { - // Stop waiting if we don't exceed the memory limit - return !would_exceed_memory_limit(tuple_bytes); - }); + // Wait until we don't exceed the 
memory limit + while (would_exceed_memory_limit(tuple_bytes)) { + full_.wait(*lock); } // This key isn't present in the incomplete set @@ -282,8 +286,7 @@ class StagingMap : public ResourceBase { // Found an entry in the incomplete index // Update with given data and insert complete entries // into the main map - else - { + else { // Reference existing incomplete tuple OptionalTuple& present = it->second; @@ -312,7 +315,7 @@ class StagingMap : public ResourceBase { // Remove from incomplete incomplete_.erase(it); - TF_RETURN_IF_ERROR(put_complete(key, &insert_tuple, lock)); + TF_RETURN_IF_ERROR(put_complete(key, &insert_tuple)); } } @@ -320,12 +323,12 @@ class StagingMap : public ResourceBase { } // Does the insertion into the actual staging area - Status put_complete(const KeyType& key, OptionalTuple* tuple, - std::unique_lock* lock) { + Status put_complete(const KeyType& key, OptionalTuple* tuple) + EXCLUSIVE_LOCKS_REQUIRED(mu_) { // Insert key and tuples into the map map_.insert({key, std::move(*tuple)}); - notify_removers(lock); + notify_removers(); return Status::OK(); } @@ -340,7 +343,7 @@ class StagingMap : public ResourceBase { current_bytes_(0) {} Status put(KeyType* key, const Tensor* indices, OptionalTuple* tuple) { - std::unique_lock lock(mu_); + tensorflow::mutex_lock lock(mu_); // Sanity check the indices TF_RETURN_IF_ERROR(check_index_ordering(*indices)); @@ -354,22 +357,13 @@ class StagingMap : public ResourceBase { // Check that tuple_bytes fits within the memory limit TF_RETURN_IF_ERROR(check_memory_limit(tuple_bytes)); - // If map capacity is bounded wait until map is not full - if (has_capacity() || has_memory_limit()) { - full_.wait(lock, [tuple_bytes, this]() { - // If there's a memory limit, check if there's space for insertion - bool memory_limit_valid = - has_memory_limit() ? !would_exceed_memory_limit(tuple_bytes) : true; - // If we're configured for capacity check if there's space for insertion - bool capacity_valid = has_capacity() ? !is_capacity_full() : true; - - // Stop waiting upon success for both conditions - return memory_limit_valid && capacity_valid; - }); + // Wait until there's space for insertion. 
+ while (would_exceed_memory_limit(tuple_bytes) || is_capacity_full()) { + full_.wait(lock); } // Do the put operation - TF_RETURN_IF_ERROR(put_complete(*key, tuple, &lock)); + TF_RETURN_IF_ERROR(put_complete(*key, tuple)); // Update the current size current_bytes_ += tuple_bytes; @@ -378,7 +372,7 @@ class StagingMap : public ResourceBase { } Status get(const KeyType* key, const Tensor* indices, Tuple* tuple) { - std::unique_lock lock(mu_); + tensorflow::mutex_lock lock(mu_); // Sanity check the indices TF_RETURN_IF_ERROR(check_index_ordering(*indices)); @@ -386,8 +380,9 @@ class StagingMap : public ResourceBase { typename MapType::iterator it; // Wait until the element with the requested key is present - not_empty_.wait( - lock, [&, this]() { return (it = map_.find(*key)) != map_.end(); }); + while ((it = map_.find(*key)) == map_.end()) { + not_empty_.wait(lock); + } TF_RETURN_IF_ERROR( copy_or_move_tensors(&it->second, *key, *indices, tuple, true)); @@ -399,7 +394,7 @@ class StagingMap : public ResourceBase { } Status pop(const KeyType* key, const Tensor* indices, Tuple* tuple) { - std::unique_lock lock(mu_); + tensorflow::mutex_lock lock(mu_); // Sanity check the indices TF_RETURN_IF_ERROR(check_index_ordering(*indices)); @@ -407,8 +402,9 @@ class StagingMap : public ResourceBase { typename MapType::iterator it; // Wait until the element with the requested key is present - not_empty_.wait( - lock, [&, this]() { return (it = map_.find(*key)) != map_.end(); }); + while ((it = map_.find(*key)) == map_.end()) { + not_empty_.wait(lock); + } TF_RETURN_IF_ERROR( copy_or_move_tensors(&it->second, *key, *indices, tuple)); @@ -422,19 +418,21 @@ class StagingMap : public ResourceBase { // Update bytes in the Staging Area current_bytes_ -= get_tuple_bytes(*tuple); - notify_inserters_if_bounded(&lock); + notify_inserters_if_bounded(); return Status::OK(); } Status popitem(KeyType* key, const Tensor* indices, Tuple* tuple) { - std::unique_lock lock(mu_); + tensorflow::mutex_lock lock(mu_); // Sanity check the indices TF_RETURN_IF_ERROR(check_index_ordering(*indices)); // Wait until map is not empty - not_empty_.wait(lock, [this]() { return !this->map_.empty(); }); + while (this->map_.empty()) { + not_empty_.wait(lock); + } // Move from the first element and erase it @@ -454,29 +452,29 @@ class StagingMap : public ResourceBase { // Update bytes in the Staging Area current_bytes_ -= get_tuple_bytes(*tuple); - notify_inserters_if_bounded(&lock); + notify_inserters_if_bounded(); return Status::OK(); } Status clear() { - std::unique_lock lock(mu_); + tensorflow::mutex_lock lock(mu_); map_.clear(); incomplete_.clear(); current_bytes_ = 0; - notify_inserters_if_bounded(&lock); + notify_inserters_if_bounded(); return Status::OK(); } std::size_t incomplete_size() { - std::unique_lock lock(mu_); + tensorflow::mutex_lock lock(mu_); return incomplete_.size(); } std::size_t size() { - std::unique_lock lock(mu_); + tensorflow::mutex_lock lock(mu_); return map_.size(); } @@ -539,10 +537,9 @@ class MapStageOp : public OpKernel { } }; -REGISTER_KERNEL_BUILDER(Name("MapStage").Device(DEVICE_CPU), - MapStageOp); +REGISTER_KERNEL_BUILDER(Name("MapStage").Device(DEVICE_CPU), MapStageOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapStage").Device(DEVICE_CPU), - MapStageOp); + MapStageOp); #if GOOGLE_CUDA REGISTER_KERNEL_BUILDER( @@ -553,7 +550,7 @@ REGISTER_KERNEL_BUILDER(Name("OrderedMapStage") .HostMemory("indices") .Device(DEVICE_GPU), MapStageOp); -#endif // GOOGLE_CUDA +#endif // GOOGLE_CUDA #ifdef TENSORFLOW_USE_SYCL 
REGISTER_KERNEL_BUILDER(Name("MapStage") @@ -601,30 +598,34 @@ class MapUnstageOp : public OpKernel { }; REGISTER_KERNEL_BUILDER(Name("MapUnstage").Device(DEVICE_CPU), - MapUnstageOp); + MapUnstageOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapUnstage").Device(DEVICE_CPU), - MapUnstageOp); + MapUnstageOp); #if GOOGLE_CUDA REGISTER_KERNEL_BUILDER(Name("MapUnstage") - .HostMemory("key") - .HostMemory("indices") - .Device(DEVICE_GPU), MapUnstageOp); + .HostMemory("key") + .HostMemory("indices") + .Device(DEVICE_GPU), + MapUnstageOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapUnstage") - .HostMemory("key") - .HostMemory("indices") - .Device(DEVICE_GPU), MapUnstageOp); + .HostMemory("key") + .HostMemory("indices") + .Device(DEVICE_GPU), + MapUnstageOp); #endif #ifdef TENSORFLOW_USE_SYCL REGISTER_KERNEL_BUILDER(Name("MapUnstage") - .HostMemory("key") - .HostMemory("indices") - .Device(DEVICE_SYCL), MapUnstageOp); + .HostMemory("key") + .HostMemory("indices") + .Device(DEVICE_SYCL), + MapUnstageOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapUnstage") - .HostMemory("key") - .HostMemory("indices") - .Device(DEVICE_SYCL), MapUnstageOp); -#endif // TENSORFLOW_USE_SYCL + .HostMemory("key") + .HostMemory("indices") + .Device(DEVICE_SYCL), + MapUnstageOp); +#endif // TENSORFLOW_USE_SYCL template class MapPeekOp : public OpKernel { @@ -682,7 +683,7 @@ REGISTER_KERNEL_BUILDER(Name("OrderedMapPeek") .HostMemory("indices") .Device(DEVICE_SYCL), MapPeekOp); -#endif // TENSORFLOW_USE_SYCL +#endif // TENSORFLOW_USE_SYCL template class MapUnstageNoKeyOp : public OpKernel { @@ -715,7 +716,7 @@ class MapUnstageNoKeyOp : public OpKernel { " vs. ", indices_tensor->NumElements())); for (std::size_t i = 0; i < tuple.size(); ++i) { - ctx->set_output(i+1, tuple[i]); + ctx->set_output(i + 1, tuple[i]); } } }; @@ -749,7 +750,7 @@ REGISTER_KERNEL_BUILDER(Name("OrderedMapUnstageNoKey") .HostMemory("indices") .Device(DEVICE_SYCL), MapUnstageNoKeyOp); -#endif // TENSORFLOW_USE_SYCL +#endif // TENSORFLOW_USE_SYCL template class MapSizeOp : public OpKernel { @@ -770,23 +771,24 @@ class MapSizeOp : public OpKernel { } }; -REGISTER_KERNEL_BUILDER(Name("MapSize").Device(DEVICE_CPU), - MapSizeOp); +REGISTER_KERNEL_BUILDER(Name("MapSize").Device(DEVICE_CPU), MapSizeOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapSize").Device(DEVICE_CPU), MapSizeOp); #if GOOGLE_CUDA -REGISTER_KERNEL_BUILDER(Name("MapSize").Device(DEVICE_GPU) - .HostMemory("size"), MapSizeOp); -REGISTER_KERNEL_BUILDER(Name("OrderedMapSize").Device(DEVICE_GPU) - .HostMemory("size"), MapSizeOp); +REGISTER_KERNEL_BUILDER(Name("MapSize").Device(DEVICE_GPU).HostMemory("size"), + MapSizeOp); +REGISTER_KERNEL_BUILDER( + Name("OrderedMapSize").Device(DEVICE_GPU).HostMemory("size"), + MapSizeOp); #endif #ifdef TENSORFLOW_USE_SYCL -REGISTER_KERNEL_BUILDER(Name("MapSize").Device(DEVICE_SYCL) - .HostMemory("size"), MapSizeOp); -REGISTER_KERNEL_BUILDER(Name("OrderedMapSize").Device(DEVICE_SYCL) - .HostMemory("size"), MapSizeOp); -#endif // TENSORFLOW_USE_SYCL +REGISTER_KERNEL_BUILDER(Name("MapSize").Device(DEVICE_SYCL).HostMemory("size"), + MapSizeOp); +REGISTER_KERNEL_BUILDER( + Name("OrderedMapSize").Device(DEVICE_SYCL).HostMemory("size"), + MapSizeOp); +#endif // TENSORFLOW_USE_SYCL template class MapIncompleteSizeOp : public OpKernel { @@ -813,17 +815,21 @@ REGISTER_KERNEL_BUILDER(Name("OrderedMapIncompleteSize").Device(DEVICE_CPU), MapIncompleteSizeOp); #if GOOGLE_CUDA -REGISTER_KERNEL_BUILDER(Name("MapIncompleteSize").Device(DEVICE_GPU) - .HostMemory("size"), MapIncompleteSizeOp); 
-REGISTER_KERNEL_BUILDER(Name("OrderedMapIncompleteSize").Device(DEVICE_GPU) - .HostMemory("size"), MapIncompleteSizeOp); +REGISTER_KERNEL_BUILDER( + Name("MapIncompleteSize").Device(DEVICE_GPU).HostMemory("size"), + MapIncompleteSizeOp); +REGISTER_KERNEL_BUILDER( + Name("OrderedMapIncompleteSize").Device(DEVICE_GPU).HostMemory("size"), + MapIncompleteSizeOp); #endif #ifdef TENSORFLOW_USE_SYCL -REGISTER_KERNEL_BUILDER(Name("MapIncompleteSize").Device(DEVICE_SYCL) - .HostMemory("size"), MapIncompleteSizeOp); -REGISTER_KERNEL_BUILDER(Name("OrderedMapIncompleteSize").Device(DEVICE_SYCL) - .HostMemory("size"), MapIncompleteSizeOp); -#endif // TENSORFLOW_USE_SYCL +REGISTER_KERNEL_BUILDER( + Name("MapIncompleteSize").Device(DEVICE_SYCL).HostMemory("size"), + MapIncompleteSizeOp); +REGISTER_KERNEL_BUILDER( + Name("OrderedMapIncompleteSize").Device(DEVICE_SYCL).HostMemory("size"), + MapIncompleteSizeOp); +#endif // TENSORFLOW_USE_SYCL template class MapClearOp : public OpKernel { @@ -839,14 +845,12 @@ class MapClearOp : public OpKernel { } }; -REGISTER_KERNEL_BUILDER(Name("MapClear").Device(DEVICE_CPU), - MapClearOp); +REGISTER_KERNEL_BUILDER(Name("MapClear").Device(DEVICE_CPU), MapClearOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapClear").Device(DEVICE_CPU), MapClearOp); #if GOOGLE_CUDA -REGISTER_KERNEL_BUILDER(Name("MapClear").Device(DEVICE_GPU), - MapClearOp); +REGISTER_KERNEL_BUILDER(Name("MapClear").Device(DEVICE_GPU), MapClearOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapClear").Device(DEVICE_GPU), MapClearOp); #endif @@ -855,7 +859,7 @@ REGISTER_KERNEL_BUILDER(Name("MapClear").Device(DEVICE_SYCL), MapClearOp); REGISTER_KERNEL_BUILDER(Name("OrderedMapClear").Device(DEVICE_SYCL), MapClearOp); -#endif // TENSORFLOW_USE_SYCL +#endif // TENSORFLOW_USE_SYCL } // namespace } // namespace tensorflow From 1ddd7bdda493b8212437c2e26f15993ef3186b52 Mon Sep 17 00:00:00 2001 From: Yunxing Dai Date: Wed, 15 Nov 2017 18:40:57 -0800 Subject: [PATCH 089/104] Add necessary shape util support for bfloat16 RELNOTES: Add necessary shape util support for bfloat16. PiperOrigin-RevId: 175914798 --- tensorflow/compiler/xla/shape_util.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/compiler/xla/shape_util.cc b/tensorflow/compiler/xla/shape_util.cc index 2202b6a2c13..c0a0e13f073 100644 --- a/tensorflow/compiler/xla/shape_util.cc +++ b/tensorflow/compiler/xla/shape_util.cc @@ -592,10 +592,10 @@ StatusOr ParseShapeStringInternal(tensorflow::StringPiece* s) { return sizeof(uint32); case U64: return sizeof(uint64); - case F16: - return sizeof(float) / 2; case BF16: return sizeof(float) / 2; + case F16: + return sizeof(float) / 2; case F32: return sizeof(float); case F64: From 542716812332210915d0dfb4dd141c6b768718f4 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 18:42:53 -0800 Subject: [PATCH 090/104] Verify file content before constructing the model PiperOrigin-RevId: 175914923 --- tensorflow/contrib/lite/model.cc | 16 ++++++++++++++-- tensorflow/contrib/lite/model_test.cc | 9 +++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc index f8208f6f98c..e2f3560e61b 100644 --- a/tensorflow/contrib/lite/model.cc +++ b/tensorflow/contrib/lite/model.cc @@ -30,6 +30,17 @@ limitations under the License. 
namespace tflite { +namespace { +inline const tflite::Model* VerifyAndGetModel(const void* buf, size_t len) { + ::flatbuffers::Verifier verifier(static_cast(buf), len); + if (VerifyModelBuffer(verifier)) { + return ::tflite::GetModel(buf); + } else { + return nullptr; + } +} +} // namespace + const char* kEmptyTensorName = ""; std::unique_ptr FlatBufferModel::BuildFromFile( @@ -64,7 +75,7 @@ FlatBufferModel::FlatBufferModel(const char* filename, bool mmap_file, if (!allocation_->valid()) return; if (!CheckModelIdentifier()) return; - model_ = ::tflite::GetModel(allocation_->base()); + model_ = VerifyAndGetModel(allocation_->base(), allocation_->bytes()); } bool FlatBufferModel::CheckModelIdentifier() const { @@ -84,7 +95,8 @@ FlatBufferModel::FlatBufferModel(const char* ptr, size_t num_bytes, : DefaultErrorReporter()) { allocation_ = new MemoryAllocation(ptr, num_bytes, error_reporter); if (!allocation_->valid()) return; - model_ = ::tflite::GetModel(allocation_->base()); + + model_ = VerifyAndGetModel(allocation_->base(), allocation_->bytes()); } FlatBufferModel::~FlatBufferModel() { delete allocation_; } diff --git a/tensorflow/contrib/lite/model_test.cc b/tensorflow/contrib/lite/model_test.cc index ae823650d6d..61043866420 100644 --- a/tensorflow/contrib/lite/model_test.cc +++ b/tensorflow/contrib/lite/model_test.cc @@ -20,6 +20,7 @@ limitations under the License. #include #include #include +#include #include "tensorflow/contrib/lite/model.h" @@ -245,6 +246,14 @@ TEST(BasicFlatBufferModel, TestNullErrorReporter) { ASSERT_NE(interpreter->Invoke(), kTfLiteOk); } +// Test what happens if we cannot bind any of the ops. +TEST(BasicFlatBufferModel, TestBuildModelFromCorruptedData) { + std::string corrupted_data = "123"; + auto model = FlatBufferModel::BuildFromBuffer(corrupted_data.c_str(), + corrupted_data.length()); + ASSERT_FALSE(model); +} + // TODO(aselle): Add tests for serialization of builtin op data types. // These tests will occur with the evaluation tests of individual operators, // not here. From 106d1960f4acb926c72e185b684bdffb0ebc06d7 Mon Sep 17 00:00:00 2001 From: Allen Lavoie Date: Wed, 15 Nov 2017 18:46:31 -0800 Subject: [PATCH 091/104] Switch the op naming of tfe.Network to match its variable naming scheme. e.g. uses my_network_1/dense_1/ consistently rather than also using my_network_1_1/dense_1/ sometimes. PiperOrigin-RevId: 175915162 --- tensorflow/contrib/eager/python/network.py | 22 +++++++++++++ .../contrib/eager/python/network_test.py | 32 +++++++++++++++++++ tensorflow/python/layers/base.py | 8 +++-- 3 files changed, 60 insertions(+), 2 deletions(-) diff --git a/tensorflow/contrib/eager/python/network.py b/tensorflow/contrib/eager/python/network.py index f7303cb5b4b..97eded7dca2 100644 --- a/tensorflow/contrib/eager/python/network.py +++ b/tensorflow/contrib/eager/python/network.py @@ -37,6 +37,20 @@ from tensorflow.python.training import training_util # functions in base.py which should be reused. +def _network_name_scope_naming(current_variable_scope): + """Name scope naming to match operation names to variable names. + + Used in Networks and also applied to non-Network Layers which are added to + Networks before being built. + + Args: + current_variable_scope: A VariableScope object. + Returns: + A name scope name. + """ + return current_variable_scope.name + "/" + + class Network(base.Layer): """Represents the composition of a set of Layers. 
@@ -72,6 +86,11 @@ class Network(base.Layer): self._variable_scope_counts_on_init = ( variable_scope._get_default_variable_store().variable_scopes_count) + def _name_scope_name(self, current_variable_scope): + """Overrides Layer op naming to match variable naming.""" + return _network_name_scope_naming( + current_variable_scope=current_variable_scope) + def _init_set_name(self, name): # Anonymous Networks (name=None) defer setting a final name until they are # (1) added to another Network, or (2) built/called (where (2) is only used @@ -205,6 +224,9 @@ class Network(base.Layer): None, use_resource=True, default_name=sublayer.name) as sub_scope: sublayer._scope = sub_scope + # Also switch op naming for this Layer to match Network conventions, + # i.e. op naming matching variable naming. + sublayer._name_scope_name = _network_name_scope_naming @base.Layer.name.getter def name(self): diff --git a/tensorflow/contrib/eager/python/network_test.py b/tensorflow/contrib/eager/python/network_test.py index 555c6e048d2..8718a8b5229 100644 --- a/tensorflow/contrib/eager/python/network_test.py +++ b/tensorflow/contrib/eager/python/network_test.py @@ -19,9 +19,11 @@ from __future__ import print_function import gc from tensorflow.contrib.eager.python import network +from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors_impl +from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.layers import core from tensorflow.python.ops import math_ops @@ -434,6 +436,36 @@ class NetworkTest(test.TestCase): self.assertIsInstance(net.trainable_weights[0], resource_variable_ops.ResourceVariable) + def testGraphOpNames(self): + """Network operation names should match variable naming.""" + + def _check_op_prefixes(expected_prefix, checked_ops): + for operation in ops.get_default_graph().get_operations(): + if operation.name == "ignore": + continue + if operation.name in checked_ops: + continue + checked_ops.add(operation.name) + self.assertStartsWith(expected_start=expected_prefix, + actual=operation.name) + self.assertNotIn("my_network", operation.name[len(expected_prefix):]) + self.assertNotIn("dense", operation.name[len(expected_prefix):]) + + with context.graph_mode(): + net = MyNetwork() + zero = constant_op.constant([[0.]], name="ignore") + net(zero) + checked_ops = set() + _check_op_prefixes(expected_prefix="my_network/dense/", + checked_ops=checked_ops) + net.net2 = net.track_layer(MyNetwork()) + net.net2(zero) + _check_op_prefixes(expected_prefix="my_network/my_network/dense/", + checked_ops=checked_ops) + MyNetwork()(zero) + _check_op_prefixes(expected_prefix="my_network_1/dense/", + checked_ops=checked_ops) + @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testDuplicateNameError(self): one = constant_op.constant([[1.]]) diff --git a/tensorflow/python/layers/base.py b/tensorflow/python/layers/base.py index 9677db2bce1..74b85da8452 100644 --- a/tensorflow/python/layers/base.py +++ b/tensorflow/python/layers/base.py @@ -379,6 +379,10 @@ class Layer(object): """ return inputs + def _name_scope_name(self, current_variable_scope): + """Determines op naming for the Layer.""" + return current_variable_scope.original_name_scope + def _compute_output_shape(self, input_shape): """Computes the output shape of the layer given the input shape. 
@@ -474,7 +478,7 @@ class Layer(object): self._set_scope(None) with vs.variable_scope( self._scope, reuse=(self.built or self._reuse)) as scope: - with ops.name_scope(scope.original_name_scope): + with ops.name_scope(self._name_scope_name(scope)): variable = vs.get_variable(name, shape=shape, initializer=initializer, @@ -577,7 +581,7 @@ class Layer(object): scope_context_manager = vs.variable_scope( self._scope, reuse=self._reuse) with scope_context_manager as scope: - with ops.name_scope(scope.original_name_scope): + with ops.name_scope(self._name_scope_name(scope)): if not self.built: if not in_graph_mode: # Activity regularization is currently unsupported in Eager mode. From 2efb07ffe5d1f12a4eaef3d673f11615a8ddd6e5 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 18:49:29 -0800 Subject: [PATCH 092/104] Fix a bug when printing fusion_kind in hlo_graph_dumper. PiperOrigin-RevId: 175915347 --- tensorflow/compiler/xla/service/hlo_graph_dumper.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc index 881b7e227c3..3d963a4b1eb 100644 --- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc +++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc @@ -1003,7 +1003,7 @@ string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) { } string extended_opcode = StrCat(HloOpcodeString(instr->opcode()), - instr->opcode() == HloOpcode::kFusion + instr->opcode() != HloOpcode::kFusion ? "" : StrCat(":", xla::ToString(instr->fusion_kind()))); // If the name does not contain the opcode, render both. From fa15669fefdbe7e9a26ac2dd00bc7ce469ca60e1 Mon Sep 17 00:00:00 2001 From: Sanjoy Das Date: Wed, 15 Nov 2017 18:50:58 -0800 Subject: [PATCH 093/104] Rename HloToProfileIndex to HloProfileIndexMap Also fix a typo in a nearby comment. PiperOrigin-RevId: 175915436 --- .../xla/service/hlo_execution_profile.cc | 28 +++++++++---------- .../xla/service/hlo_execution_profile.h | 28 +++++++++---------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/tensorflow/compiler/xla/service/hlo_execution_profile.cc b/tensorflow/compiler/xla/service/hlo_execution_profile.cc index ecce2bd4e51..755374b91d0 100644 --- a/tensorflow/compiler/xla/service/hlo_execution_profile.cc +++ b/tensorflow/compiler/xla/service/hlo_execution_profile.cc @@ -26,7 +26,7 @@ limitations under the License. #include "tensorflow/compiler/xla/util.h" namespace xla { -HloToProfileIndex::HloToProfileIndex(const HloModule& module) { +HloProfileIndexMap::HloProfileIndexMap(const HloModule& module) { size_t current_profile_index = 0; for (xla::HloComputation* computation : module.MakeComputationPostOrder()) { InsertOrDie(&computation_to_profile_idx_, computation, @@ -41,24 +41,24 @@ HloToProfileIndex::HloToProfileIndex(const HloModule& module) { } static HloProfilePrinter CreateOwnedHloProfilePrinter( - const HloToProfileIndex& hlo_to_profile_index, + const HloProfileIndexMap& hlo_profile_index_map, const HloCostAnalysis& cost_analysis) { using HloComputationInfo = HloProfilePrinter::HloComputationInfo; using HloInstructionInfo = HloProfilePrinter::HloInstructionInfo; HloComputationInfo* computation_infos = - new HloComputationInfo[hlo_to_profile_index.computation_count()]; + new HloComputationInfo[hlo_profile_index_map.computation_count()]; // There are two "indices" in play here. 
The first one is the index of the // HloComputationInfo or HloInstructionInfo in the array that contains said // HloComputationInfo or HloInstructionInfo. The second index is the index of // the HloComputationInfo or HloInstructionInfo in the profile counters array, - // as decided by hlo_to_profile_index. The latter index is always referred to - // as "profile_index". + // as decided by hlo_profile_index_map. The latter index is always referred + // to as "profile_index". size_t computation_index_in_static_data = 0; - size_t max_profile_index = hlo_to_profile_index.total_count(); - for (const auto& pair : hlo_to_profile_index.computation_to_profile_idx()) { + size_t max_profile_index = hlo_profile_index_map.total_count(); + for (const auto& pair : hlo_profile_index_map.computation_to_profile_idx()) { CHECK_LT(pair.second, max_profile_index); const HloComputation* computation = pair.first; size_t current_computation_index = computation_index_in_static_data++; @@ -85,7 +85,7 @@ static HloProfilePrinter CreateOwnedHloProfilePrinter( instruction_info->bytes_accessed = cost_analysis.bytes_accessed(*hlo); instruction_info->seconds = cost_analysis.seconds(*hlo); instruction_info->profile_index = - hlo_to_profile_index.GetProfileIndexFor(*hlo); + hlo_profile_index_map.GetProfileIndexFor(*hlo); CHECK_LT(instruction_info->profile_index, max_profile_index); } } @@ -109,26 +109,26 @@ static HloProfilePrinter CreateOwnedHloProfilePrinter( }; return HloProfilePrinter(computation_infos, - hlo_to_profile_index.computation_count(), deleter); + hlo_profile_index_map.computation_count(), deleter); } HloExecutionProfile::HloExecutionProfile(const HloModule& module, const HloCostAnalysis& cost_analysis) - : hlo_to_profile_index_(module), + : hlo_profile_index_map_(module), hlo_profile_printer_( - CreateOwnedHloProfilePrinter(hlo_to_profile_index_, cost_analysis)), + CreateOwnedHloProfilePrinter(hlo_profile_index_map_, cost_analysis)), profile_counters_( - /*count*/ hlo_to_profile_index_.total_count(), + /*count*/ hlo_profile_index_map_.total_count(), /*value*/ 0) {} void HloExecutionProfile::SetCyclesTakenBy(const HloInstruction* hlo, uint64 cycles_taken) { - profile_counters_[hlo_to_profile_index_.GetProfileIndexFor(*hlo)] = + profile_counters_[hlo_profile_index_map_.GetProfileIndexFor(*hlo)] = cycles_taken; } uint64 HloExecutionProfile::GetCyclesTakenBy(const HloInstruction& hlo) const { - return profile_counters_[hlo_to_profile_index_.GetProfileIndexFor(hlo)]; + return profile_counters_[hlo_profile_index_map_.GetProfileIndexFor(hlo)]; } string HloExecutionProfile::ToString( diff --git a/tensorflow/compiler/xla/service/hlo_execution_profile.h b/tensorflow/compiler/xla/service/hlo_execution_profile.h index f945b9d84c6..84702680c0c 100644 --- a/tensorflow/compiler/xla/service/hlo_execution_profile.h +++ b/tensorflow/compiler/xla/service/hlo_execution_profile.h @@ -29,18 +29,18 @@ namespace xla { class HloInstruction; -// Maps all HloInstructions and HloComputions in an HloModule to integers. -// These integers form the contiguous range [0, GetTotalCount()). -class HloToProfileIndex { +// Maps all HloInstructions and HloComputations in an HloModule to integers. +// These integers form the contiguous range [0, total_count()). +class HloProfileIndexMap { public: - // Scans `module` to populate this instance of HloToProfileIndex. - explicit HloToProfileIndex(const HloModule& module); + // Scans `module` to populate this instance of HloProfileIndexMap. 
+ explicit HloProfileIndexMap(const HloModule& module); - HloToProfileIndex(const HloToProfileIndex&) = default; - HloToProfileIndex(HloToProfileIndex&&) = default; + HloProfileIndexMap(const HloProfileIndexMap&) = default; + HloProfileIndexMap(HloProfileIndexMap&&) = default; - HloToProfileIndex& operator=(const HloToProfileIndex&) = default; - HloToProfileIndex& operator=(HloToProfileIndex&&) = default; + HloProfileIndexMap& operator=(const HloProfileIndexMap&) = default; + HloProfileIndexMap& operator=(HloProfileIndexMap&&) = default; size_t GetProfileIndexFor(const HloInstruction& instruction) const { return FindOrDie(instruction_to_profile_idx(), &instruction); @@ -97,14 +97,14 @@ class HloExecutionProfile { // Return the number of cycles this computation took to execute. uint64 total_cycles_executed(const HloComputation& computation) const { - return profile_counters_[hlo_to_profile_index_.GetProfileIndexFor( + return profile_counters_[hlo_profile_index_map_.GetProfileIndexFor( computation)]; } // Record how many cycles a computation took to execute. void set_total_cycles_executed(const HloComputation& computation, uint64 total_cycles_executed) { - profile_counters_[hlo_to_profile_index_.GetProfileIndexFor(computation)] = + profile_counters_[hlo_profile_index_map_.GetProfileIndexFor(computation)] = total_cycles_executed; } @@ -117,9 +117,9 @@ class HloExecutionProfile { string ToString(const DeviceDescription& device_description) const; private: - // hlo_to_profile_index_ maps an Hlo entity (computation or instruction) to an - // index in profile_counters_. - HloToProfileIndex hlo_to_profile_index_; + // hlo_profile_index_map_ maps an Hlo entity (computation or instruction) to + // an index in profile_counters_. + HloProfileIndexMap hlo_profile_index_map_; // Used to print profile_counters_ in a human readable form. HloProfilePrinter hlo_profile_printer_; From 4916c64836d5f51d6b8878f429bc1622c465fcdf Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 15 Nov 2017 19:58:36 -0800 Subject: [PATCH 094/104] [XLA] Adding kConditional opcode that represents a conditional HLO instruction. 
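The change here threads the new opcode through the existing opcode switches. Semantically, a conditional instruction selects which of two computations to invoke based on a scalar predicate and applies it to the corresponding operand, roughly analogous to this plain-C++ sketch (illustrative types and names, not XLA API):

#include <functional>
#include <iostream>

// Picks one of two computations based on the predicate and applies it to
// the matching operand.
float Conditional(bool pred, float true_operand,
                  const std::function<float(float)>& true_computation,
                  float false_operand,
                  const std::function<float(float)>& false_computation) {
  return pred ? true_computation(true_operand)
              : false_computation(false_operand);
}

int main() {
  auto doubled = [](float x) { return 2.0f * x; };
  auto negated = [](float x) { return -x; };
  std::cout << Conditional(true, 3.0f, doubled, 10.0f, negated) << "\n";   // 6
  std::cout << Conditional(false, 3.0f, doubled, 10.0f, negated) << "\n";  // -10
  return 0;
}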
PiperOrigin-RevId: 175919301 --- tensorflow/compiler/xla/service/hlo_graph_dumper.cc | 1 + tensorflow/compiler/xla/service/hlo_instruction.cc | 3 +++ tensorflow/compiler/xla/service/hlo_opcode.h | 1 + tensorflow/compiler/xla/service/instruction_fusion.cc | 1 + tensorflow/compiler/xla/tools/parser/hlo_parser.cc | 1 + 5 files changed, 7 insertions(+) diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc index 3d963a4b1eb..d71a4b42c71 100644 --- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc +++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc @@ -970,6 +970,7 @@ ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) { case HloOpcode::kOutfeed: case HloOpcode::kCrossReplicaSum: return kBrown; + case HloOpcode::kConditional: case HloOpcode::kCustomCall: case HloOpcode::kWhile: case HloOpcode::kCall: diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc index 045abdac8b7..f7b5b265d92 100644 --- a/tensorflow/compiler/xla/service/hlo_instruction.cc +++ b/tensorflow/compiler/xla/service/hlo_instruction.cc @@ -1210,6 +1210,7 @@ std::unique_ptr HloInstruction::CloneWithNewOperands( new_operands[2], new_operands[3], new_operands[4], epsilon(), feature_index()); break; + case HloOpcode::kConditional: case HloOpcode::kRecv: case HloOpcode::kRecvDone: case HloOpcode::kSend: @@ -1603,6 +1604,7 @@ bool HloInstruction::IdenticalSlowPath( return dimensions() == other.dimensions(); // These opcodes are not yet supported. + case HloOpcode::kConditional: case HloOpcode::kInfeed: case HloOpcode::kOutfeed: case HloOpcode::kSort: @@ -2355,6 +2357,7 @@ Status HloInstruction::Visit(DfsHloVisitorBase* visitor) { return visitor->HandleSendDone(this); // These opcodes are not handled here. + case HloOpcode::kConditional: case HloOpcode::kTrace: break; } diff --git a/tensorflow/compiler/xla/service/hlo_opcode.h b/tensorflow/compiler/xla/service/hlo_opcode.h index e0d02e0665c..7b070274416 100644 --- a/tensorflow/compiler/xla/service/hlo_opcode.h +++ b/tensorflow/compiler/xla/service/hlo_opcode.h @@ -58,6 +58,7 @@ namespace xla { V(kClamp, "clamp") \ V(kComplex, "complex") \ V(kConcatenate, "concatenate", kHloOpcodeIsVariadic) \ + V(kConditional, "conditional") \ V(kConstant, "constant") \ V(kConvert, "convert") \ V(kConvolution, "convolution") \ diff --git a/tensorflow/compiler/xla/service/instruction_fusion.cc b/tensorflow/compiler/xla/service/instruction_fusion.cc index dea47b1fd7b..de4804996f8 100644 --- a/tensorflow/compiler/xla/service/instruction_fusion.cc +++ b/tensorflow/compiler/xla/service/instruction_fusion.cc @@ -92,6 +92,7 @@ namespace xla { case HloOpcode::kBatchNormInference: case HloOpcode::kBatchNormGrad: case HloOpcode::kCall: + case HloOpcode::kConditional: case HloOpcode::kConvolution: case HloOpcode::kCrossReplicaSum: case HloOpcode::kCustomCall: diff --git a/tensorflow/compiler/xla/tools/parser/hlo_parser.cc b/tensorflow/compiler/xla/tools/parser/hlo_parser.cc index a65e5a856f7..0159d03b11d 100644 --- a/tensorflow/compiler/xla/tools/parser/hlo_parser.cc +++ b/tensorflow/compiler/xla/tools/parser/hlo_parser.cc @@ -798,6 +798,7 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder, shape, operands[0], config ? 
*config : "")); break; } + case HloOpcode::kConditional: case HloOpcode::kCustomCall: case HloOpcode::kReducePrecision: case HloOpcode::kRng: From 67e4add65243bf10fb09201d93f7be1f10762066 Mon Sep 17 00:00:00 2001 From: Saurabh Saxena Date: Wed, 15 Nov 2017 20:26:08 -0800 Subject: [PATCH 095/104] Make MapDataset saveable. Also integrate functionality of GraphDefBuilderWrapper::AddDatasetWithInputAsList into AddDataset since MapDatasetOp requires 2 inputs of type single tensor and tensor list. PiperOrigin-RevId: 175921068 --- .../contrib/data/python/kernel_tests/BUILD | 6 + .../kernel_tests/map_dataset_op_test.py | 516 ++++++++++++++++++ tensorflow/core/graph/graph_def_builder.h | 14 + tensorflow/core/kernels/batch_dataset_op.cc | 4 +- tensorflow/core/kernels/captured_function.h | 2 + .../core/kernels/concatenate_dataset_op.cc | 6 +- tensorflow/core/kernels/dataset.h | 161 ++++-- tensorflow/core/kernels/iterator_ops.cc | 52 +- tensorflow/core/kernels/map_dataset_op.cc | 57 +- tensorflow/core/kernels/repeat_dataset_op.cc | 4 +- tensorflow/core/kernels/shuffle_dataset_op.cc | 4 +- tensorflow/core/kernels/skip_dataset_op.cc | 4 +- tensorflow/core/kernels/take_dataset_op.cc | 4 +- tensorflow/core/kernels/tensor_dataset_op.cc | 2 +- .../core/kernels/tensor_slice_dataset_op.cc | 2 +- tensorflow/core/kernels/zip_dataset_op.cc | 8 +- 16 files changed, 768 insertions(+), 78 deletions(-) diff --git a/tensorflow/contrib/data/python/kernel_tests/BUILD b/tensorflow/contrib/data/python/kernel_tests/BUILD index 2b3843b97b3..badabed7019 100644 --- a/tensorflow/contrib/data/python/kernel_tests/BUILD +++ b/tensorflow/contrib/data/python/kernel_tests/BUILD @@ -269,6 +269,7 @@ py_test( srcs_version = "PY2AND3", deps = [ "//tensorflow/contrib/data/python/ops:dataset_ops", + "//tensorflow/contrib/data/python/ops:iterator_ops", "//tensorflow/contrib/data/python/ops:transformation_ops", "//tensorflow/python:array_ops", "//tensorflow/python:client_testlib", @@ -276,15 +277,20 @@ py_test( "//tensorflow/python:data_flow_ops", "//tensorflow/python:dtypes", "//tensorflow/python:errors", + "//tensorflow/python:framework_ops", "//tensorflow/python:functional_ops", "//tensorflow/python:io_ops", "//tensorflow/python:lookup_ops", "//tensorflow/python:math_ops", + "//tensorflow/python:platform", "//tensorflow/python:random_ops", "//tensorflow/python:script_ops", "//tensorflow/python:string_ops", + "//tensorflow/python:training", "//tensorflow/python:util", "//tensorflow/python:variable_scope", + "//tensorflow/python:variables", + "//tensorflow/python/data/ops:iterator_ops", "//third_party/py/numpy", ], ) diff --git a/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py index 8ccf92c17aa..d8e7f9d5933 100644 --- a/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py +++ b/tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py @@ -25,9 +25,13 @@ import numpy as np from tensorflow.contrib.data.python.ops import dataset_ops from tensorflow.contrib.data.python.ops import error_ops +from tensorflow.contrib.data.python.ops import iterator_ops as contrib_iterator_ops +from tensorflow.python.data.ops import iterator_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors +from tensorflow.python.framework import function +from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from 
tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops @@ -40,7 +44,10 @@ from tensorflow.python.ops import script_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variable_scope +from tensorflow.python.ops import variables +from tensorflow.python.platform import gfile from tensorflow.python.platform import test +from tensorflow.python.training import saver as saver_lib from tensorflow.python.util import compat @@ -668,6 +675,515 @@ class MapDatasetTest(test.TestCase): with self.assertRaises(errors.OutOfRangeError): sess.run(get_next) + def testCaptureResourceInMapFn(self): + + def _build_ds(iterator): + + def _map_fn(x): + get_next = iterator.get_next() + return x * get_next + + return dataset_ops.Dataset.range(10).map(_map_fn) + + def _build_graph(): + captured_iterator = dataset_ops.Dataset.range( + 10).make_initializable_iterator() + ds = _build_ds(captured_iterator) + iterator = ds.make_initializable_iterator() + init_op = iterator.initializer + return captured_iterator.initializer, init_op + + with ops.Graph().as_default() as g: + captured_init_op, init_op = _build_graph() + with self.test_session(graph=g) as sess: + sess.run(captured_init_op) + with self.assertRaises(errors.UnimplementedError): + # CapturedFunction does not support capturing IteratorResource. + sess.run(init_op) + + +class MapDatasetSerializationTest(test.TestCase): + + def setUp(self): + self._tensor_slice_len = 7 + self._num_epochs = 14 + self._num_outputs = self._tensor_slice_len * self._num_epochs + + def tearDown(self): + # Remove all checkpoint files. + prefix = self._ckpt_path() + pattern = prefix + "*" + files = gfile.Glob(pattern) + map(gfile.Remove, files) + + def _build_ds(self, multiplier=37.0): + components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) * + np.arange(self._tensor_slice_len)[:, np.newaxis], + np.array(multiplier) * np.arange(self._tensor_slice_len)) + + def _map_fn(x, y, z): + return math_ops.square(x), math_ops.square(y), math_ops.square(z) + + return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn) + .repeat(self._num_epochs)) + + def _build_graph(self, multiplier=37.0, build_saveable=True): + ds = self._build_ds(multiplier) + iterator = ds.make_initializable_iterator() + + if build_saveable: + saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator) + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + init_op = iterator.initializer + get_next = iterator.get_next() + self._add_iterator_ops_to_collection(init_op, get_next) + saver = saver_lib.Saver(allow_empty=True) + return init_op, get_next, saver + + def _build_empty_graph(self, output_types, output_shapes): + iterator = iterator_ops.Iterator.from_structure(output_types, output_shapes) + saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator) + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + saver = saver_lib.Saver() + get_next = iterator.get_next() + return get_next, saver + + def _add_iterator_ops_to_collection(self, init_op, get_next): + ops.add_to_collection("iterator_ops", init_op) + ops.add_to_collection("iterator_ops", get_next[0]) + ops.add_to_collection("iterator_ops", get_next[1]) + ops.add_to_collection("iterator_ops", get_next[2]) + + def _get_iterator_ops_from_collection(self): + init_op, get_next_1, get_next_2, get_next_3 = ops.get_collection( + "iterator_ops") + return init_op, (get_next_1, get_next_2, get_next_3) + + def 
_ckpt_path(self): + return os.path.join(self.get_temp_dir(), "iterator") + + def _latest_ckpt(self): + return saver_lib.latest_checkpoint(self.get_temp_dir()) + + def _save(self, sess, saver): + saver.save(sess, self._ckpt_path()) + + def _restore(self, saver, sess): + saver.restore(sess, self._latest_ckpt()) + + def _import_meta_graph(self): + meta_file_path = self._ckpt_path() + ".meta" + return saver_lib.import_meta_graph(meta_file_path) + + def _testReadWithBreaks(self, break_points, init_before_restore=False): + expected = [] + actual = [] + # Generate the ground truth. + with ops.Graph().as_default() as g: + init_op, get_next_op, _ = self._build_graph() + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(self._num_outputs): + expected.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + # Run and checkpoint after first break_point. + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph() + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(break_points[0]): + actual.append(sess.run(get_next_op)) + self._save(sess, saver) + + # Load from checkpoint and continue running while stopping at each + # subsequent checkpoint. + for i in range(len(break_points)): + with ops.Graph().as_default() as g: + saver = self._import_meta_graph() + init_op, get_next_op = self._get_iterator_ops_from_collection() + with self.test_session(graph=g) as sess: + if init_before_restore: + sess.run(init_op) + self._restore(saver, sess) + start = break_points[i] + end = break_points[ + i + 1] if i < len(break_points) - 1 else self._num_outputs + for _ in range(end - start): + actual.append(sess.run(get_next_op)) + self._save(sess, saver) + if end == self._num_outputs: + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + self._match(expected, actual) + + def _match(self, expected, actual): + self.assertEqual(len(expected), len(actual)) + for expected_tuple, actual_tuple in zip(expected, actual): + self.assertEqual(expected_tuple[0], actual_tuple[0]) + self.assertSequenceEqual(expected_tuple[1].tolist(), + actual_tuple[1].tolist()) + self.assertEqual(expected_tuple[2], actual_tuple[2]) + + def _does_not_match(self, expected, actual): + with self.assertRaises(AssertionError): + self._match(expected, actual) + + def testSaveRestore(self): + self._testReadWithBreaks([4]) + self._testReadWithBreaks([13]) + self._testReadWithBreaks([18]) + self._testReadWithBreaks([23]) + + def testSaveUnusedIterator(self): + self._testReadWithBreaks([0]) + + def testSaveFullyUsedIterator(self): + self._testReadWithBreaks([self._num_outputs]) + + def testMultipleBreaks(self): + self._testReadWithBreaks([0, 5, 9, 15, 25, 32]) + + def testIdempotence(self): + # Attempt to save iterator immediately after restoring. 
+ self._testReadWithBreaks([1, 1, 5, 5, 5, 25, 32]) + + def testInitThenRestore(self): + self._testReadWithBreaks([0, 5, 9, 15, 25, 32], init_before_restore=True) + + def testRestoreExhaustedIterator(self): + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph() + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(self._num_outputs): + sess.run(get_next_op) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + self._save(sess, saver) + + with ops.Graph().as_default() as g: + saver = self._import_meta_graph() + init_op, get_next_op = self._get_iterator_ops_from_collection() + with self.test_session(graph=g) as sess: + self._restore(saver, sess) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + def testResetRestoredIterator(self): + expected = [] + # Collect ground truth containing all outputs. + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph() + break_point = self._num_outputs // 2 + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(break_point): + expected.append(sess.run(get_next_op)) + self._save(sess, saver) + for _ in range(self._num_outputs - break_point): + expected.append(sess.run(get_next_op)) + + actual = [] + # Restore from checkpoint and then run init_op. + with ops.Graph().as_default() as g: + saver = self._import_meta_graph() + init_op, get_next_op = self._get_iterator_ops_from_collection() + with self.test_session(graph=g) as sess: + self._restore(saver, sess) + sess.run(init_op) + for _ in range(self._num_outputs): + actual.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + self._match(expected, actual) + + def testRestoreInModifiedGraph(self): + expected = [] + actual_without_restore = [] + actual = [] + break_point = 10 + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph(multiplier=15.0) + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(break_point): + expected.append(sess.run(get_next_op)) + actual.extend(expected) + self._save(sess, saver) + for _ in range(self._num_outputs - break_point): + expected.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + # Collect outputs by running modified graph. + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph(multiplier=30.0) + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(self._num_outputs): + actual_without_restore.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + # Restore the checkpoint in the modified graph. + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph(multiplier=30.0) + with self.test_session(graph=g) as sess: + self._restore(saver, sess) + for _ in range(self._num_outputs - break_point): + actual.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + # Ensure the modified graph gets overridden when restoring checkpoint. + self._does_not_match(expected, actual_without_restore) + # Expect that the outputs are what we would expect if we ran the old + # graph. + self._match(expected, actual) + + # TODO(srbs): Add this test to dataset_serialization_test_base.py. 
+ def testRestoreInEmptyGraph(self): + expected = [] + actual = [] + break_point = 10 + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph(multiplier=15.0) + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(break_point): + sess.run(get_next_op) + self._save(sess, saver) + for _ in range(self._num_outputs - break_point): + expected.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + with ops.Graph().as_default() as g: + ds = self._build_ds() + output_types = ds.output_types + output_shapes = ds.output_shapes + + with ops.Graph().as_default() as g: + get_next_op, saver = self._build_empty_graph(output_types, output_shapes) + with self.test_session(graph=g) as sess: + self._restore(saver, sess) + for _ in range(self._num_outputs - break_point): + actual.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + # Expect that the outputs are what we would expect if we ran the old + # graph. + self._match(expected, actual) + + def testDoNotBuildSaveable(self): + break_point = 10 + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph(multiplier=15.0) + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(break_point): + sess.run(get_next_op) + self._save(sess, saver) + + expected = [] + # Collect ground truth by running modified graph. + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph(multiplier=30.0) + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(self._num_outputs): + expected.append(sess.run(get_next_op)) + + actual = [] + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = self._build_graph( + multiplier=30.0, build_saveable=False) + with self.test_session(graph=g) as sess: + # Since the SaveableObject was not added to Saver's list + # of saveables, iterator state is not restored by saver.restore(). 
+ self._restore(saver, sess) + with self.assertRaises(errors.FailedPreconditionError): + sess.run(get_next_op) + sess.run(init_op) + for _ in range(self._num_outputs): + actual.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + self._match(expected, actual) + + def testSaveStatefulFunction(self): + + def _build_ds(): + + def _map_fn(x): + return random_ops.random_uniform( + (), 0, 10, dtype=dtypes.int32) * math_ops.to_int32(x) + + return dataset_ops.Dataset.range(100).map(_map_fn) + + def _build_graph(): + ds = _build_ds() + iterator = ds.make_initializable_iterator() + + saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator) + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + init_op = iterator.initializer + get_next = iterator.get_next() + saver = saver_lib.Saver(allow_empty=True) + return init_op, get_next, saver + + break_point = 10 + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = _build_graph() + with self.test_session(graph=g) as sess: + sess.run(init_op) + for _ in range(break_point): + sess.run(get_next_op) + with self.assertRaises(errors.InvalidArgumentError): + self._save(sess, saver) + + def testCaptureVariableInMapFn(self): + + def _build_ds(): + counter_var = variable_scope.get_variable( + "counter", (), dtypes.int32, use_resource=True) + return (dataset_ops.Dataset.from_tensors(0).repeat(10).map( + lambda _: counter_var.assign_add(1))) + + def _build_graph(): + ds = _build_ds() + iterator = ds.make_initializable_iterator() + + saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator) + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + init_op = iterator.initializer + get_next = iterator.get_next() + saver = saver_lib.Saver(allow_empty=True) + return init_op, get_next, saver + + break_point = 10 + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = _build_graph() + with self.test_session(graph=g) as sess: + sess.run(variables.global_variables_initializer()) + sess.run(init_op) + for _ in range(break_point): + sess.run(get_next_op) + with self.assertRaises(errors.InvalidArgumentError): + self._save(sess, saver) + + def testCaptureDefunInMapFn(self): + num_outputs = 100 + + def _build_ds(): + + @function.Defun(dtypes.int64) + def defun_fn(x): + return constant_op.constant(1000) + math_ops.to_int32(x) + + return dataset_ops.Dataset.range(num_outputs).map(defun_fn) + + def _build_graph(): + ds = _build_ds() + iterator = ds.make_initializable_iterator() + + saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator) + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + init_op = iterator.initializer + get_next = iterator.get_next() + saver = saver_lib.Saver(allow_empty=True) + return init_op, get_next, saver + + break_point = 10 + expected = [] + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = _build_graph() + with self.test_session(graph=g) as sess: + sess.run(variables.global_variables_initializer()) + sess.run(init_op) + for _ in range(break_point): + sess.run(get_next_op) + self._save(sess, saver) + for _ in range(num_outputs - break_point): + expected.append(sess.run(get_next_op)) + + with ops.Graph().as_default() as g: + ds = _build_ds() + output_types = ds.output_types + output_shapes = ds.output_shapes + + actual = [] + with ops.Graph().as_default() as g: + get_next_op, saver = self._build_empty_graph(output_types, output_shapes) + with self.test_session(graph=g) as sess: + 
self._restore(saver, sess) + for _ in range(num_outputs - break_point): + actual.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + self.assertSequenceEqual(expected, actual) + + def testBuildDefunInMapFn(self): + num_outputs = 100 + + def _build_ds(): + + @function.Defun(dtypes.int64) + def defun_fn(x): + + @function.Defun(dtypes.int32) + def defun_fn_deep(x): + return constant_op.constant(1000) + math_ops.to_int32(x) + + return constant_op.constant(11000) + defun_fn_deep(math_ops.to_int32(x)) + + return dataset_ops.Dataset.range(num_outputs).map(defun_fn) + + def _build_graph(): + ds = _build_ds() + iterator = ds.make_initializable_iterator() + + saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator) + ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) + init_op = iterator.initializer + get_next = iterator.get_next() + saver = saver_lib.Saver(allow_empty=True) + return init_op, get_next, saver + + break_point = 10 + expected = [] + with ops.Graph().as_default() as g: + init_op, get_next_op, saver = _build_graph() + with self.test_session(graph=g) as sess: + sess.run(variables.global_variables_initializer()) + sess.run(init_op) + for _ in range(break_point): + sess.run(get_next_op) + self._save(sess, saver) + for _ in range(num_outputs - break_point): + expected.append(sess.run(get_next_op)) + + with ops.Graph().as_default() as g: + ds = _build_ds() + output_types = ds.output_types + output_shapes = ds.output_shapes + + actual = [] + with ops.Graph().as_default() as g: + get_next_op, saver = self._build_empty_graph(output_types, output_shapes) + with self.test_session(graph=g) as sess: + self._restore(saver, sess) + for _ in range(num_outputs - break_point): + actual.append(sess.run(get_next_op)) + with self.assertRaises(errors.OutOfRangeError): + sess.run(get_next_op) + + self.assertSequenceEqual(expected, actual) + if __name__ == "__main__": test.main() diff --git a/tensorflow/core/graph/graph_def_builder.h b/tensorflow/core/graph/graph_def_builder.h index 4d9fe1dee97..b389cd80531 100644 --- a/tensorflow/core/graph/graph_def_builder.h +++ b/tensorflow/core/graph/graph_def_builder.h @@ -165,6 +165,20 @@ class GraphDefBuilder { // by name), and makes sure the resulting graph is valid. Status ToGraph(Graph* graph) const; + // Adds the function and gradient definitions in `fdef_lib` to this graph's op + // registry. Ignores duplicate functions, and returns a bad status if an + // imported function differs from an existing function or op with the same + // name. + Status AddFunctionLibrary(const FunctionDefLibrary& fdef_lib) { + return graph_.AddFunctionLibrary(fdef_lib); + } + + // Returns whether a user-defined function with `name` already exists in the + // graph. 
+ bool HasFunction(const string& name) { + return graph_.flib_def().Find(name) != nullptr; + } + private: Graph graph_; Status status_; diff --git a/tensorflow/core/kernels/batch_dataset_op.cc b/tensorflow/core/kernels/batch_dataset_op.cc index 6a5fd17a9e6..46412a554b3 100644 --- a/tensorflow/core/kernels/batch_dataset_op.cc +++ b/tensorflow/core/kernels/batch_dataset_op.cc @@ -80,10 +80,10 @@ class BatchDatasetOp : public UnaryDatasetOpKernel { } protected: - Status AsGraphDefInternal(DatasetGraphDefBuilder* b, + Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; - TF_RETURN_IF_ERROR(b->AddParentDataset(input_, &input_graph_node)); + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node)); Node* batch_size = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(batch_size_, &batch_size)); TF_RETURN_IF_ERROR( diff --git a/tensorflow/core/kernels/captured_function.h b/tensorflow/core/kernels/captured_function.h index 55d337d7075..9430127600a 100644 --- a/tensorflow/core/kernels/captured_function.h +++ b/tensorflow/core/kernels/captured_function.h @@ -71,6 +71,8 @@ class CapturedFunction { ResourceMgr* resource_manager() const { return device_->resource_manager(); } + const std::vector& captured_inputs() { return captured_inputs_; } + static int64 generate_step_id() { // Choose a step ID that is guaranteed not to clash with any // Session-generated step ID. DirectSession only generates diff --git a/tensorflow/core/kernels/concatenate_dataset_op.cc b/tensorflow/core/kernels/concatenate_dataset_op.cc index c3bd89c479f..ad78ba01869 100644 --- a/tensorflow/core/kernels/concatenate_dataset_op.cc +++ b/tensorflow/core/kernels/concatenate_dataset_op.cc @@ -79,13 +79,13 @@ class ConcatenateDatasetOp : public BinaryDatasetOpKernel { string DebugString() override { return "ConcatenateDatasetOp::Dataset"; } protected: - Status AsGraphDefInternal(DatasetGraphDefBuilder* b, + Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph = nullptr; - TF_RETURN_IF_ERROR(b->AddParentDataset(input_, &input_graph)); + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph)); Node* to_concatenate_graph = nullptr; TF_RETURN_IF_ERROR( - b->AddParentDataset(to_concatenate_, &to_concatenate_graph)); + b->AddParentDataset(ctx, to_concatenate_, &to_concatenate_graph)); TF_RETURN_IF_ERROR( b->AddDataset(this, {input_graph, to_concatenate_graph}, output)); return Status::OK(); diff --git a/tensorflow/core/kernels/dataset.h b/tensorflow/core/kernels/dataset.h index a90590fc7e0..b9b0e5a7c6c 100644 --- a/tensorflow/core/kernels/dataset.h +++ b/tensorflow/core/kernels/dataset.h @@ -137,6 +137,23 @@ class GraphDefBuilderWrapper { const std::vector& inputs, const std::vector>& attrs, Node** output) { + std::vector> enumerated_inputs( + inputs.size()); + for (int i = 0; i < inputs.size(); i++) { + enumerated_inputs[i] = std::make_pair(i, inputs[i]); + } + return AddDataset(dataset, enumerated_inputs, {}, attrs, output); + } + + template + Status AddDataset( + const DatasetType* dataset, + const std::vector>& inputs, + const std::vector< + std::pair>>& + list_inputs, + const std::vector>& attrs, + Node** output) { const string& op_type_name = dataset->op_name(); std::unique_ptr opts( new GraphDefBuilder::Options(b_->opts())); @@ -161,8 +178,22 @@ class GraphDefBuilderWrapper { } NodeBuilder node_builder(opts->GetNameForOp(op_type_name), op_type_name, 
opts->op_registry()); - for (auto node_out : inputs) { - node_builder.Input(node_out); + { + size_t total_size = inputs.size() + list_inputs.size(); + auto inputs_iter = inputs.begin(); + auto list_inputs_iter = list_inputs.begin(); + for (int i = 0; i < total_size; i++) { + if (inputs_iter != inputs.end() && inputs_iter->first == i) { + node_builder.Input(inputs_iter->second); + inputs_iter++; + } else if (list_inputs_iter != list_inputs.end() && + list_inputs_iter->first == i) { + node_builder.Input(list_inputs_iter->second); + list_inputs_iter++; + } else { + return errors::InvalidArgument("No input found for index ", i); + } + } } *output = opts->FinalizeBuilder(&node_builder); if (*output == nullptr) { @@ -172,35 +203,56 @@ class GraphDefBuilderWrapper { return Status::OK(); } - // TODO(shivaniagrawal): Single method for AddDataset for - // NodeOut/ArrraySlice - template - Status AddDatasetWithInputAsList(const DatasetType* dataset, - gtl::ArraySlice input, - Node** output) { - const string& op_type_name = dataset->op_name(); - std::unique_ptr opts( - new GraphDefBuilder::Options(b_->opts())); - bool has_output_types_attr = HasAttr(op_type_name, "output_types"); - bool has_output_shapes_attr = HasAttr(op_type_name, "output_shapes"); - if (has_output_shapes_attr) { - opts.reset(new GraphDefBuilder::Options( - opts->WithAttr("output_shapes", dataset->output_shapes()))); + // Adds a user-defined function with name `function_name` to the graph and + // recursively adds all functions it references. If a function with a matching + // name has already been added, returns with OK status. If a user-defined with + // name `function_name` is not found in the FunctionLibraryDefinition, returns + // and InvalidArgumentError. If the function with name `function_name` or any + // of its dependent functions are stateful, returns an InvalidArgument error. + Status AddFunction(OpKernelContext* ctx, const string& function_name) { + if (b_->HasFunction(function_name)) { + LOG(INFO) << "Function with name " << function_name << "already exists in" + << " the graph. It will not be added again."; + return Status::OK(); } - if (has_output_types_attr) { - opts.reset(new GraphDefBuilder::Options( - opts->WithAttr("output_types", dataset->output_dtypes()))); + TF_RETURN_IF_ERROR(EnsureFunctionIsStateless(ctx, function_name)); + const FunctionLibraryDefinition* flib_def = + ctx->function_library()->GetFunctionLibraryDefinition(); + const FunctionDef* f_def = flib_def->Find(function_name); + if (f_def == nullptr) { + return errors::InvalidArgument("Unable to find FunctionDef for ", + function_name, " in the registry."); } - if (opts->HaveError()) { - return errors::Internal("AddDataset: Error building Options."); + FunctionDefLibrary def; + *def.add_function() = *f_def; + const string gradient_func = flib_def->FindGradient(function_name); + if (!gradient_func.empty()) { + GradientDef* g_def = def.add_gradient(); + g_def->set_function_name(function_name); + g_def->set_gradient_func(gradient_func); } - NodeBuilder node_builder(opts->GetNameForOp(op_type_name), op_type_name, - opts->op_registry()); - node_builder.Input(input); - *output = opts->FinalizeBuilder(&node_builder); - if (*output == nullptr) { - return errors::Internal("AddDataset: Failed to build ", op_type_name, - " op."); + TF_RETURN_IF_ERROR(b_->AddFunctionLibrary(def)); + + // Recursively add functions in inputs of function_name. 
+ for (const NodeDef& node_def : f_def->node_def()) { + const OpRegistrationData* op_reg_data = nullptr; + TF_RETURN_IF_ERROR(flib_def->LookUp(node_def.op(), &op_reg_data)); + if (op_reg_data->is_function_op) { + TF_RETURN_IF_ERROR(AddFunction(ctx, op_reg_data->op_def.name())); + } + } + + // Recursively add functions in attrs of function_name. + for (auto iter = f_def->attr().begin(); iter != f_def->attr().end(); + iter++) { + const AttrValue& attr_value = iter->second; + if (attr_value.has_func()) { + TF_RETURN_IF_ERROR(AddFunction(ctx, attr_value.func().name())); + } else if (attr_value.has_list()) { + for (const NameAttrList& name_attr_list : attr_value.list().func()) { + TF_RETURN_IF_ERROR(AddFunction(ctx, name_attr_list.name())); + } + } } return Status::OK(); } @@ -217,6 +269,28 @@ class GraphDefBuilderWrapper { b_->opts().WithAttr("dtype", val.dtype()).WithAttr("value", val)); } + Status EnsureFunctionIsStateless(OpKernelContext* ctx, + const string& function_name) const { + const FunctionLibraryDefinition* lib_def = + ctx->function_library()->GetFunctionLibraryDefinition(); + const FunctionDef* function_def = lib_def->Find(function_name); + if (!function_def) { + return errors::InvalidArgument("Unable to find FunctionDef for ", + function_name, " in registry."); + } + for (const NodeDef& node_def : function_def->node_def()) { + const OpDef* op_def; + TF_RETURN_IF_ERROR(lib_def->LookUpOpDef(node_def.op(), &op_def)); + if (op_def->is_stateful()) { + return errors::InvalidArgument( + "Op[name: ", node_def.name(), ", type: ", node_def.op(), "] ", + "in function ", function_name, " is stateful. ", + "Saving stateful functions is not supported yet."); + } + } + return Status::OK(); + } + bool HasAttr(const string& op_type_name, const string& attr_name) { const OpDef* op_def = nullptr; Status s = b_->opts().op_registry()->LookUpOpDef(op_type_name, &op_def); @@ -306,7 +380,7 @@ class IteratorBase { virtual const std::vector& output_shapes() const = 0; // Saves the state of this iterator. - virtual Status Save(IteratorStateWriter* writer) { + virtual Status Save(OpKernelContext* ctx, IteratorStateWriter* writer) { return SaveInternal(writer); } @@ -377,7 +451,7 @@ class DatasetBase : public core::RefCounted { virtual string DebugString() = 0; // Serializes the dataset and writes it to the `writer`. 
- virtual Status Save(IteratorStateWriter* writer) const { + virtual Status Save(OpKernelContext* ctx, IteratorStateWriter* writer) const { return errors::Unimplemented("DatasetBase::Save"); } @@ -389,11 +463,18 @@ class DatasetBase : public core::RefCounted { class DatasetGraphDefBuilder : public GraphDefBuilderWrapper { public: DatasetGraphDefBuilder(GraphDefBuilder* b) : GraphDefBuilderWrapper(b) {} - Status AddParentDataset(const DatasetBase* dataset, Node** output) { - return dataset->AsGraphDefInternal(this, output); + Status AddParentDataset(OpKernelContext* ctx, const DatasetBase* dataset, + Node** output) { + return dataset->AsGraphDefInternal(ctx, this, output); } }; + virtual Status AsGraphDefInternal(OpKernelContext* ctx, + DatasetGraphDefBuilder* b, + Node** node) const { + return AsGraphDefInternal(b, node); + } + virtual Status AsGraphDefInternal(DatasetGraphDefBuilder* b, Node** node) const { return errors::Unimplemented("AsGraphDefInternal"); @@ -408,10 +489,11 @@ class GraphDatasetBase : public DatasetBase { const string op_name() const { return op_name_; } - Status Save(IteratorStateWriter* writer) const override { + Status Save(OpKernelContext* ctx, + IteratorStateWriter* writer) const override { string serialized_graph_def; string output_node; - TF_RETURN_IF_ERROR(Serialize(&serialized_graph_def, &output_node)); + TF_RETURN_IF_ERROR(Serialize(ctx, &serialized_graph_def, &output_node)); TF_RETURN_IF_ERROR( writer->WriteScalar(kDatasetGraphKey, serialized_graph_def)); TF_RETURN_IF_ERROR( @@ -427,11 +509,12 @@ class GraphDatasetBase : public DatasetBase { static const char kDatasetGraphOutputNodeKey[]; private: - Status Serialize(string* serialized_graph_def, string* output_node) const { + Status Serialize(OpKernelContext* ctx, string* serialized_graph_def, + string* output_node) const { GraphDefBuilder b; DatasetGraphDefBuilder db(&b); Node* node = nullptr; - TF_RETURN_IF_ERROR(AsGraphDefInternal(&db, &node)); + TF_RETURN_IF_ERROR(AsGraphDefInternal(ctx, &db, &node)); *output_node = node->name(); GraphDef graph_def; TF_RETURN_IF_ERROR(b.ToGraphDef(&graph_def)); @@ -480,9 +563,9 @@ class DatasetIterator : public IteratorBase { return GetNextInternal(ctx, out_tensors, end_of_sequence); } - Status Save(IteratorStateWriter* writer) final { - TF_RETURN_IF_ERROR(dataset()->Save(writer)); - return IteratorBase::Save(writer); + Status Save(OpKernelContext* ctx, IteratorStateWriter* writer) final { + TF_RETURN_IF_ERROR(dataset()->Save(ctx, writer)); + return IteratorBase::Save(ctx, writer); } protected: diff --git a/tensorflow/core/kernels/iterator_ops.cc b/tensorflow/core/kernels/iterator_ops.cc index ae77ae64338..b48da5b3263 100644 --- a/tensorflow/core/kernels/iterator_ops.cc +++ b/tensorflow/core/kernels/iterator_ops.cc @@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/core/kernels/dataset.h" - #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_runner.h" #include "tensorflow/core/framework/iterator.pb.h" @@ -22,6 +20,7 @@ limitations under the License. 
#include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/variant_op_registry.h" #include "tensorflow/core/graph/graph_constructor.h" +#include "tensorflow/core/kernels/dataset.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/gtl/cleanup.h" @@ -79,10 +78,12 @@ Status VerifyShapesCompatible(const std::vector& expected, class IteratorResource : public ResourceBase { public: IteratorResource(const DataTypeVector& output_dtypes, - const std::vector& output_shapes) + const std::vector& output_shapes, + const int graph_def_version) : iterator_(nullptr), output_dtypes_(output_dtypes), - output_shapes_(output_shapes) {} + output_shapes_(output_shapes), + graph_def_version_(graph_def_version) {} Status GetNext(IteratorContext* ctx, std::vector* out_tensors, bool* end_of_sequence) { @@ -97,10 +98,10 @@ class IteratorResource : public ResourceBase { } } - Status Save(IteratorStateWriter* writer) { + Status Save(OpKernelContext* ctx, IteratorStateWriter* writer) { std::shared_ptr captured_iterator(iterator_); if (captured_iterator) { - return captured_iterator->Save(writer); + return captured_iterator->Save(ctx, writer); } else { return errors::FailedPrecondition( "Save() failed because the iterator has not been initialized. " @@ -125,8 +126,21 @@ class IteratorResource : public ResourceBase { TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr)); std::vector outputs; GraphRunner graph_runner(ctx->env()); - TF_RETURN_IF_ERROR(graph_runner.Run(&graph, ctx->function_library(), {}, - {output_node}, &outputs)); + + // Build a new FLR that knows about the functions in the graph. + std::unique_ptr flib_def( + new FunctionLibraryDefinition( + *ctx->function_library()->GetFunctionLibraryDefinition())); + TF_RETURN_IF_ERROR(flib_def->AddLibrary(graph_def.library())); + std::unique_ptr pflr( + new ProcessFunctionLibraryRuntime(nullptr, ctx->env(), + graph_def_version_, flib_def.get(), + {}, nullptr)); + FunctionLibraryRuntime* lib = + pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); + + TF_RETURN_IF_ERROR( + graph_runner.Run(&graph, lib, {}, {output_node}, &outputs)); TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(outputs[0], &dataset)); TF_RETURN_IF_ERROR(set_iterator(dataset->MakeIterator("Iterator"))); @@ -166,6 +180,7 @@ class IteratorResource : public ResourceBase { std::shared_ptr iterator_; const DataTypeVector output_dtypes_; const std::vector output_shapes_; + const int graph_def_version_; }; // Helper class for reading data from a VariantTensorData object. @@ -319,11 +334,12 @@ class IteratorStateVariant { } // Initializes this object with the current state of the iterator so // that it can be written on the next call to Encode(). 
- Status InitializeFromIterator(IteratorResource* iterator_resource) { + Status InitializeFromIterator(OpKernelContext* ctx, + IteratorResource* iterator_resource) { data_.reset(new VariantTensorData()); data_->set_type_name(TypeName()); VariantTensorDataWriter writer(data_.get()); - TF_RETURN_IF_ERROR(iterator_resource->Save(&writer)); + TF_RETURN_IF_ERROR(iterator_resource->Save(ctx, &writer)); TF_RETURN_IF_ERROR(writer.Flush()); return Status::OK(); } @@ -375,7 +391,8 @@ REGISTER_UNARY_VARIANT_DECODE_FUNCTION(IteratorStateVariant, class IteratorHandleOp : public ResourceOpKernel { public: explicit IteratorHandleOp(OpKernelConstruction* ctx) - : ResourceOpKernel(ctx) { + : ResourceOpKernel(ctx), + graph_def_version_(ctx->graph_def_version()) { OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_dtypes_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_)); } @@ -383,7 +400,8 @@ class IteratorHandleOp : public ResourceOpKernel { private: Status CreateResource(IteratorResource** ret) override EXCLUSIVE_LOCKS_REQUIRED(mu_) { - *ret = new IteratorResource(output_dtypes_, output_shapes_); + *ret = new IteratorResource(output_dtypes_, output_shapes_, + graph_def_version_); return Status::OK(); } @@ -398,6 +416,7 @@ class IteratorHandleOp : public ResourceOpKernel { private: DataTypeVector output_dtypes_; std::vector output_shapes_; + const int graph_def_version_; }; class MakeIteratorOp : public OpKernel { @@ -460,7 +479,8 @@ class OneShotIteratorOp : public AsyncOpKernel { ctx->env(), ThreadOptions(), strings::StrCat("one_shot_iterator_initialization_thread_", SanitizeThreadSuffix(name())), - 1 /* num_threads */, false /* low_latency_hint */)) + 1 /* num_threads */, false /* low_latency_hint */)), + graph_def_version_(ctx->graph_def_version()) { string shared_name; @@ -544,7 +564,8 @@ class OneShotIteratorOp : public AsyncOpKernel { ctx->resource_manager()->LookupOrCreate( cinfo->container(), cinfo->name(), iterator, [this](IteratorResource** ret) EXCLUSIVE_LOCKS_REQUIRED(mu_) { - *ret = new IteratorResource(output_dtypes_, output_shapes_); + *ret = new IteratorResource(output_dtypes_, output_shapes_, + graph_def_version_); return Status::OK(); })); @@ -634,6 +655,7 @@ class OneShotIteratorOp : public AsyncOpKernel { Status initialization_status_ GUARDED_BY(mu_); std::vector> done_callbacks_ GUARDED_BY(mu_); + const int graph_def_version_; }; class IteratorGetNextOp : public AsyncOpKernel { @@ -787,7 +809,7 @@ class SerializeIteratorOp : public OpKernel { Tensor* variant_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &variant_t)); IteratorStateVariant v; - OP_REQUIRES_OK(ctx, v.InitializeFromIterator(iterator_resource)); + OP_REQUIRES_OK(ctx, v.InitializeFromIterator(ctx, iterator_resource)); variant_t->scalar()() = v; } }; diff --git a/tensorflow/core/kernels/map_dataset_op.cc b/tensorflow/core/kernels/map_dataset_op.cc index ac458701fe2..4ba09bc335e 100644 --- a/tensorflow/core/kernels/map_dataset_op.cc +++ b/tensorflow/core/kernels/map_dataset_op.cc @@ -53,18 +53,21 @@ class MapDatasetOp : public UnaryDatasetOpKernel { std::move(other_arguments), &captured_func)); - *output = new Dataset(input, std::move(captured_func), output_types_, - output_shapes_); + *output = new Dataset(ctx, input, func_, std::move(captured_func), + output_types_, output_shapes_); } private: - class Dataset : public DatasetBase { + class Dataset : public GraphDatasetBase { public: - Dataset(const DatasetBase* input, + Dataset(OpKernelContext* ctx, const DatasetBase* input, + 
const NameAttrList& func, std::unique_ptr captured_func, const DataTypeVector& output_types, const std::vector& output_shapes) - : input_(input), + : GraphDatasetBase(ctx), + input_(input), + func_(func), captured_func_(std::move(captured_func)), output_types_(output_types), output_shapes_(output_shapes) { @@ -88,6 +91,37 @@ class MapDatasetOp : public UnaryDatasetOpKernel { string DebugString() override { return "MapDatasetOp::Dataset"; } + protected: + Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b, + Node** output) const override { + TF_RETURN_IF_ERROR(b->AddFunction(ctx, func_.name())); + Node* input_graph_node = nullptr; + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node)); + + DataTypeVector other_arguments_types( + captured_func_->captured_inputs().size()); + std::vector other_arguments( + captured_func_->captured_inputs().size()); + for (const Tensor& t : captured_func_->captured_inputs()) { + Node* node; + TF_RETURN_IF_ERROR(b->AddTensor(t, &node)); + other_arguments.emplace_back(node); + other_arguments_types.emplace_back(t.dtype()); + } + AttrValue f; + b->BuildAttrValue(func_, &f); + AttrValue other_arguments_types_attr; + b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr); + + TF_RETURN_IF_ERROR(b->AddDataset( + this, {std::make_pair(0, input_graph_node)}, // Single tensor inputs. + {std::make_pair(1, other_arguments)}, // Tensor list inputs. + {std::make_pair("f", f), + std::make_pair("Targuments", other_arguments_types_attr)}, // Attrs + output)); + return Status::OK(); + } + private: class Iterator : public DatasetIterator { public: @@ -133,11 +167,24 @@ class MapDatasetOp : public UnaryDatasetOpKernel { } } + protected: + Status SaveInternal(IteratorStateWriter* writer) override { + TF_RETURN_IF_ERROR(SaveParent(writer, input_impl_)); + return Status::OK(); + } + + Status RestoreInternal(OpKernelContext* ctx, + IteratorStateReader* reader) override { + TF_RETURN_IF_ERROR(RestoreParent(ctx, reader, input_impl_)); + return Status::OK(); + } + private: const std::unique_ptr input_impl_; }; const DatasetBase* const input_; + const NameAttrList func_; const std::unique_ptr captured_func_; const DataTypeVector output_types_; const std::vector output_shapes_; diff --git a/tensorflow/core/kernels/repeat_dataset_op.cc b/tensorflow/core/kernels/repeat_dataset_op.cc index 0167b9ea64b..3d977a0fa38 100644 --- a/tensorflow/core/kernels/repeat_dataset_op.cc +++ b/tensorflow/core/kernels/repeat_dataset_op.cc @@ -73,10 +73,10 @@ class RepeatDatasetOp : public UnaryDatasetOpKernel { string DebugString() override { return "RepeatDatasetOp::Dataset"; } protected: - Status AsGraphDefInternal(DatasetGraphDefBuilder* b, + Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; - TF_RETURN_IF_ERROR(b->AddParentDataset(input_, &input_graph_node)); + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node)); Node* count = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(count_, &count)); TF_RETURN_IF_ERROR( diff --git a/tensorflow/core/kernels/shuffle_dataset_op.cc b/tensorflow/core/kernels/shuffle_dataset_op.cc index dd0ab57e9dc..72facb3a0d0 100644 --- a/tensorflow/core/kernels/shuffle_dataset_op.cc +++ b/tensorflow/core/kernels/shuffle_dataset_op.cc @@ -308,10 +308,10 @@ class ShuffleDatasetOp : public UnaryDatasetOpKernel { } protected: - Status AsGraphDefInternal(DatasetGraphDefBuilder* b, + Status AsGraphDefInternal(OpKernelContext* ctx, 
DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; - TF_RETURN_IF_ERROR(b->AddParentDataset(input_, &input_graph_node)); + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node)); Node* buffer_size = nullptr; Node* seed = nullptr; Node* seed2 = nullptr; diff --git a/tensorflow/core/kernels/skip_dataset_op.cc b/tensorflow/core/kernels/skip_dataset_op.cc index 7ee945dd4c4..1fe49271e29 100644 --- a/tensorflow/core/kernels/skip_dataset_op.cc +++ b/tensorflow/core/kernels/skip_dataset_op.cc @@ -72,10 +72,10 @@ class SkipDatasetOp : public UnaryDatasetOpKernel { string DebugString() override { return "SkipDatasetOp::Dataset"; } protected: - Status AsGraphDefInternal(DatasetGraphDefBuilder* b, + Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; - TF_RETURN_IF_ERROR(b->AddParentDataset(input_, &input_graph_node)); + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node)); Node* count = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(count_, &count)); TF_RETURN_IF_ERROR( diff --git a/tensorflow/core/kernels/take_dataset_op.cc b/tensorflow/core/kernels/take_dataset_op.cc index fb294a96b15..7a6d20d6c7c 100644 --- a/tensorflow/core/kernels/take_dataset_op.cc +++ b/tensorflow/core/kernels/take_dataset_op.cc @@ -73,10 +73,10 @@ class TakeDatasetOp : public UnaryDatasetOpKernel { string DebugString() override { return "TakeDatasetOp::Dataset"; } protected: - Status AsGraphDefInternal(DatasetGraphDefBuilder* b, + Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b, Node** output) const override { Node* input_graph_node = nullptr; - TF_RETURN_IF_ERROR(b->AddParentDataset(input_, &input_graph_node)); + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node)); Node* count = nullptr; TF_RETURN_IF_ERROR(b->AddScalar(count_, &count)); TF_RETURN_IF_ERROR( diff --git a/tensorflow/core/kernels/tensor_dataset_op.cc b/tensorflow/core/kernels/tensor_dataset_op.cc index db7c9473287..1f690820316 100644 --- a/tensorflow/core/kernels/tensor_dataset_op.cc +++ b/tensorflow/core/kernels/tensor_dataset_op.cc @@ -78,7 +78,7 @@ class TensorDatasetOp : public DatasetOpKernel { components.emplace_back(node); } TF_RETURN_IF_ERROR( - b->AddDatasetWithInputAsList(this, components, output)); + b->AddDataset(this, {}, {std::make_pair(0, components)}, {}, output)); return Status::OK(); } diff --git a/tensorflow/core/kernels/tensor_slice_dataset_op.cc b/tensorflow/core/kernels/tensor_slice_dataset_op.cc index fd36bf524ce..4d0cbdd67c3 100644 --- a/tensorflow/core/kernels/tensor_slice_dataset_op.cc +++ b/tensorflow/core/kernels/tensor_slice_dataset_op.cc @@ -94,7 +94,7 @@ class TensorSliceDatasetOp : public DatasetOpKernel { components.emplace_back(node); } TF_RETURN_IF_ERROR( - b->AddDatasetWithInputAsList(this, components, output)); + b->AddDataset(this, {}, {std::make_pair(0, components)}, {}, output)); return Status::OK(); } diff --git a/tensorflow/core/kernels/zip_dataset_op.cc b/tensorflow/core/kernels/zip_dataset_op.cc index f466c8b268d..96080863ea1 100644 --- a/tensorflow/core/kernels/zip_dataset_op.cc +++ b/tensorflow/core/kernels/zip_dataset_op.cc @@ -78,17 +78,17 @@ class ZipDatasetOp : public DatasetOpKernel { string DebugString() override { return "ZipDatasetOp::Dataset"; } protected: - Status AsGraphDefInternal(DatasetGraphDefBuilder* b, + Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b, Node** output) 
const override { std::vector input_graph_nodes; input_graph_nodes.reserve(inputs_.size()); for (const auto& input : inputs_) { Node* input_node; - TF_RETURN_IF_ERROR(b->AddParentDataset(input, &input_node)); + TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input, &input_node)); input_graph_nodes.emplace_back(input_node); } - TF_RETURN_IF_ERROR( - b->AddDatasetWithInputAsList(this, input_graph_nodes, output)); + TF_RETURN_IF_ERROR(b->AddDataset( + this, {}, {std::make_pair(0, input_graph_nodes)}, {}, output)); return Status::OK(); } From 2011c2011ae30c3a40801f0543969fa8f373156a Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Thu, 16 Nov 2017 05:56:59 -0800 Subject: [PATCH 096/104] Implements specifying default approximations for layer_collection. Currently, the default approximation to use for each layer is hard-coded as a default argument to each registration function. This CL instead specifies these default values as properties which the user can modify. Additionally, the user can identify groups of linked parameters that should always use a specified approximation when registered. This should make it easier for users to experiment with different approximations. PiperOrigin-RevId: 175955141 --- .../contrib/kfac/python/kernel_tests/BUILD | 2 + .../kernel_tests/layer_collection_test.py | 76 ++++++- .../kfac/python/ops/layer_collection.py | 199 +++++++++++++++--- 3 files changed, 244 insertions(+), 33 deletions(-) diff --git a/tensorflow/contrib/kfac/python/kernel_tests/BUILD b/tensorflow/contrib/kfac/python/kernel_tests/BUILD index 60c245166d6..7d65ac9a43d 100644 --- a/tensorflow/contrib/kfac/python/kernel_tests/BUILD +++ b/tensorflow/contrib/kfac/python/kernel_tests/BUILD @@ -68,6 +68,7 @@ py_test( srcs = ["layer_collection_test.py"], srcs_version = "PY2AND3", deps = [ + "//tensorflow/contrib/kfac/python/ops:fisher_blocks", "//tensorflow/contrib/kfac/python/ops:fisher_factors", "//tensorflow/contrib/kfac/python/ops:layer_collection", "//tensorflow/python:array_ops", @@ -75,6 +76,7 @@ py_test( "//tensorflow/python:dtypes", "//tensorflow/python:framework_ops", "//tensorflow/python:linalg_ops", + "//tensorflow/python:math_ops", "//tensorflow/python:random_ops", "//tensorflow/python:random_seed", "//tensorflow/python:variable_scope", diff --git a/tensorflow/contrib/kfac/python/kernel_tests/layer_collection_test.py b/tensorflow/contrib/kfac/python/kernel_tests/layer_collection_test.py index 524e8338fde..c5ad90d1dc7 100644 --- a/tensorflow/contrib/kfac/python/kernel_tests/layer_collection_test.py +++ b/tensorflow/contrib/kfac/python/kernel_tests/layer_collection_test.py @@ -18,6 +18,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from tensorflow.contrib.kfac.python.ops import fisher_blocks from tensorflow.contrib.kfac.python.ops import fisher_factors from tensorflow.contrib.kfac.python.ops import layer_collection from tensorflow.python.framework import dtypes @@ -25,6 +26,7 @@ from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import linalg_ops +from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test @@ -105,8 +107,10 @@ class LayerCollectionTest(test.TestCase): array_ops.constant(4), [1, 1, 1, 1], 'SAME', array_ops.ones((1, 1, 1, 1)), array_ops.constant(3)) lc.register_conv2d( - 
array_ops.constant(4), [1, 1, 1, 1], 'SAME', - array_ops.ones((1, 1, 1, 1)), array_ops.constant(3), + array_ops.constant(4), [1, 1, 1, 1], + 'SAME', + array_ops.ones((1, 1, 1, 1)), + array_ops.constant(3), approx=layer_collection.APPROX_DIAGONAL_NAME) lc.register_generic( array_ops.constant(5), 16, approx=layer_collection.APPROX_FULL_NAME) @@ -122,8 +126,8 @@ class LayerCollectionTest(test.TestCase): random_seed.set_random_seed(200) lc = layer_collection.LayerCollection() key = array_ops.constant(1) - lc.register_fully_connected(key, - array_ops.constant(2), array_ops.constant(3)) + lc.register_fully_connected(key, array_ops.constant(2), + array_ops.constant(3)) with self.assertRaises(ValueError): lc.register_generic(key, 16) @@ -191,8 +195,8 @@ class LayerCollectionTest(test.TestCase): lc.register_block((x, y), MockFisherBlock('foo')) self.assertEqual( - set([MockFisherBlock('2'), MockFisherBlock('foo')]), - set(lc.get_blocks())) + set([MockFisherBlock('2'), MockFisherBlock('foo')]), set( + lc.get_blocks())) def testRegisterTupleVarSomeRegisteredInOtherTuples(self): x = variable_scope.get_variable('x', initializer=array_ops.constant(1,)) @@ -464,6 +468,66 @@ class LayerCollectionTest(test.TestCase): use_count_map = lc.get_use_count_map() self.assertDictEqual({'a': 4, 'b': 2, 'c': 4}, use_count_map) + def testIdentifyLinkedParametersSomeRegisteredInOtherTuples(self): + x = variable_scope.get_variable('x', shape=()) + y = variable_scope.get_variable('y', shape=()) + z = variable_scope.get_variable('z', shape=()) + lc = layer_collection.LayerCollection() + lc.define_linked_parameters((x, y)) + + with self.assertRaises(ValueError): + lc.define_linked_parameters((x, z)) + + def testIdentifySubsetPreviouslyRegisteredTensor(self): + x = variable_scope.get_variable('x', shape=()) + y = variable_scope.get_variable('y', shape=()) + lc = layer_collection.LayerCollection() + lc.define_linked_parameters((x, y)) + + with self.assertRaises(ValueError): + lc.define_linked_parameters(x) + + def testSpecifyApproximation(self): + w_0 = variable_scope.get_variable('w_0', [10, 10]) + w_1 = variable_scope.get_variable('w_1', [10, 10]) + + b_0 = variable_scope.get_variable('b_0', [10]) + b_1 = variable_scope.get_variable('b_1', [10]) + + x_0 = array_ops.placeholder(dtypes.float32, shape=(32, 10)) + x_1 = array_ops.placeholder(dtypes.float32, shape=(32, 10)) + + pre_bias_0 = math_ops.matmul(x_0, w_0) + pre_bias_1 = math_ops.matmul(x_1, w_1) + + # Build the fully connected layers in the graph. 
+ pre_bias_0 + b_0 # pylint: disable=pointless-statement + pre_bias_1 + b_1 # pylint: disable=pointless-statement + + lc = layer_collection.LayerCollection() + lc.define_linked_parameters( + w_0, approximation=layer_collection.APPROX_DIAGONAL_NAME) + lc.define_linked_parameters( + w_1, approximation=layer_collection.APPROX_DIAGONAL_NAME) + lc.define_linked_parameters( + b_0, approximation=layer_collection.APPROX_FULL_NAME) + lc.define_linked_parameters( + b_1, approximation=layer_collection.APPROX_FULL_NAME) + + lc.register_fully_connected(w_0, x_0, pre_bias_0) + lc.register_fully_connected( + w_1, x_1, pre_bias_1, approx=layer_collection.APPROX_KRONECKER_NAME) + self.assertIsInstance(lc.fisher_blocks[w_0], + fisher_blocks.FullyConnectedDiagonalFB) + self.assertIsInstance(lc.fisher_blocks[w_1], + fisher_blocks.FullyConnectedKFACBasicFB) + + lc.register_generic(b_0, batch_size=1) + lc.register_generic( + b_1, batch_size=1, approx=layer_collection.APPROX_DIAGONAL_NAME) + self.assertIsInstance(lc.fisher_blocks[b_0], fisher_blocks.FullFB) + self.assertIsInstance(lc.fisher_blocks[b_1], fisher_blocks.NaiveDiagonalFB) + if __name__ == '__main__': test.main() diff --git a/tensorflow/contrib/kfac/python/ops/layer_collection.py b/tensorflow/contrib/kfac/python/ops/layer_collection.py index 7300a7998c2..2139a261e05 100644 --- a/tensorflow/contrib/kfac/python/ops/layer_collection.py +++ b/tensorflow/contrib/kfac/python/ops/layer_collection.py @@ -38,12 +38,26 @@ from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest - # Names for various approximations that can be requested for Fisher blocks. APPROX_KRONECKER_NAME = "kron" APPROX_DIAGONAL_NAME = "diagonal" APPROX_FULL_NAME = "full" +_GENERIC_APPROX_TO_BLOCK_TYPES = { + APPROX_FULL_NAME: fb.FullFB, + APPROX_DIAGONAL_NAME: fb.NaiveDiagonalFB, +} + +_FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES = { + APPROX_KRONECKER_NAME: fb.FullyConnectedKFACBasicFB, + APPROX_DIAGONAL_NAME: fb.FullyConnectedDiagonalFB, +} + +_CONV2D_APPROX_TO_BLOCK_TYPES = { + APPROX_KRONECKER_NAME: fb.ConvKFCBasicFB, + APPROX_DIAGONAL_NAME: fb.ConvDiagonalFB, +} + # Possible value for 'reuse' keyword argument. Sets 'reuse' to # tf.get_variable_scope().reuse. VARIABLE_SCOPE = "VARIABLE_SCOPE" @@ -51,6 +65,14 @@ VARIABLE_SCOPE = "VARIABLE_SCOPE" # TODO(jamesmartens): need to add find_canonical_output back into this somewhere +def ensure_sequence(obj): + """If `obj` isn't a tuple or list, return a tuple containing `obj`.""" + if isinstance(obj, (tuple, list)): + return obj + else: + return (obj,) + + class LayerParametersDict(OrderedDict): """An OrderedDict where keys are Tensors or tuples of Tensors. @@ -110,9 +132,14 @@ class LayerCollection(object): def __init__(self, graph=None, name="LayerCollection"): self.fisher_blocks = LayerParametersDict() self.fisher_factors = OrderedDict() + self._linked_parameters = dict( + ) # dict mapping sets of variables to optionally specified approximations. 
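    # Illustrative sketch of how linked parameters and the default
    # approximations below are meant to be used (names from this module;
    # `w`, `x`, and `pre_bias` stand for a layer's weights, inputs, and
    # pre-bias outputs):
    #   lc = LayerCollection()
    #   lc.define_linked_parameters(w, approximation=APPROX_DIAGONAL_NAME)
    #   lc.default_fully_connected_approximation = APPROX_KRONECKER_NAME
    #   # With approx=None, registration first checks the linked approximation
    #   # for `w`, then falls back to the default set above.
    #   lc.register_fully_connected(w, x, pre_bias)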
self._graph = graph or ops.get_default_graph() self._loss_dict = {} # {str: LossFunction} self._subgraph = None + self._default_generic_approximation = APPROX_FULL_NAME + self._default_fully_connected_approximation = APPROX_KRONECKER_NAME + self._default_convolution_2d_approximation = APPROX_KRONECKER_NAME with variable_scope.variable_scope(None, default_name=name) as scope: self._var_scope = scope.name @@ -122,6 +149,70 @@ class LayerCollection(object): """LossFunctions registered with this LayerCollection.""" return list(self._loss_dict.values()) + def is_variable_registered(self, variable): + """Checks whether the variable has already been registered. + + Args: + variable: A single variable or tensor. + Returns: + True if the variable has been registered either by itself or as part of a + tuple. + """ + return any([ + variable in key if isinstance(key, (tuple, list)) else variable == key + for key in self.fisher_blocks.keys() + ]) + + @property + def linked_parameters(self): + """Groups of parameters with an optionally specified approximation. + + Linked parameters can be added using `define_linked_parameters`. + If an approximation is specified, then this approximation will be used + when registering a layer with exactly these parameters, unless an + approximation is specified when calling the registration function. + + Returns: + A `dict` mapping tuples of parameters to an optional string. + """ + return self._linked_parameters + + @property + def default_generic_approximation(self): + return self._default_generic_approximation + + @default_generic_approximation.setter + def default_generic_approximation(self, value): + if value not in _GENERIC_APPROX_TO_BLOCK_TYPES: + raise ValueError( + "{} is not a valid approximation for generic variables.".format( + value)) + self._default_generic_approximation = value + + @property + def default_fully_connected_approximation(self): + return self._default_fully_connected_approximation + + @default_fully_connected_approximation.setter + def default_fully_connected_approximation(self, value): + if value not in _FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES: + raise ValueError( + "{} is not a valid approximation for fully connected layers.".format( + value)) + self._default_fully_connected_approximation = value + + @property + def default_conv2d_approximation(self): + return self._default_convolution_2d_approximation + + @default_conv2d_approximation.setter + def default_conv2d_approximation(self, value): + if value not in _CONV2D_APPROX_TO_BLOCK_TYPES: + raise ValueError( + "{} is not a valid approximation for 2d convolutional layers.".format( + value)) + self._default_convolution_2d_approximation = value + def register_block(self, layer_key, fisher_block, reuse=VARIABLE_SCOPE): """Validates and registers the layer_key associated with the fisher_block. @@ -187,7 +278,8 @@ class LayerCollection(object): # Find all keys that are either supersets or subsets of 'layer_key'. inclusions = { fisher_elt - for layer_elt in layer_key for fisher_elt in self.fisher_blocks + for layer_elt in layer_key + for fisher_elt in self.fisher_blocks if self._equal_or_subset(layer_elt, fisher_elt) } @@ -294,6 +386,49 @@ class LayerCollection(object): def subgraph(self): return self._subgraph + def define_linked_parameters(self, params, approximation=None): + """Identify a set of parameters that should be grouped together. 
+ + During automatic graph scanning, any matches containing variables that have + been identified as part of a linked group will be filtered out unless + the match parameters are exactly equal to the ones specified in the linked + group. + + Args: + params: A variable, or a tuple or list of variables. The variables + to be linked. + approximation: Optional string specifying the type of approximation to use + for these variables. If unspecified, this layer collection's default + approximation for the layer type will be used. + + Raises: + ValueError: If the parameters were already registered in a layer or + identified as part of an incompatible group. + """ + params = frozenset(ensure_sequence(params)) + + # Check if any of the variables in 'params' is already in + # 'self.fisher_blocks.keys()'. + for registered_params, fisher_block in self.fisher_blocks.items(): + registered_params_set = set(ensure_sequence(registered_params)) + for variable in params: + if (variable in registered_params_set and + params != registered_params_set): + raise ValueError( + "Can't link parameters {}, variable {} was already registered in " + "group {} with layer {}".format(params, variable, + registered_params, fisher_block)) + + # Check if any of the variables in 'params' is already in + # 'self.linked_parameters'. + for variable in params: + for other_linked_params in self.linked_parameters: + if variable in other_linked_params: + raise ValueError("Can't link parameters {}, variable {} was already " + "linked in group {}.".format(params, variable, + other_linked_params)) + self._linked_parameters[params] = approximation + def create_subgraph(self): if not self.losses: raise ValueError("Must have at least one registered loss.") @@ -307,11 +442,19 @@ class LayerCollection(object): return math_ops.add_n( tuple(loss.evaluate_on_sample() for loss in self.losses)) + def _get_linked_approx(self, params): + """If params were linked, return their specified approximation.""" + params_set = frozenset(ensure_sequence(params)) + if params_set in self.linked_parameters: + return self.linked_parameters[params_set] + else: + return None + def register_fully_connected(self, params, inputs, outputs, - approx=APPROX_KRONECKER_NAME, + approx=None, reuse=VARIABLE_SCOPE): """Registers a fully connnected layer. @@ -332,15 +475,15 @@ class LayerCollection(object): KeyError: If reuse == True but no FisherBlock found for 'params'. ValueError: If reuse == True and FisherBlock found but of the wrong type. """ - approx_to_block_types = { - APPROX_KRONECKER_NAME: fb.FullyConnectedKFACBasicFB, - APPROX_DIAGONAL_NAME: fb.FullyConnectedDiagonalFB, - } + if approx is None: + approx = self._get_linked_approx(params) + if approx is None: + approx = self.default_fully_connected_approximation - if approx not in approx_to_block_types: + if approx not in _FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES: raise ValueError("Bad value {} for approx.".format(approx)) - block_type = approx_to_block_types[approx] + block_type = _FULLY_CONNECTED_APPROX_TO_BLOCK_TYPES[approx] has_bias = isinstance(params, (tuple, list)) block = self.register_block(params, block_type(self, has_bias), reuse=reuse) @@ -352,7 +495,7 @@ class LayerCollection(object): padding, inputs, outputs, - approx=APPROX_KRONECKER_NAME, + approx=None, reuse=VARIABLE_SCOPE): """Registers a convolutional layer. @@ -377,15 +520,16 @@ class LayerCollection(object): KeyError: If reuse == True but no FisherBlock found for 'params'. ValueError: If reuse == True and FisherBlock found but of the wrong type. 
""" - approx_to_block_types = { - APPROX_KRONECKER_NAME: fb.ConvKFCBasicFB, - APPROX_DIAGONAL_NAME: fb.ConvDiagonalFB, - } - if approx not in approx_to_block_types: + if approx is None: + approx = self._get_linked_approx(params) + if approx is None: + approx = self.default_conv2d_approximation + + if approx not in _CONV2D_APPROX_TO_BLOCK_TYPES: raise ValueError("Bad value {} for approx.".format(approx)) - block_type = approx_to_block_types[approx] + block_type = _CONV2D_APPROX_TO_BLOCK_TYPES[approx] block = self.register_block( params, block_type(self, params, strides, padding), reuse=reuse) block.register_additional_minibatch(inputs, outputs) @@ -393,7 +537,7 @@ class LayerCollection(object): def register_generic(self, params, batch_size, - approx=APPROX_DIAGONAL_NAME, + approx=None, reuse=VARIABLE_SCOPE): """Registers a generic layer. @@ -413,15 +557,16 @@ class LayerCollection(object): KeyError: If reuse == True but no FisherBlock found for 'params'. ValueError: If reuse == True and FisherBlock found but of the wrong type. """ - approx_to_block_types = { - APPROX_FULL_NAME: fb.FullFB, - APPROX_DIAGONAL_NAME: fb.NaiveDiagonalFB, - } - if approx not in approx_to_block_types: + if approx is None: + approx = self._get_linked_approx(params) + if approx is None: + approx = self.default_generic_approximation + + if approx not in _GENERIC_APPROX_TO_BLOCK_TYPES: raise ValueError("Bad value {} for approx.".format(approx)) - block_type = approx_to_block_types[approx] + block_type = _GENERIC_APPROX_TO_BLOCK_TYPES[approx] block = self.register_block(params, block_type(self, params), reuse=reuse) block.register_additional_minibatch(batch_size) @@ -560,10 +705,10 @@ class LayerCollection(object): try: hash(args) except TypeError: - raise TypeError(( - "Unable to use (cls, args) = ({}, {}) as a key in " - "LayerCollection.fisher_factors. The pair cannot be hashed." - ).format(cls, args)) + raise TypeError( + ("Unable to use (cls, args) = ({}, {}) as a key in " + "LayerCollection.fisher_factors. The pair cannot be hashed.").format( + cls, args)) with variable_scope.variable_scope(self._var_scope): return utils.setdefault(self.fisher_factors, (cls, args), From 2a3429d702699012425eb3fa9cd2a1d796a14b20 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Thu, 16 Nov 2017 06:06:49 -0800 Subject: [PATCH 097/104] Add docstring note about .transform() backprop behaviour. PiperOrigin-RevId: 175955706 --- tensorflow/contrib/image/python/ops/image_ops.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow/contrib/image/python/ops/image_ops.py b/tensorflow/contrib/image/python/ops/image_ops.py index 011ddeaa9a1..faedee6f877 100644 --- a/tensorflow/contrib/image/python/ops/image_ops.py +++ b/tensorflow/contrib/image/python/ops/image_ops.py @@ -224,7 +224,8 @@ def transform(images, transforms, interpolation="NEAREST", name=None): `(x, y)` to a transformed *input* point `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where `k = c0 x + c1 y + 1`. The transforms are *inverted* compared to - the transform mapping input points to output points. + the transform mapping input points to output points. Note that gradients + are not backpropagated into transformation parameters. interpolation: Interpolation mode. Supported values: "NEAREST", "BILINEAR". Returns: From 8dccbde8ab5fe0c7dd2ee0af0e4a91c1a807c004 Mon Sep 17 00:00:00 2001 From: "A. 
Unique TensorFlower" Date: Thu, 16 Nov 2017 09:02:22 -0800 Subject: [PATCH 098/104] MultiHead adds individual head loss in eval_metric_ops. PiperOrigin-RevId: 175970818 --- tensorflow/contrib/estimator/BUILD | 1 + .../estimator/python/estimator/multi_head.py | 22 ++++++++++++------- .../python/estimator/multi_head_test.py | 2 ++ 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/tensorflow/contrib/estimator/BUILD b/tensorflow/contrib/estimator/BUILD index bc67ef83541..008ca7a5d17 100644 --- a/tensorflow/contrib/estimator/BUILD +++ b/tensorflow/contrib/estimator/BUILD @@ -208,6 +208,7 @@ py_library( "//tensorflow/python:control_flow_ops", "//tensorflow/python:framework_ops", "//tensorflow/python:math_ops", + "//tensorflow/python:metrics", "//tensorflow/python:summary", "//tensorflow/python/estimator:head", "//tensorflow/python/estimator:metric_keys", diff --git a/tensorflow/contrib/estimator/python/estimator/multi_head.py b/tensorflow/contrib/estimator/python/estimator/multi_head.py index 73bae5acf9c..f2a6eae03ec 100644 --- a/tensorflow/contrib/estimator/python/estimator/multi_head.py +++ b/tensorflow/contrib/estimator/python/estimator/multi_head.py @@ -27,6 +27,7 @@ from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops +from tensorflow.python.ops import metrics as metrics_lib from tensorflow.python.saved_model import signature_constants from tensorflow.python.summary import summary @@ -342,14 +343,19 @@ class _MultiHead(head_lib._Head): # pylint:disable=protected-access predictions = {} metrics = {} losses = [] - for head, spec in zip(self._heads, all_estimator_spec): - losses.append(spec.loss) - head_name = head.name - # Metric keys already contain head.name. - metrics.update(spec.eval_metric_ops or {}) - for k, v in six.iteritems(spec.predictions): - predictions[(head_name, k)] = v - loss = _merge_losses(losses, self._head_weights) + with ops.name_scope('merge_eval'): + for head, spec in zip(self._heads, all_estimator_spec): + losses.append(spec.loss) + head_name = head.name + # Loss metric is not added by default. + loss_name = head_lib._summary_key( # pylint:disable=protected-access + head_name, metric_keys.MetricKeys.LOSS) + metrics[loss_name] = metrics_lib.mean(spec.loss, name=loss_name) + # Metric keys already contain head.name. + metrics.update(spec.eval_metric_ops or {}) + for k, v in six.iteritems(spec.predictions): + predictions[(head_name, k)] = v + loss = _merge_losses(losses, self._head_weights) return model_fn.EstimatorSpec( mode=model_fn.ModeKeys.EVAL, diff --git a/tensorflow/contrib/estimator/python/estimator/multi_head_test.py b/tensorflow/contrib/estimator/python/estimator/multi_head_test.py index 8d51a298b23..68f2d5d1cd5 100644 --- a/tensorflow/contrib/estimator/python/estimator/multi_head_test.py +++ b/tensorflow/contrib/estimator/python/estimator/multi_head_test.py @@ -297,6 +297,8 @@ class MultiHeadTest(test.TestCase): keys = metric_keys.MetricKeys expected_metrics = { + keys.LOSS + '/head1': expected_loss_head1, + keys.LOSS + '/head2': expected_loss_head2, # Average loss over examples. keys.LOSS_MEAN + '/head1': expected_loss_head1 / 2, keys.LOSS_MEAN + '/head2': expected_loss_head2 / 2, From 1e3c712e32d5796ff4c93aa64570fb454b2c499e Mon Sep 17 00:00:00 2001 From: Ian Langmore Date: Thu, 16 Nov 2017 09:18:49 -0800 Subject: [PATCH 099/104] linear_operator_test_util.py. Adding hooks for turning off placeholders/adjoint in tests. 
PiperOrigin-RevId: 175972993 --- .../ops/linalg/linear_operator_test_util.py | 36 ++++++++++++------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/tensorflow/python/ops/linalg/linear_operator_test_util.py b/tensorflow/python/ops/linalg/linear_operator_test_util.py index 3d0ea3e11be..2c11f90e6d9 100644 --- a/tensorflow/python/ops/linalg/linear_operator_test_util.py +++ b/tensorflow/python/ops/linalg/linear_operator_test_util.py @@ -66,11 +66,23 @@ class LinearOperatorDerivedClassTest(test.TestCase): rtol = self._rtol[dtype] self.assertAllClose(x, y, atol=atol, rtol=rtol) + @property + def _adjoint_options(self): + return [False, True] + + @property + def _adjoint_arg_options(self): + return [False, True] + @property def _dtypes_to_test(self): # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit. return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128] + @property + def _use_placeholder_options(self): + return [False, True] + @abc.abstractproperty def _shapes_to_test(self): """Returns list of tuples, each is one shape that will be tested.""" @@ -151,7 +163,7 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_to_dense(self): self._skip_if_tests_to_skip_contains("to_dense") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: @@ -166,7 +178,7 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_det(self): self._skip_if_tests_to_skip_contains("det") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: @@ -183,7 +195,7 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_log_abs_det(self): self._skip_if_tests_to_skip_contains("log_abs_det") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: @@ -200,11 +212,11 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_matmul(self): self._skip_if_tests_to_skip_contains("matmul") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: - for adjoint in False, True: - for adjoint_arg in False, True: + for adjoint in self._adjoint_options: + for adjoint_arg in self._adjoint_arg_options: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( @@ -228,11 +240,11 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_solve(self): self._skip_if_tests_to_skip_contains("solve") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: - for adjoint in False, True: - for adjoint_arg in False, True: + for adjoint in self._adjoint_options: + for adjoint_arg in self._adjoint_arg_options: with self.test_session(graph=ops.Graph()) as sess: sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED operator, mat, feed_dict = self._operator_and_mat_and_feed_dict( @@ -257,7 +269,7 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_trace(self): 
self._skip_if_tests_to_skip_contains("trace") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: @@ -274,7 +286,7 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_add_to_tensor(self): self._skip_if_tests_to_skip_contains("add_to_tensor") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: @@ -293,7 +305,7 @@ class LinearOperatorDerivedClassTest(test.TestCase): def test_diag_part(self): self._skip_if_tests_to_skip_contains("diag_part") - for use_placeholder in False, True: + for use_placeholder in self._use_placeholder_options: for shape in self._shapes_to_test: for dtype in self._dtypes_to_test: with self.test_session(graph=ops.Graph()) as sess: From 9d737356147a730326cfcbdc08b0b876dd0766e6 Mon Sep 17 00:00:00 2001 From: Benoit Steiner Date: Thu, 16 Nov 2017 09:44:06 -0800 Subject: [PATCH 100/104] Simplify reductions in more cases. PiperOrigin-RevId: 175975917 --- .../grappler/optimizers/constant_folding.cc | 244 ++++++++++++------ .../grappler/optimizers/constant_folding.h | 6 + .../optimizers/constant_folding_test.cc | 41 ++- 3 files changed, 212 insertions(+), 79 deletions(-) diff --git a/tensorflow/core/grappler/optimizers/constant_folding.cc b/tensorflow/core/grappler/optimizers/constant_folding.cc index 993831c4121..b722905032a 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding.cc +++ b/tensorflow/core/grappler/optimizers/constant_folding.cc @@ -339,92 +339,180 @@ bool ExtractShape(const NodeDef& shape_node, const GraphProperties& properties, } } // namespace +Status ConstantFolding::MaterializeBroadcastGradientArgs( + const NodeDef& node, const GraphProperties& properties) { + const NodeDef* shape_node1 = node_map_->GetNode(node.input(0)); + const NodeDef* shape_node2 = node_map_->GetNode(node.input(1)); + if (shape_node1 == nullptr || + (shape_node1->op() != "Shape" && shape_node1->op() != "Const") || + shape_node2 == nullptr || + (shape_node2->op() != "Shape" && shape_node2->op() != "Const")) { + return Status::OK(); + } + int64 min_id = 0; + BCast::Vec shape1; + if (!ExtractShape(*shape_node1, properties, &shape1, &min_id)) { + return Status::OK(); + } + BCast::Vec shape2; + if (!ExtractShape(*shape_node2, properties, &shape2, &min_id)) { + return Status::OK(); + } + // A value of -1 means we don't known anything about the dimension. Replace + // the -1 values with unique dimension ids since we don't want two '-1' + // dimensions to be considered equal. + for (auto& id : shape1) { + if (id == -1) { + id = --min_id; + } + } + for (auto& id : shape2) { + if (id == -1) { + id = --min_id; + } + } + BCast bcast(shape1, shape2); + if (!bcast.IsValid()) { + return Status::OK(); + } + BCast::Vec reduce_dims[2]; + reduce_dims[0] = bcast.grad_x_reduce_idx(); + reduce_dims[1] = bcast.grad_y_reduce_idx(); + + const DataType type = node.attr().at("T").type(); + NodeDef* out[2]; + for (int j = 0; j < 2; ++j) { + if (!reduce_dims[j].empty()) { + // This is the case when a tensor dimension of 1 is matched against an + // unknown dimension. The unknown dimension could also be equal to 1, in + // which case there would be no reduction. 
+ out[j] = nullptr; + } else { + string const_name = AddPrefixToNodeName( + strings::StrCat(node.name(), "-", j), kConstantFoldingConst); + out[j] = node_map_->GetNode(const_name); + if (out[j] == nullptr) { + out[j] = graph_.add_node(); + Tensor value(type, TensorShape({0})); + *out[j] = CreateNodeDef(const_name, TensorValue(&value)); + out[j]->set_device(node.device()); + node_map_->AddNode(const_name, out[j]); + string ctrl_dep = + AddControlDependency(node.name(), &graph_, node_map_.get()); + *out[j]->add_input() = ctrl_dep; + node_map_->AddOutput(NodeName(ctrl_dep), const_name); + } + } + } + + auto outputs = node_map_->GetOutputs(node.name()); + for (const auto& output : outputs) { + for (int k = 0; k < output->input_size(); ++k) { + int port; + string node_name = ParseNodeName(output->input(k), &port); + if (node_name == node.name() && port >= 0 && port < 2 && out[port]) { + *output->mutable_input(k) = out[port]->name(); + node_map_->UpdateInput(output->name(), node_name, out[port]->name()); + } + } + } + + return Status::OK(); +} + +Status ConstantFolding::MaterializeReductionIndices( + NodeDef* node, const GraphProperties& properties) { + if (node->input_size() < 2) { + return Status::OK(); + } + const NodeDef* indices = node_map_->GetNode(node->input(1)); + if (!indices || IsConstant(*indices)) { + // The reduction indices are already constant, there's nothing to do. + return Status::OK(); + } + + const OpInfo::TensorProperties& input_prop = + properties.GetInputProperties(node->name())[0]; + if (input_prop.shape().unknown_rank()) { + // We can't do anything if we don't know the rank of the input. + return Status::OK(); + } + const int rank = input_prop.shape().dim_size(); + if (rank == 0) { + // Unexpected graph, don't try to change it. + return Status::OK(); + } + const OpInfo::TensorProperties& output_prop = + properties.GetOutputProperties(node->name())[0]; + PartialTensorShape output_shape(output_prop.shape()); + if (output_shape.num_elements() != 1) { + bool full_reduction = false; + for (const NodeDef* fanout : node_map_->GetOutputs(node->name())) { + if (!IsReshape(*fanout)) { + continue; + } + const OpInfo::TensorProperties& reshape_prop = + properties.GetOutputProperties(fanout->name())[0]; + PartialTensorShape shape(reshape_prop.shape()); + if (shape.num_elements() != 1) { + return Status::OK(); + } else { + full_reduction = true; + } + } + if (!full_reduction) { + return Status::OK(); + } + } + + const OpInfo::TensorProperties& reduction_prop = + properties.GetInputProperties(node->name())[1]; + DataType dtype = reduction_prop.dtype(); + if (dtype != DT_INT32 && dtype != DT_INT64) { + return Status::OK(); + } + // We know it's a full reduction. We can generate the set of indices to + // reduce. 
+ string const_name =
+     AddPrefixToNodeName(strings::StrCat(node->name(), "-reduction_indices"),
+                         kConstantFoldingConst);
+ if (node_map_->GetNode(const_name)) {
+   return Status::OK();
+ }
+ NodeDef* reduction_indices = graph_.add_node();
+ Tensor value(dtype, TensorShape({rank}));
+ for (int i = 0; i < rank; ++i) {
+   if (dtype == DT_INT32) {
+     value.vec<int32>()(i) = i;
+   } else {
+     value.vec<int64>()(i) = i;
+   }
+ }
+ *reduction_indices = CreateNodeDef(const_name, TensorValue(&value));
+ reduction_indices->set_device(node->device());
+ *reduction_indices->add_input() =
+     AddControlDependency(node->input(1), &graph_, node_map_.get());
+ node_map_->AddNode(const_name, reduction_indices);
+
+ node->set_input(1, reduction_indices->name());
+ node_map_->UpdateInput(node->name(), indices->name(),
+                        reduction_indices->name());
+
+ return Status::OK();
+}
+
 Status ConstantFolding::MaterializeConstants(
     const GrapplerItem& item, const GraphProperties& properties) {
   const int node_count = graph_.node_size();
   for (int i = 0; i < node_count; ++i) {
     NodeDef& node = *graph_.mutable_node(i);
     const string& op = node.op();
-    if (op != "BroadcastGradientArgs") {
-      continue;
-    }
-    const NodeDef* shape_node1 = node_map_->GetNode(node.input(0));
-    const NodeDef* shape_node2 = node_map_->GetNode(node.input(1));
-    if (shape_node1 == nullptr ||
-        (shape_node1->op() != "Shape" && shape_node1->op() != "Const") ||
-        shape_node2 == nullptr ||
-        (shape_node2->op() != "Shape" && shape_node2->op() != "Const")) {
-      continue;
-    }
-    int64 min_id = 0;
-    BCast::Vec shape1;
-    if (!ExtractShape(*shape_node1, properties, &shape1, &min_id)) {
-      continue;
-    }
-    BCast::Vec shape2;
-    if (!ExtractShape(*shape_node2, properties, &shape2, &min_id)) {
-      continue;
-    }
-    // A value of -1 means we don't known anything about the dimension. Replace
-    // the -1 values with unique dimension ids since we don't want two '-1'
-    // dimensions to be considered equal.
-    for (auto& id : shape1) {
-      if (id == -1) {
-        id = --min_id;
-      }
-    }
-    for (auto& id : shape2) {
-      if (id == -1) {
-        id = --min_id;
-      }
-    }
-    BCast bcast(shape1, shape2);
-    if (!bcast.IsValid()) {
-      continue;
-    }
-    BCast::Vec reduce_dims[2];
-    reduce_dims[0] = bcast.grad_x_reduce_idx();
-    reduce_dims[1] = bcast.grad_y_reduce_idx();
-
-    const DataType type = node.attr().at("T").type();
-    NodeDef* out[2];
-    for (int j = 0; j < 2; ++j) {
-      if (!reduce_dims[j].empty()) {
-        // This is the case when a tensor dimension 1 is matched against an
-        // unknown dimension. The unknown dimension could also be equal to 1, in
-        // which case there would be no reduction.
- out[j] = nullptr; - } else { - Tensor value(type, TensorShape({0})); - string const_name = AddPrefixToNodeName( - strings::StrCat(node.name(), "-", j), kConstantFoldingConst); - out[j] = node_map_->GetNode(const_name); - if (!out[j]) { - out[j] = graph_.add_node(); - *out[j] = CreateNodeDef(const_name, TensorValue(&value)); - out[j]->set_device(node.device()); - node_map_->AddNode(const_name, out[j]); - string ctrl_dep = - AddControlDependency(node.name(), &graph_, node_map_.get()); - *out[j]->add_input() = ctrl_dep; - node_map_->AddOutput(NodeName(ctrl_dep), const_name); - } - } - } - - auto outputs = node_map_->GetOutputs(node.name()); - for (const auto& output : outputs) { - for (int k = 0; k < output->input_size(); ++k) { - int port; - string node_name = ParseNodeName(output->input(k), &port); - if (node_name == node.name() && port >= 0 && port < 2 && out[port]) { - *output->mutable_input(k) = out[port]->name(); - node_map_->UpdateInput(output->name(), node_name, out[port]->name()); - } - } + if (op == "BroadcastGradientArgs") { + TF_RETURN_IF_ERROR(MaterializeBroadcastGradientArgs(node, properties)); + } else if (IsReduction(node)) { + TF_RETURN_IF_ERROR(MaterializeReductionIndices(&node, properties)); } } - return Status::OK(); } diff --git a/tensorflow/core/grappler/optimizers/constant_folding.h b/tensorflow/core/grappler/optimizers/constant_folding.h index dd988f336cb..f04f413c10a 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding.h +++ b/tensorflow/core/grappler/optimizers/constant_folding.h @@ -53,6 +53,12 @@ class ConstantFolding : public GraphOptimizer { private: Status MaterializeShapes(const GrapplerItem& item, const GraphProperties& properties); + + Status MaterializeBroadcastGradientArgs(const NodeDef& node, + const GraphProperties& properties); + Status MaterializeReductionIndices(NodeDef* node, + const GraphProperties& properties); + Status MaterializeConstants(const GrapplerItem& item, const GraphProperties& properties); bool IsFoldable(const NodeDef& node) const; diff --git a/tensorflow/core/grappler/optimizers/constant_folding_test.cc b/tensorflow/core/grappler/optimizers/constant_folding_test.cc index 43f84b1ddfd..428376c02cc 100644 --- a/tensorflow/core/grappler/optimizers/constant_folding_test.cc +++ b/tensorflow/core/grappler/optimizers/constant_folding_test.cc @@ -840,7 +840,7 @@ TEST_F(ConstantFoldingTest, Packing) { EXPECT_GT(8000, output.ByteSizeLong()); } -TEST_F(ConstantFoldingTest, ConstantMaterialization) { +TEST_F(ConstantFoldingTest, MaterializeBroadcastGradientArgs) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT, @@ -918,6 +918,45 @@ TEST_F(ConstantFoldingTest, ConstantMaterialization) { EXPECT_EQ(7, found); } +TEST_F(ConstantFoldingTest, MaterializeReductionIndices) { + tensorflow::Scope s = tensorflow::Scope::NewRootScope(); + Output input = + ops::Placeholder(s.WithOpName("input"), DT_FLOAT, + ops::Placeholder::Shape(PartialTensorShape({-1, -1}))); + Output indices = ops::Placeholder(s.WithOpName("indices"), DT_INT32); + Output sum = ops::Sum(s.WithOpName("sum"), input, indices); + Output size = ops::Const(s.WithOpName("size"), 1, {1}); + Output reshape = ops::Reshape(s.WithOpName("reshape"), sum, size); + + GrapplerItem item; + TF_CHECK_OK(s.ToGraphDef(&item.graph)); + + ConstantFolding fold(RewriterConfig::AGGRESSIVE, nullptr /* cpu_device */); + GraphDef output; + Status status = fold.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + // Run a second 
time to make sure the optimization is idempotent. + item.graph.Swap(&output); + status = fold.Optimize(nullptr, item, &output); + TF_EXPECT_OK(status); + + int found = 0; + for (const auto& node : output.node()) { + if (node.name() == "ConstantFolding/sum-reduction_indices") { + ++found; + EXPECT_EQ("Const", node.op()); + EXPECT_EQ("^indices", node.input(0)); + EXPECT_EQ(2, TensorShape(node.attr().at("value").tensor().tensor_shape()) + .num_elements()); + } else if (node.name() == "sum") { + ++found; + EXPECT_EQ("ConstantFolding/sum-reduction_indices", node.input(1)); + } + } + EXPECT_EQ(2, found); +} + } // namespace } // namespace grappler } // namespace tensorflow From aa4162ac9f1812a0966d3cd9b5e441e47f035828 Mon Sep 17 00:00:00 2001 From: Shanqing Cai Date: Thu, 16 Nov 2017 10:23:48 -0800 Subject: [PATCH 101/104] contrib/summary: refactor summary_test_util A logdir may contain files other than summary event files, e.g., checkpoints. So add a method "events_from_file" to load events from a single file. The existing "events_from_logdir" method now calls the new method. PiperOrigin-RevId: 175981886 --- .../contrib/eager/python/evaluator_test.py | 4 +-- .../examples/resnet50/resnet50_graph_test.py | 2 +- .../python/examples/resnet50/resnet50_test.py | 2 +- .../contrib/eager/python/metrics_test.py | 2 +- .../contrib/summary/summary_ops_test.py | 6 ++-- .../contrib/summary/summary_test_util.py | 35 +++++++++++++++---- 6 files changed, 36 insertions(+), 15 deletions(-) diff --git a/tensorflow/contrib/eager/python/evaluator_test.py b/tensorflow/contrib/eager/python/evaluator_test.py index 02f82cb2169..7d2274db9b0 100644 --- a/tensorflow/contrib/eager/python/evaluator_test.py +++ b/tensorflow/contrib/eager/python/evaluator_test.py @@ -87,7 +87,7 @@ class EvaluatorTest(test.TestCase): e.all_metric_results(logdir) - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].simple_value, 6.0) @@ -136,7 +136,7 @@ class EvaluatorTest(test.TestCase): variables.global_variables_initializer().run() e.run_evaluation(init_op, call_op, results_op) - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].simple_value, 6.0) diff --git a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_graph_test.py b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_graph_test.py index 736a75332ff..14c82c87a72 100644 --- a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_graph_test.py +++ b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_graph_test.py @@ -95,7 +95,7 @@ class ResNet50GraphTest(tf.test.TestCase): sess.run([train_op, tf.contrib.summary.all_summary_ops()], feed_dict={images: np_images, labels: np_labels}) - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'loss') diff --git a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py index d6389f2e385..582f4837c6f 100644 --- a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py +++ b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py @@ -103,7 +103,7 @@ class ResNet50Test(tf.test.TestCase): images, labels = random_batch(2) 
train_one_step(model, images, labels, optimizer) self.assertEqual(320, len(model.variables)) - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'loss') diff --git a/tensorflow/contrib/eager/python/metrics_test.py b/tensorflow/contrib/eager/python/metrics_test.py index b4f5973bd11..96eb1b4f2a0 100644 --- a/tensorflow/contrib/eager/python/metrics_test.py +++ b/tensorflow/contrib/eager/python/metrics_test.py @@ -72,7 +72,7 @@ class MetricsTest(test.TestCase): name="t0").as_default(), summary_ops.always_record_summaries(): m.result() # As a side-effect will write summaries. - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].simple_value, 37.0) diff --git a/tensorflow/contrib/summary/summary_ops_test.py b/tensorflow/contrib/summary/summary_ops_test.py index 09169fa6d70..c5ca054f77f 100644 --- a/tensorflow/contrib/summary/summary_ops_test.py +++ b/tensorflow/contrib/summary/summary_ops_test.py @@ -79,7 +79,7 @@ class TargetTest(test_util.TensorFlowTestCase): summary_ops.scalar('scalar', 2.0) write() - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].simple_value, 2.0) @@ -92,7 +92,7 @@ class TargetTest(test_util.TensorFlowTestCase): summary_ops.scalar('scalar', 2.0) - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scalar') @@ -105,7 +105,7 @@ class TargetTest(test_util.TensorFlowTestCase): summary_ops.scalar('scalar', 2.0, global_step=global_step) - events = summary_test_util.events_from_file(logdir) + events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scalar') diff --git a/tensorflow/contrib/summary/summary_test_util.py b/tensorflow/contrib/summary/summary_test_util.py index 37b546d3ab3..794c5b8bab1 100644 --- a/tensorflow/contrib/summary/summary_test_util.py +++ b/tensorflow/contrib/summary/summary_test_util.py @@ -26,16 +26,37 @@ from tensorflow.python.lib.io import tf_record from tensorflow.python.platform import gfile -def events_from_file(logdir): - """Returns all events in the single eventfile in logdir.""" - assert gfile.Exists(logdir) - files = gfile.ListDirectory(logdir) - assert len(files) == 1, "Found more than one file in logdir: %s" % files - records = list( - tf_record.tf_record_iterator(os.path.join(logdir, files[0]))) +def events_from_file(filepath): + """Returns all events in a single event file. + + Args: + filepath: Path to the event file. + + Returns: + A list of all tf.Event protos in the event file. + """ + records = list(tf_record.tf_record_iterator(filepath)) result = [] for r in records: event = event_pb2.Event() event.ParseFromString(r) result.append(event) return result + + +def events_from_logdir(logdir): + """Returns all events in the single eventfile in logdir. + + Args: + logdir: The directory in which the single event file is sought. + + Returns: + A list of all tf.Event protos from the single event file. + + Raises: + AssertionError: If logdir does not contain exactly one file. 
+ """ + assert gfile.Exists(logdir) + files = gfile.ListDirectory(logdir) + assert len(files) == 1, "Found not exactly one file in logdir: %s" % files + return events_from_file(os.path.join(logdir, files[0])) From de8453ff5d72ab64408e627ac9f4f184be3f9173 Mon Sep 17 00:00:00 2001 From: Akshay Agrawal Date: Thu, 16 Nov 2017 10:31:07 -0800 Subject: [PATCH 102/104] Do not change the default graph in variable_scope when building a function. When building a TensorFlow function, we need precise control over the default graph. This change ensures that, when a function is being built, variable_scope preserves the default graph. PiperOrigin-RevId: 175983226 --- tensorflow/contrib/eager/python/BUILD | 1 + .../contrib/eager/python/network_test.py | 18 ++++++++++++++++++ tensorflow/python/ops/variable_scope.py | 10 ++++++++-- 3 files changed, 27 insertions(+), 2 deletions(-) diff --git a/tensorflow/contrib/eager/python/BUILD b/tensorflow/contrib/eager/python/BUILD index 92746b866af..bf2e883bc53 100644 --- a/tensorflow/contrib/eager/python/BUILD +++ b/tensorflow/contrib/eager/python/BUILD @@ -241,6 +241,7 @@ py_test( "//tensorflow/python:resource_variable_ops", "//tensorflow/python:training", "//tensorflow/python:variable_scope", + "//tensorflow/python/eager:function", "//tensorflow/python/eager:test", ], ) diff --git a/tensorflow/contrib/eager/python/network_test.py b/tensorflow/contrib/eager/python/network_test.py index 8718a8b5229..e7835a63e6d 100644 --- a/tensorflow/contrib/eager/python/network_test.py +++ b/tensorflow/contrib/eager/python/network_test.py @@ -20,6 +20,7 @@ import gc from tensorflow.contrib.eager.python import network from tensorflow.python.eager import context +from tensorflow.python.eager import function from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors_impl @@ -87,6 +88,23 @@ class NetworkTest(test.TestCase): result = net(constant_op.constant([[2.0]])) self.assertEqual(34.0, self.evaluate(result)) + # TODO(akshayka): This test should be changed once an API for compiling + # `call` into a defun is implemented. + def testReplacingNetworkCallWithDefun(self): + net = MyNetwork(name="abcd") + x = constant_op.constant([[2.0]]) + net(x) # Force variables to be created. + self.evaluate(net.trainable_variables[0].assign([[17.0]])) + + net.call = function.defun(net.call) + result = net(x) # Build and execute the TensorFlow function + self.assertEqual(34.0, self.evaluate(result)) + + # Force the creation of another TensorFlow function by changing input shape + y = constant_op.constant([[1.0], [2.0]]) + result = net(y) + self.assertAllEqual([[17.0], [34.0]], self.evaluate(result)) + # TODO(allenl): This test creates garbage in some Python versions @test_util.run_in_graph_and_eager_modes() def testNetworkSaveRestoreAlreadyBuilt(self): diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py index 2cdf5855030..91dea12da23 100644 --- a/tensorflow/python/ops/variable_scope.py +++ b/tensorflow/python/ops/variable_scope.py @@ -1828,7 +1828,13 @@ class variable_scope(object): # pylint: disable=invalid-name self._current_name_scope = None def __enter__(self): - if self._in_graph_mode: + # If the default graph is building a function, then we should not replace it + # with the cached graph. 
+ if ops.get_default_graph().building_function: + self._building_function = True + else: + self._building_function = False + if self._in_graph_mode and not self._building_function: self._graph_context_manager = self._graph.as_default() self._graph_context_manager.__enter__() if self._cached_pure_variable_scope is not None: @@ -1907,7 +1913,7 @@ class variable_scope(object): # pylint: disable=invalid-name type_arg, value_arg, traceback_arg) if self._current_name_scope: self._current_name_scope.__exit__(type_arg, value_arg, traceback_arg) - if self._in_graph_mode: + if self._in_graph_mode and not self._building_function: self._graph_context_manager.__exit__(type_arg, value_arg, traceback_arg) From b20d11d36c3baa2e7c9b49d423a39c1e5cc0ceac Mon Sep 17 00:00:00 2001 From: Alexandre Passos Date: Thu, 16 Nov 2017 10:34:42 -0800 Subject: [PATCH 103/104] Ops with no outputs in eager should return None instead of [] PiperOrigin-RevId: 175983704 --- tensorflow/python/eager/ops_test.py | 4 ++++ tensorflow/python/eager/python_eager_op_gen.cc | 2 ++ 2 files changed, 6 insertions(+) diff --git a/tensorflow/python/eager/ops_test.py b/tensorflow/python/eager/ops_test.py index 51550c9f514..70e23b93117 100644 --- a/tensorflow/python/eager/ops_test.py +++ b/tensorflow/python/eager/ops_test.py @@ -30,6 +30,7 @@ from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.layers import core from tensorflow.python.ops import array_ops +from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import sparse_ops @@ -349,6 +350,9 @@ class OpsTest(test_util.TensorFlowTestCase): x = constant_op.constant(3.1415) self.assertEqual('3.14', '{:.2f}'.format(x)) + def testNoOpIsNone(self): + self.assertTrue(control_flow_ops.no_op() is None) + if __name__ == '__main__': test.main() diff --git a/tensorflow/python/eager/python_eager_op_gen.cc b/tensorflow/python/eager/python_eager_op_gen.cc index 374894733af..956fbdac50d 100644 --- a/tensorflow/python/eager/python_eager_op_gen.cc +++ b/tensorflow/python/eager/python_eager_op_gen.cc @@ -531,6 +531,8 @@ string GenEagerPythonOp::Code() { strings::StrAppend(&result_, " _result = _", op_def_.name(), "Output._make(_result)\n"); } + } else { + strings::StrAppend(&result_, " _result = None\n"); } strings::StrAppend(&result_, " return _result\n\n"); return prelude_ + result_; From 7e2bac6b8d75b810493415f0b06c8d9408f7858c Mon Sep 17 00:00:00 2001 From: Jonathan Hseu Date: Thu, 16 Nov 2017 10:59:47 -0800 Subject: [PATCH 104/104] Fix ci_parameterized_build.sh --- tensorflow/tools/ci_build/ci_parameterized_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh index c27f4953e3d..2217b110e3f 100755 --- a/tensorflow/tools/ci_build/ci_parameterized_build.sh +++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh @@ -546,8 +546,8 @@ echo "" TMP_DIR="" DOCKERFILE_FLAG="" -if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ] || - ["${TF_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then +if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ]] || + [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then # Modify Dockerfile for Python3.5 | Python3.6 build TMP_DIR=$(mktemp -d) echo "Docker build will occur in temporary directory: ${TMP_DIR}"
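The final patch above repairs a shell syntax error in ci_parameterized_build.sh: bash does not accept a single-bracket `]` inside a test opened with `[[`, so the original `[[ cond1 ] || [ cond2 ]]` compound does not parse; each condition must be a complete `[[ ... ]]` test joined by `||`. The following is a minimal standalone sketch of the corrected pattern, not part of the patch itself (the variable name comes from the script, but the echo message and surrounding scaffolding are hypothetical):

    #!/bin/bash
    # Hypothetical illustration of the corrected compound conditional
    # used in ci_parameterized_build.sh.
    TF_BUILD_PYTHON_VERSION="python3.6"
    if [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.5" ]] ||
       [[ "${TF_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then
      echo "Taking the Python 3.5 / 3.6 Dockerfile path"
    fi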