From 6f953837fa4405d64f4db8085a698e746909f796 Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Sat, 1 Jun 2019 22:42:23 -0300 Subject: [PATCH 1/6] Refactor TF and TFLite model implementations into their own classes/files --- native_client/BUILD | 13 +- native_client/deepspeech.cc | 725 ++++-------------------------- native_client/deepspeech.h | 1 + native_client/modelstate.cc | 81 ++++ native_client/modelstate.h | 88 ++++ native_client/tflitemodelstate.cc | 258 +++++++++++ native_client/tflitemodelstate.h | 51 +++ native_client/tfmodelstate.cc | 214 +++++++++ native_client/tfmodelstate.h | 37 ++ 9 files changed, 825 insertions(+), 643 deletions(-) create mode 100644 native_client/modelstate.cc create mode 100644 native_client/modelstate.h create mode 100644 native_client/tflitemodelstate.cc create mode 100644 native_client/tflitemodelstate.h create mode 100644 native_client/tfmodelstate.cc create mode 100644 native_client/tfmodelstate.h diff --git a/native_client/BUILD b/native_client/BUILD index bf4e1d26..d7813d29 100644 --- a/native_client/BUILD +++ b/native_client/BUILD @@ -70,9 +70,20 @@ tf_cc_shared_object( srcs = ["deepspeech.cc", "deepspeech.h", "alphabet.h", + "modelstate.h", + "modelstate.cc", "ds_version.h", "ds_graph_version.h"] + - DECODER_SOURCES, + DECODER_SOURCES + + select({ + "//native_client:tflite": [ + "tflitemodelstate.h", + "tflitemodelstate.cc" + ], + "//conditions:default": [ + "tfmodelstate.h", + "tfmodelstate.cc" + ]}), copts = select({ # -fvisibility=hidden is not required on Windows, MSCV hides all declarations by default "//tensorflow:windows": ["/w"], diff --git a/native_client/deepspeech.cc b/native_client/deepspeech.cc index 74d83e56..9955cf86 100644 --- a/native_client/deepspeech.cc +++ b/native_client/deepspeech.cc @@ -11,17 +11,14 @@ #include "deepspeech.h" #include "alphabet.h" +#include "modelstate.h" #include "native_client/ds_version.h" -#include "native_client/ds_graph_version.h" #ifndef USE_TFLITE - #include 
"tensorflow/core/public/session.h" - #include "tensorflow/core/platform/env.h" - #include "tensorflow/core/util/memmapped_file_system.h" -#else // USE_TFLITE - #include "tensorflow/lite/model.h" - #include "tensorflow/lite/kernels/register.h" +#include "tfmodelstate.h" +#else +#include "tflitemodelstate.h" #endif // USE_TFLITE #include "ctcdecode/ctc_beam_search_decoder.h" @@ -36,23 +33,9 @@ #define LOGE(...) #endif // __ANDROID__ -//TODO: infer batch size from model/use dynamic batch size -constexpr unsigned int BATCH_SIZE = 1; - -constexpr unsigned int DEFAULT_SAMPLE_RATE = 16000; -constexpr unsigned int DEFAULT_WINDOW_LENGTH = DEFAULT_SAMPLE_RATE * 0.032; -constexpr unsigned int DEFAULT_WINDOW_STEP = DEFAULT_SAMPLE_RATE * 0.02; - -#ifndef USE_TFLITE - using namespace tensorflow; -#else - using namespace tflite; -#endif - using std::vector; -/* This is the actual implementation of the streaming inference API, with the - Model class just forwarding the calls to this class. +/* This is the implementation of the streaming inference API. The streaming process uses three buffers that are fed eagerly as audio data is fed in. The buffers only hold the minimum amount of data needed to do a @@ -75,17 +58,20 @@ using std::vector; API. When audio_buffer is full, features are computed from it and pushed to mfcc_buffer. When mfcc_buffer is full, the timestep is copied to batch_buffer. When batch_buffer is full, we do a single step through the acoustic model - and accumulate results in the DecoderState structure. + and accumulate the intermediate decoding state in the DecoderState structure. - When finishStream() is called, we decode the accumulated logits and return - the corresponding transcription. + When finishStream() is called, we return the corresponding transcription from + the current decoder state. 
*/ struct StreamingState { - vector audio_buffer; - vector mfcc_buffer; - vector batch_buffer; - ModelState* model; - std::unique_ptr decoder_state; + vector audio_buffer_; + vector mfcc_buffer_; + vector batch_buffer_; + ModelState* model_; + std::unique_ptr decoder_state_; + + StreamingState(); + ~StreamingState(); void feedAudioContent(const short* buffer, unsigned int buffer_size); char* intermediateDecode(); @@ -100,133 +86,12 @@ struct StreamingState { void processBatch(const vector& buf, unsigned int n_steps); }; -struct ModelState { -#ifndef USE_TFLITE - MemmappedEnv* mmap_env; - Session* session; - GraphDef graph_def; -#else // USE_TFLITE - std::unique_ptr interpreter; - std::unique_ptr fbmodel; -#endif // USE_TFLITE - unsigned int ncep; - unsigned int ncontext; - Alphabet* alphabet; - Scorer* scorer; - unsigned int beam_width; - unsigned int n_steps; - unsigned int n_context; - unsigned int n_features; - unsigned int mfcc_feats_per_timestep; - unsigned int sample_rate; - unsigned int audio_win_len; - unsigned int audio_win_step; - -#ifdef USE_TFLITE - size_t previous_state_size; - std::unique_ptr previous_state_c_; - std::unique_ptr previous_state_h_; - - int input_node_idx; - int previous_state_c_idx; - int previous_state_h_idx; - int input_samples_idx; - - int logits_idx; - int new_state_c_idx; - int new_state_h_idx; - int mfccs_idx; - - std::vector acoustic_exec_plan; - std::vector mfcc_exec_plan; -#endif - - ModelState(); - ~ModelState(); - - /** - * @brief Perform decoding of the logits, using basic CTC decoder or - * CTC decoder with KenLM enabled - * - * @return String representing the decoded text. - */ - char* decode(DecoderState* state); - - /** - * @brief Perform decoding of the logits, using basic CTC decoder or - * CTC decoder with KenLM enabled - * - * @return Vector of Output structs directly from the CTC decoder for additional processing. 
- */ - vector decode_raw(DecoderState* state); - - /** - * @brief Return character-level metadata including letter timings. - * - * - * @return Metadata struct containing MetadataItem structs for each character. - * The user is responsible for freeing Metadata by calling DS_FreeMetadata(). - */ - Metadata* decode_metadata(DecoderState* state); - - /** - * @brief Do a single inference step in the acoustic model, with: - * input=mfcc - * input_lengths=[n_frames] - * - * @param mfcc batch input data - * @param n_frames number of timesteps in the data - * - * @param[out] output_logits Where to store computed logits. - */ - void infer(const float* mfcc, unsigned int n_frames, vector& logits_output); - - void compute_mfcc(const vector& audio_buffer, vector& mfcc_output); -}; - -ModelState::ModelState() - : -#ifndef USE_TFLITE - mmap_env(nullptr) - , session(nullptr) -#else // USE_TFLITE - interpreter(nullptr) - , fbmodel(nullptr) -#endif // USE_TFLITE - , ncep(0) - , ncontext(0) - , alphabet(nullptr) - , scorer(nullptr) - , beam_width(0) - , n_steps(-1) - , n_context(-1) - , n_features(-1) - , mfcc_feats_per_timestep(-1) - , sample_rate(DEFAULT_SAMPLE_RATE) - , audio_win_len(DEFAULT_WINDOW_LENGTH) - , audio_win_step(DEFAULT_WINDOW_STEP) -#ifdef USE_TFLITE - , previous_state_size(0) - , previous_state_c_(nullptr) - , previous_state_h_(nullptr) -#endif +StreamingState::StreamingState() { } -ModelState::~ModelState() +StreamingState::~StreamingState() { -#ifndef USE_TFLITE - if (session) { - Status status = session->Close(); - if (!status.ok()) { - std::cerr << "Error closing TensorFlow session: " << status << std::endl; - } - } - delete mmap_env; -#endif // USE_TFLITE - - delete scorer; - delete alphabet; } template @@ -243,19 +108,19 @@ StreamingState::feedAudioContent(const short* buffer, { // Consume all the data that was passed in, processing full buffers if needed while (buffer_size > 0) { - while (buffer_size > 0 && audio_buffer.size() < model->audio_win_len) { + 
while (buffer_size > 0 && audio_buffer_.size() < model_->audio_win_len_) { // Convert i16 sample into f32 float multiplier = 1.0f / (1 << 15); - audio_buffer.push_back((float)(*buffer) * multiplier); + audio_buffer_.push_back((float)(*buffer) * multiplier); ++buffer; --buffer_size; } // If the buffer is full, process and shift it - if (audio_buffer.size() == model->audio_win_len) { - processAudioWindow(audio_buffer); + if (audio_buffer_.size() == model_->audio_win_len_) { + processAudioWindow(audio_buffer_); // Shift data by one step - shift_buffer_left(audio_buffer, model->audio_win_step); + shift_buffer_left(audio_buffer_, model_->audio_win_step_); } // Repeat until buffer empty @@ -265,21 +130,21 @@ StreamingState::feedAudioContent(const short* buffer, char* StreamingState::intermediateDecode() { - return model->decode(decoder_state.get()); + return model_->decode(decoder_state_.get()); } char* StreamingState::finishStream() { finalizeStream(); - return model->decode(decoder_state.get()); + return model_->decode(decoder_state_.get()); } Metadata* StreamingState::finishStreamWithMetadata() { finalizeStream(); - return model->decode_metadata(decoder_state.get()); + return model_->decode_metadata(decoder_state_.get()); } void @@ -287,8 +152,8 @@ StreamingState::processAudioWindow(const vector& buf) { // Compute MFCC features vector mfcc; - mfcc.reserve(model->n_features); - model->compute_mfcc(buf, mfcc); + mfcc.reserve(model_->n_features_); + model_->compute_mfcc(buf, mfcc); pushMfccBuffer(mfcc); } @@ -296,23 +161,23 @@ void StreamingState::finalizeStream() { // Flush audio buffer - processAudioWindow(audio_buffer); + processAudioWindow(audio_buffer_); // Add empty mfcc vectors at end of sample - for (int i = 0; i < model->n_context; ++i) { + for (int i = 0; i < model_->n_context_; ++i) { addZeroMfccWindow(); } // Process final batch - if (batch_buffer.size() > 0) { - processBatch(batch_buffer, batch_buffer.size()/model->mfcc_feats_per_timestep); + if 
(batch_buffer_.size() > 0) { + processBatch(batch_buffer_, batch_buffer_.size()/model_->mfcc_feats_per_timestep_); } } void StreamingState::addZeroMfccWindow() { - vector zero_buffer(model->n_features, 0.f); + vector zero_buffer(model_->n_features_, 0.f); pushMfccBuffer(zero_buffer); } @@ -332,15 +197,15 @@ StreamingState::pushMfccBuffer(const vector& buf) auto end = buf.end(); while (start != end) { // Copy from input buffer to mfcc_buffer, stopping if we have a full context window - start = copy_up_to_n(start, end, std::back_inserter(mfcc_buffer), - model->mfcc_feats_per_timestep - mfcc_buffer.size()); - assert(mfcc_buffer.size() <= model->mfcc_feats_per_timestep); + start = copy_up_to_n(start, end, std::back_inserter(mfcc_buffer_), + model_->mfcc_feats_per_timestep_ - mfcc_buffer_.size()); + assert(mfcc_buffer_.size() <= model_->mfcc_feats_per_timestep_); // If we have a full context window - if (mfcc_buffer.size() == model->mfcc_feats_per_timestep) { - processMfccWindow(mfcc_buffer); + if (mfcc_buffer_.size() == model_->mfcc_feats_per_timestep_) { + processMfccWindow(mfcc_buffer_); // Shift data by one step of one mfcc feature vector - shift_buffer_left(mfcc_buffer, model->n_features); + shift_buffer_left(mfcc_buffer_, model_->n_features_); } } } @@ -352,14 +217,14 @@ StreamingState::processMfccWindow(const vector& buf) auto end = buf.end(); while (start != end) { // Copy from input buffer to batch_buffer, stopping if we have a full batch - start = copy_up_to_n(start, end, std::back_inserter(batch_buffer), - model->n_steps * model->mfcc_feats_per_timestep - batch_buffer.size()); - assert(batch_buffer.size() <= model->n_steps * model->mfcc_feats_per_timestep); + start = copy_up_to_n(start, end, std::back_inserter(batch_buffer_), + model_->n_steps_ * model_->mfcc_feats_per_timestep_ - batch_buffer_.size()); + assert(batch_buffer_.size() <= model_->n_steps_ * model_->mfcc_feats_per_timestep_); // If we have a full batch - if (batch_buffer.size() == model->n_steps 
* model->mfcc_feats_per_timestep) { - processBatch(batch_buffer, model->n_steps); - batch_buffer.resize(0); + if (batch_buffer_.size() == model_->n_steps_ * model_->mfcc_feats_per_timestep_) { + processBatch(batch_buffer_, model_->n_steps_); + batch_buffer_.resize(0); } } } @@ -368,272 +233,27 @@ void StreamingState::processBatch(const vector& buf, unsigned int n_steps) { vector logits; - model->infer(buf.data(), n_steps, logits); - + model_->infer(buf.data(), n_steps, logits); + const int cutoff_top_n = 40; const double cutoff_prob = 1.0; - const size_t num_classes = model->alphabet->GetSize() + 1; // +1 for blank - const int n_frames = logits.size() / (BATCH_SIZE * num_classes); + const size_t num_classes = model_->alphabet_->GetSize() + 1; // +1 for blank + const int n_frames = logits.size() / (ModelState::BATCH_SIZE * num_classes); // Convert logits to double vector inputs(logits.begin(), logits.end()); decoder_next(inputs.data(), - *model->alphabet, - decoder_state.get(), + *model_->alphabet_, + decoder_state_.get(), n_frames, num_classes, cutoff_prob, cutoff_top_n, - model->beam_width, - model->scorer); + model_->beam_width_, + model_->scorer_); } -void -ModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) -{ - const size_t num_classes = alphabet->GetSize() + 1; // +1 for blank - -#ifndef USE_TFLITE - Tensor input(DT_FLOAT, TensorShape({BATCH_SIZE, n_steps, 2*n_context+1, n_features})); - - auto input_mapped = input.flat(); - int i; - for (i = 0; i < n_frames*mfcc_feats_per_timestep; ++i) { - input_mapped(i) = aMfcc[i]; - } - for (; i < n_steps*mfcc_feats_per_timestep; ++i) { - input_mapped(i) = 0.; - } - - Tensor input_lengths(DT_INT32, TensorShape({1})); - input_lengths.scalar()() = n_frames; - - vector outputs; - Status status = session->Run( - {{"input_node", input}, {"input_lengths", input_lengths}}, - {"logits"}, {}, &outputs); - - if (!status.ok()) { - std::cerr << "Error running session: " << status << "\n"; - return; - 
} - - auto logits_mapped = outputs[0].flat(); - // The CTCDecoder works with log-probs. - for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { - logits_output.push_back(logits_mapped(t)); - } -#else // USE_TFLITE - // Feeding input_node - float* input_node = interpreter->typed_tensor(input_node_idx); - { - int i; - for (i = 0; i < n_frames*mfcc_feats_per_timestep; ++i) { - input_node[i] = aMfcc[i]; - } - for (; i < n_steps*mfcc_feats_per_timestep; ++i) { - input_node[i] = 0; - } - } - - assert(previous_state_size > 0); - - // Feeding previous_state_c, previous_state_h - memcpy(interpreter->typed_tensor(previous_state_c_idx), previous_state_c_.get(), sizeof(float) * previous_state_size); - memcpy(interpreter->typed_tensor(previous_state_h_idx), previous_state_h_.get(), sizeof(float) * previous_state_size); - - interpreter->SetExecutionPlan(acoustic_exec_plan); - TfLiteStatus status = interpreter->Invoke(); - if (status != kTfLiteOk) { - std::cerr << "Error running session: " << status << "\n"; - return; - } - - float* outputs = interpreter->typed_tensor(logits_idx); - - // The CTCDecoder works with log-probs. 
- for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { - logits_output.push_back(outputs[t]); - } - - memcpy(previous_state_c_.get(), interpreter->typed_tensor(new_state_c_idx), sizeof(float) * previous_state_size); - memcpy(previous_state_h_.get(), interpreter->typed_tensor(new_state_h_idx), sizeof(float) * previous_state_size); -#endif // USE_TFLITE -} - -void -ModelState::compute_mfcc(const vector& samples, vector& mfcc_output) -{ -#ifndef USE_TFLITE - Tensor input(DT_FLOAT, TensorShape({audio_win_len})); - auto input_mapped = input.flat(); - int i; - for (i = 0; i < samples.size(); ++i) { - input_mapped(i) = samples[i]; - } - for (; i < audio_win_len; ++i) { - input_mapped(i) = 0.f; - } - - vector outputs; - Status status = session->Run({{"input_samples", input}}, {"mfccs"}, {}, &outputs); - - if (!status.ok()) { - std::cerr << "Error running session: " << status << "\n"; - return; - } - - // The feature computation graph is hardcoded to one audio length for now - const int n_windows = 1; - assert(outputs[0].shape().num_elemements() / n_features == n_windows); - - auto mfcc_mapped = outputs[0].flat(); - for (int i = 0; i < n_windows * n_features; ++i) { - mfcc_output.push_back(mfcc_mapped(i)); - } -#else - // Feeding input_node - float* input_samples = interpreter->typed_tensor(input_samples_idx); - for (int i = 0; i < samples.size(); ++i) { - input_samples[i] = samples[i]; - } - - interpreter->SetExecutionPlan(mfcc_exec_plan); - TfLiteStatus status = interpreter->Invoke(); - if (status != kTfLiteOk) { - std::cerr << "Error running session: " << status << "\n"; - return; - } - - // The feature computation graph is hardcoded to one audio length for now - int n_windows = 1; - TfLiteIntArray* out_dims = interpreter->tensor(mfccs_idx)->dims; - int num_elements = 1; - for (int i = 0; i < out_dims->size; ++i) { - num_elements *= out_dims->data[i]; - } - assert(num_elements / n_features == n_windows); - - float* outputs = 
interpreter->typed_tensor(mfccs_idx); - for (int i = 0; i < n_windows * n_features; ++i) { - mfcc_output.push_back(outputs[i]); - } -#endif -} - -char* -ModelState::decode(DecoderState* state) -{ - vector out = ModelState::decode_raw(state); - return strdup(alphabet->LabelsToString(out[0].tokens).c_str()); -} - -vector -ModelState::decode_raw(DecoderState* state) -{ - vector out = decoder_decode(state, *alphabet, beam_width, scorer); - - return out; -} - -Metadata* -ModelState::decode_metadata(DecoderState* state) -{ - vector out = decode_raw(state); - - std::unique_ptr metadata(new Metadata()); - metadata->num_items = out[0].tokens.size(); - metadata->probability = out[0].probability; - - std::unique_ptr items(new MetadataItem[metadata->num_items]()); - - // Loop through each character - for (int i = 0; i < out[0].tokens.size(); ++i) { - items[i].character = strdup(alphabet->StringFromLabel(out[0].tokens[i]).c_str()); - items[i].timestep = out[0].timesteps[i]; - items[i].start_time = out[0].timesteps[i] * ((float)audio_win_step / sample_rate); - - if (items[i].start_time < 0) { - items[i].start_time = 0; - } - } - - metadata->items = items.release(); - return metadata.release(); -} - -#ifdef USE_TFLITE -int -tflite_get_tensor_by_name(const ModelState* ctx, const vector& list, const char* name) -{ - int rv = -1; - - for (int i = 0; i < list.size(); ++i) { - const string& node_name = ctx->interpreter->tensor(list[i])->name; - if (node_name.compare(string(name)) == 0) { - rv = i; - } - } - - assert(rv >= 0); - return rv; -} - -int -tflite_get_input_tensor_by_name(const ModelState* ctx, const char* name) -{ - return ctx->interpreter->inputs()[tflite_get_tensor_by_name(ctx, ctx->interpreter->inputs(), name)]; -} - -int -tflite_get_output_tensor_by_name(const ModelState* ctx, const char* name) -{ - return ctx->interpreter->outputs()[tflite_get_tensor_by_name(ctx, ctx->interpreter->outputs(), name)]; -} - -void push_back_if_not_present(std::deque& list, int value) { - if 
(std::find(list.begin(), list.end(), value) == list.end()) { - list.push_back(value); - } -} - -// Backwards BFS on the node DAG. At each iteration we get the next tensor id -// from the frontier list, then for each node which has that tensor id as an -// output, add it to the parent list, and add its input tensors to the frontier -// list. Because we start from the final tensor and work backwards to the inputs, -// the parents list is constructed in reverse, adding elements to its front. -std::vector -tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id) -{ - std::deque parents; - std::deque frontier; - frontier.push_back(tensor_id); - while (!frontier.empty()) { - int next_tensor_id = frontier.front(); - frontier.pop_front(); - // Find all nodes that have next_tensor_id as an output - for (int node_id = 0; node_id < interpreter->nodes_size(); ++node_id) { - TfLiteNode node = interpreter->node_and_registration(node_id)->first; - // Search node outputs for the tensor we're looking for - for (int i = 0; i < node.outputs->size; ++i) { - if (node.outputs->data[i] == next_tensor_id) { - // This node is part of the parent tree, add it to the parent list and - // add its input tensors to the frontier list - parents.push_front(node_id); - for (int j = 0; j < node.inputs->size; ++j) { - push_back_if_not_present(frontier, node.inputs->data[j]); - } - } - } - } - } - - return std::vector(parents.begin(), parents.end()); -} - -#endif - int DS_CreateModel(const char* aModelPath, unsigned int aNCep, @@ -642,15 +262,6 @@ DS_CreateModel(const char* aModelPath, unsigned int aBeamWidth, ModelState** retval) { - std::unique_ptr model(new ModelState()); -#ifndef USE_TFLITE - model->mmap_env = new MemmappedEnv(Env::Default()); -#endif // USE_TFLITE - model->ncep = aNCep; - model->ncontext = aNContext; - model->alphabet = new Alphabet(aAlphabetConfigPath); - model->beam_width = aBeamWidth; - *retval = nullptr; DS_PrintVersions(); @@ -661,182 +272,23 @@ 
DS_CreateModel(const char* aModelPath, } #ifndef USE_TFLITE - Status status; - SessionOptions options; - - bool is_mmap = std::string(aModelPath).find(".pbmm") != std::string::npos; - if (!is_mmap) { - std::cerr << "Warning: reading entire model file into memory. Transform model file into an mmapped graph to reduce heap usage." << std::endl; - } else { - status = model->mmap_env->InitializeFromFile(aModelPath); - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_INIT_MMAP; - } - - options.config.mutable_graph_options() - ->mutable_optimizer_options() - ->set_opt_level(::OptimizerOptions::L0); - options.env = model->mmap_env; - } - - status = NewSession(options, &model->session); - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_INIT_SESS; - } - - if (is_mmap) { - status = ReadBinaryProto(model->mmap_env, - MemmappedFileSystem::kMemmappedPackageDefaultGraphDef, - &model->graph_def); - } else { - status = ReadBinaryProto(Env::Default(), aModelPath, &model->graph_def); - } - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_READ_PROTOBUF; - } - - status = model->session->Create(model->graph_def); - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_CREATE_SESS; - } - - int graph_version = model->graph_def.version(); - if (graph_version < DS_GRAPH_VERSION) { - std::cerr << "Specified model file version (" << graph_version << ") is " - << "incompatible with minimum version supported by this client (" - << DS_GRAPH_VERSION << "). 
See " - << "https://github.com/mozilla/DeepSpeech/#model-compatibility " - << "for more information" << std::endl; - return DS_ERR_MODEL_INCOMPATIBLE; - } - - for (int i = 0; i < model->graph_def.node_size(); ++i) { - NodeDef node = model->graph_def.node(i); - if (node.name() == "input_node") { - const auto& shape = node.attr().at("shape").shape(); - model->n_steps = shape.dim(1).size(); - model->n_context = (shape.dim(2).size()-1)/2; - model->n_features = shape.dim(3).size(); - model->mfcc_feats_per_timestep = shape.dim(2).size() * shape.dim(3).size(); - } else if (node.name() == "logits_shape") { - Tensor logits_shape = Tensor(DT_INT32, TensorShape({3})); - if (!logits_shape.FromProto(node.attr().at("value").tensor())) { - continue; - } - - int final_dim_size = logits_shape.vec()(2) - 1; - if (final_dim_size != model->alphabet->GetSize()) { - std::cerr << "Error: Alphabet size does not match loaded model: alphabet " - << "has size " << model->alphabet->GetSize() - << ", but model has " << final_dim_size - << " classes in its output. Make sure you're passing an alphabet " - << "file with the same size as the one used for training." - << std::endl; - return DS_ERR_INVALID_ALPHABET; - } - } else if (node.name() == "model_metadata") { - int sample_rate = node.attr().at("sample_rate").i(); - model->sample_rate = sample_rate; - int win_len_ms = node.attr().at("feature_win_len").i(); - int win_step_ms = node.attr().at("feature_win_step").i(); - model->audio_win_len = sample_rate * (win_len_ms / 1000.0); - model->audio_win_step = sample_rate * (win_step_ms / 1000.0); - } - } - - if (model->n_context == -1 || model->n_features == -1) { - std::cerr << "Error: Could not infer input shape from model file. " - << "Make sure input_node is a 4D tensor with shape " - << "[batch_size=1, time, window_size, n_features]." 
- << std::endl; - return DS_ERR_INVALID_SHAPE; - } - - *retval = model.release(); - return DS_ERR_OK; -#else // USE_TFLITE - model->fbmodel = tflite::FlatBufferModel::BuildFromFile(aModelPath); - if (!model->fbmodel) { - std::cerr << "Error at reading model file " << aModelPath << std::endl; - return DS_ERR_FAIL_INIT_MMAP; - } - - - tflite::ops::builtin::BuiltinOpResolver resolver; - tflite::InterpreterBuilder(*model->fbmodel, resolver)(&model->interpreter); - if (!model->interpreter) { - std::cerr << "Error at InterpreterBuilder for model file " << aModelPath << std::endl; - return DS_ERR_FAIL_INTERPRETER; - } - - model->interpreter->AllocateTensors(); - model->interpreter->SetNumThreads(4); - - // Query all the index once - model->input_node_idx = tflite_get_input_tensor_by_name(model.get(), "input_node"); - model->previous_state_c_idx = tflite_get_input_tensor_by_name(model.get(), "previous_state_c"); - model->previous_state_h_idx = tflite_get_input_tensor_by_name(model.get(), "previous_state_h"); - model->input_samples_idx = tflite_get_input_tensor_by_name(model.get(), "input_samples"); - model->logits_idx = tflite_get_output_tensor_by_name(model.get(), "logits"); - model->new_state_c_idx = tflite_get_output_tensor_by_name(model.get(), "new_state_c"); - model->new_state_h_idx = tflite_get_output_tensor_by_name(model.get(), "new_state_h"); - model->mfccs_idx = tflite_get_output_tensor_by_name(model.get(), "mfccs"); - - // When we call Interpreter::Invoke, the whole graph is executed by default, - // which means every time compute_mfcc is called the entire acoustic model is - // also executed. To workaround that problem, we walk up the dependency DAG - // from the mfccs output tensor to find all the relevant nodes required for - // feature computation, building an execution plan that runs just those nodes. 
- auto mfcc_plan = tflite_find_parent_node_ids(model->interpreter.get(), model->mfccs_idx); - auto orig_plan = model->interpreter->execution_plan(); - - // Remove MFCC nodes from original plan (all nodes) to create the acoustic model plan - auto erase_begin = std::remove_if(orig_plan.begin(), orig_plan.end(), [&mfcc_plan](int elem) { - return std::find(mfcc_plan.begin(), mfcc_plan.end(), elem) != mfcc_plan.end(); - }); - orig_plan.erase(erase_begin, orig_plan.end()); - - model->acoustic_exec_plan = std::move(orig_plan); - model->mfcc_exec_plan = std::move(mfcc_plan); - - TfLiteIntArray* dims_input_node = model->interpreter->tensor(model->input_node_idx)->dims; - - model->n_steps = dims_input_node->data[1]; - model->n_context = (dims_input_node->data[2] - 1 ) / 2; - model->n_features = dims_input_node->data[3]; - model->mfcc_feats_per_timestep = dims_input_node->data[2] * dims_input_node->data[3]; - - TfLiteIntArray* dims_logits = model->interpreter->tensor(model->logits_idx)->dims; - const int final_dim_size = dims_logits->data[1] - 1; - if (final_dim_size != model->alphabet->GetSize()) { - std::cerr << "Error: Alphabet size does not match loaded model: alphabet " - << "has size " << model->alphabet->GetSize() - << ", but model has " << final_dim_size - << " classes in its output. Make sure you're passing an alphabet " - << "file with the same size as the one used for training." 
- << std::endl; - return DS_ERR_INVALID_ALPHABET; - } - - TfLiteIntArray* dims_c = model->interpreter->tensor(model->previous_state_c_idx)->dims; - TfLiteIntArray* dims_h = model->interpreter->tensor(model->previous_state_h_idx)->dims; - assert(dims_c->data[1] == dims_h->data[1]); - - model->previous_state_size = dims_c->data[1]; - model->previous_state_c_.reset(new float[model->previous_state_size]()); - model->previous_state_h_.reset(new float[model->previous_state_size]()); - - // Set initial values for previous_state_c and previous_state_h - memset(model->previous_state_c_.get(), 0, sizeof(float) * model->previous_state_size); - memset(model->previous_state_h_.get(), 0, sizeof(float) * model->previous_state_size); - - *retval = model.release(); - return DS_ERR_OK; + std::unique_ptr model(new TFModelState()); +#else + std::unique_ptr model(new TFLiteModelState()); #endif // USE_TFLITE + + if (!model) { + std::cerr << "Could not allocate model state." << std::endl; + return DS_ERR_FAIL_CREATE_MODEL; + } + + int err = model->init(aModelPath, aNCep, aNContext, aAlphabetConfigPath, aBeamWidth); + if (err != DS_ERR_OK) { + return err; + } + + *retval = model.release(); + return DS_ERR_OK; } void @@ -854,10 +306,10 @@ DS_EnableDecoderWithLM(ModelState* aCtx, float aLMBeta) { try { - aCtx->scorer = new Scorer(aLMAlpha, aLMBeta, - aLMPath ? aLMPath : "", - aTriePath ? aTriePath : "", - *aCtx->alphabet); + aCtx->scorer_ = new Scorer(aLMAlpha, aLMBeta, + aLMPath ? aLMPath : "", + aTriePath ? aTriePath : "", + *aCtx->alphabet_); return DS_ERR_OK; } catch (...) 
{ return DS_ERR_INVALID_LM; @@ -872,13 +324,10 @@ DS_SetupStream(ModelState* aCtx, { *retval = nullptr; -#ifndef USE_TFLITE - Status status = aCtx->session->Run({}, {}, {"initialize_state"}, nullptr); - if (!status.ok()) { - std::cerr << "Error running session: " << status << std::endl; - return DS_ERR_FAIL_RUN_SESS; + int err = aCtx->initialize_state(); + if (err != DS_ERR_OK) { + return err; } -#endif // USE_TFLITE std::unique_ptr ctx(new StreamingState()); if (!ctx) { @@ -886,27 +335,20 @@ DS_SetupStream(ModelState* aCtx, return DS_ERR_FAIL_CREATE_STREAM; } - const size_t num_classes = aCtx->alphabet->GetSize() + 1; // +1 for blank + const size_t num_classes = aCtx->alphabet_->GetSize() + 1; // +1 for blank // Default initial allocation = 3 seconds. if (aPreAllocFrames == 0) { aPreAllocFrames = 150; } - ctx->audio_buffer.reserve(aCtx->audio_win_len); - ctx->mfcc_buffer.reserve(aCtx->mfcc_feats_per_timestep); - ctx->mfcc_buffer.resize(aCtx->n_features*aCtx->n_context, 0.f); - ctx->batch_buffer.reserve(aCtx->n_steps * aCtx->mfcc_feats_per_timestep); + ctx->audio_buffer_.reserve(aCtx->audio_win_len_); + ctx->mfcc_buffer_.reserve(aCtx->mfcc_feats_per_timestep_); + ctx->mfcc_buffer_.resize(aCtx->n_features_*aCtx->n_context_, 0.f); + ctx->batch_buffer_.reserve(aCtx->n_steps_ * aCtx->mfcc_feats_per_timestep_); + ctx->model_ = aCtx; - ctx->model = aCtx; - -#ifdef USE_TFLITE - /* Ensure previous_state_{c,h} are not holding previous stream value */ - memset(ctx->model->previous_state_c_.get(), 0, sizeof(float) * ctx->model->previous_state_size); - memset(ctx->model->previous_state_h_.get(), 0, sizeof(float) * ctx->model->previous_state_size); -#endif // USE_TFLITE - - ctx->decoder_state.reset(decoder_init(*aCtx->alphabet, num_classes, aCtx->scorer)); + ctx->decoder_state_.reset(decoder_init(*aCtx->alphabet_, num_classes, aCtx->scorer_)); *retval = ctx.release(); return DS_ERR_OK; @@ -1012,4 +454,3 @@ DS_PrintVersions() { LOGD("DeepSpeech: %s", ds_git_version()); #endif } 
- diff --git a/native_client/deepspeech.h b/native_client/deepspeech.h index 2f4637ce..b40da606 100644 --- a/native_client/deepspeech.h +++ b/native_client/deepspeech.h @@ -52,6 +52,7 @@ enum DeepSpeech_Error_Codes DS_ERR_FAIL_CREATE_STREAM = 0x3004, DS_ERR_FAIL_READ_PROTOBUF = 0x3005, DS_ERR_FAIL_CREATE_SESS = 0x3006, + DS_ERR_FAIL_CREATE_MODEL = 0x3007, }; /** diff --git a/native_client/modelstate.cc b/native_client/modelstate.cc new file mode 100644 index 00000000..c3fda2b9 --- /dev/null +++ b/native_client/modelstate.cc @@ -0,0 +1,81 @@ +#include + +#include "ctcdecode/ctc_beam_search_decoder.h" + +#include "modelstate.h" + +using std::vector; + +ModelState::ModelState() + : alphabet_(nullptr) + , scorer_(nullptr) + , beam_width_(-1) + , n_steps_(-1) + , n_context_(-1) + , n_features_(-1) + , mfcc_feats_per_timestep_(-1) + , sample_rate_(DEFAULT_SAMPLE_RATE) + , audio_win_len_(DEFAULT_WINDOW_LENGTH) + , audio_win_step_(DEFAULT_WINDOW_STEP) +{ +} + +ModelState::~ModelState() +{ + delete scorer_; + delete alphabet_; +} + +int +ModelState::init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) +{ + n_features_ = n_features; + n_context_ = n_context; + alphabet_ = new Alphabet(alphabet_path); + beam_width_ = beam_width; + return DS_ERR_OK; +} + +vector +ModelState::decode_raw(DecoderState* state) +{ + vector out = decoder_decode(state, *alphabet_, beam_width_, scorer_); + return out; +} + +char* +ModelState::decode(DecoderState* state) +{ + vector out = decode_raw(state); + return strdup(alphabet_->LabelsToString(out[0].tokens).c_str()); +} + +Metadata* +ModelState::decode_metadata(DecoderState* state) +{ + vector out = decode_raw(state); + + std::unique_ptr metadata(new Metadata()); + metadata->num_items = out[0].tokens.size(); + metadata->probability = out[0].probability; + + std::unique_ptr items(new MetadataItem[metadata->num_items]()); + + // Loop through each character + for 
(int i = 0; i < out[0].tokens.size(); ++i) { + items[i].character = strdup(alphabet_->StringFromLabel(out[0].tokens[i]).c_str()); + items[i].timestep = out[0].timesteps[i]; + items[i].start_time = out[0].timesteps[i] * ((float)audio_win_step_ / sample_rate_); + + if (items[i].start_time < 0) { + items[i].start_time = 0; + } + } + + metadata->items = items.release(); + return metadata.release(); +} diff --git a/native_client/modelstate.h b/native_client/modelstate.h new file mode 100644 index 00000000..7f53c63e --- /dev/null +++ b/native_client/modelstate.h @@ -0,0 +1,88 @@ +#ifndef MODELSTATE_H +#define MODELSTATE_H + +#include + +#include "deepspeech.h" +#include "alphabet.h" + +#include "ctcdecode/scorer.h" +#include "ctcdecode/output.h" +#include "ctcdecode/decoderstate.h" + +struct ModelState { + //TODO: infer batch size from model/use dynamic batch size + static constexpr unsigned int BATCH_SIZE = 1; + + static constexpr unsigned int DEFAULT_SAMPLE_RATE = 16000; + static constexpr unsigned int DEFAULT_WINDOW_LENGTH = DEFAULT_SAMPLE_RATE * 0.032; + static constexpr unsigned int DEFAULT_WINDOW_STEP = DEFAULT_SAMPLE_RATE * 0.02; + + Alphabet* alphabet_; + Scorer* scorer_; + unsigned int beam_width_; + unsigned int n_steps_; + unsigned int n_context_; + unsigned int n_features_; + unsigned int mfcc_feats_per_timestep_; + unsigned int sample_rate_; + unsigned int audio_win_len_; + unsigned int audio_win_step_; + + ModelState(); + virtual ~ModelState(); + + virtual int init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width); + + virtual int initialize_state() = 0; + + virtual void compute_mfcc(const std::vector& audio_buffer, std::vector& mfcc_output) = 0; + + /** + * @brief Do a single inference step in the acoustic model, with: + * input=mfcc + * input_lengths=[n_frames] + * + * @param mfcc batch input data + * @param n_frames number of timesteps in the data + * + * @param[out] 
output_logits Where to store computed logits. + */ + virtual void infer(const float* mfcc, unsigned int n_frames, std::vector& logits_output) = 0; + + /** + * @brief Perform decoding of the logits, using basic CTC decoder or + * CTC decoder with KenLM enabled + * + * @param state Decoder state to use when decoding. + * + * @return Vector of Output structs directly from the CTC decoder for additional processing. + */ + virtual std::vector decode_raw(DecoderState* state); + + /** + * @brief Perform decoding of the logits, using basic CTC decoder or + * CTC decoder with KenLM enabled + * + * @param state Decoder state to use when decoding. + * + * @return String representing the decoded text. + */ + virtual char* decode(DecoderState* state); + + /** + * @brief Return character-level metadata including letter timings. + * + * @param state Decoder state to use when decoding. + * + * @return Metadata struct containing MetadataItem structs for each character. + * The user is responsible for freeing Metadata by calling DS_FreeMetadata(). 
+   */
+  virtual Metadata* decode_metadata(DecoderState* state);
+};
+
+#endif // MODELSTATE_H
diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc
new file mode 100644
index 00000000..f1e97539
--- /dev/null
+++ b/native_client/tflitemodelstate.cc
@@ -0,0 +1,258 @@
+#include "tflitemodelstate.h"
+
+using namespace tflite;
+using std::vector;
+
+int
+tflite_get_tensor_by_name(const Interpreter* interpreter,
+                          const vector<int>& list,
+                          const char* name)
+{
+  int rv = -1;
+
+  for (int i = 0; i < list.size(); ++i) {
+    const string& node_name = interpreter->tensor(list[i])->name;
+    if (node_name.compare(string(name)) == 0) {
+      rv = i;
+    }
+  }
+
+  assert(rv >= 0);
+  return rv;
+}
+
+int
+tflite_get_input_tensor_by_name(const Interpreter* interpreter, const char* name)
+{
+  int idx = tflite_get_tensor_by_name(interpreter, interpreter->inputs(), name);
+  return interpreter->inputs()[idx];
+}
+
+int
+tflite_get_output_tensor_by_name(const Interpreter* interpreter, const char* name)
+{
+  int idx = tflite_get_tensor_by_name(interpreter, interpreter->outputs(), name);
+  return interpreter->outputs()[idx];
+}
+
+void push_back_if_not_present(std::deque<int>& list, int value)
+{
+  if (std::find(list.begin(), list.end(), value) == list.end()) {
+    list.push_back(value);
+  }
+}
+
+// Backwards BFS on the node DAG. At each iteration we get the next tensor id
+// from the frontier list, then for each node which has that tensor id as an
+// output, add it to the parent list, and add its input tensors to the frontier
+// list. Because we start from the final tensor and work backwards to the inputs,
+// the parents list is constructed in reverse, adding elements to its front.
+std::vector<int>
+tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id)
+{
+  std::deque<int> parents;
+  std::deque<int> frontier;
+  frontier.push_back(tensor_id);
+  while (!frontier.empty()) {
+    int next_tensor_id = frontier.front();
+    frontier.pop_front();
+    // Find all nodes that have next_tensor_id as an output
+    for (int node_id = 0; node_id < interpreter->nodes_size(); ++node_id) {
+      TfLiteNode node = interpreter->node_and_registration(node_id)->first;
+      // Search node outputs for the tensor we're looking for
+      for (int i = 0; i < node.outputs->size; ++i) {
+        if (node.outputs->data[i] == next_tensor_id) {
+          // This node is part of the parent tree, add it to the parent list and
+          // add its input tensors to the frontier list
+          parents.push_front(node_id);
+          for (int j = 0; j < node.inputs->size; ++j) {
+            push_back_if_not_present(frontier, node.inputs->data[j]);
+          }
+        }
+      }
+    }
+  }
+
+  return std::vector<int>(parents.begin(), parents.end());
+}
+
+TFLiteModelState::TFLiteModelState()
+  : ModelState()
+  , interpreter_(nullptr)
+  , fbmodel_(nullptr)
+  , previous_state_size_(0)
+  , previous_state_c_(nullptr)
+  , previous_state_h_(nullptr)
+{
+}
+
+int
+TFLiteModelState::init(const char* model_path,
+                       unsigned int n_features,
+                       unsigned int n_context,
+                       const char* alphabet_path,
+                       unsigned int beam_width)
+{
+  int err = ModelState::init(model_path, n_features, n_context, alphabet_path, beam_width);
+  if (err != DS_ERR_OK) {
+    return err;
+  }
+
+  fbmodel_ = tflite::FlatBufferModel::BuildFromFile(model_path);
+  if (!fbmodel_) {
+    std::cerr << "Error at reading model file " << model_path << std::endl;
+    return DS_ERR_FAIL_INIT_MMAP;
+  }
+
+  tflite::ops::builtin::BuiltinOpResolver resolver;
+  tflite::InterpreterBuilder(*fbmodel_, resolver)(&interpreter_);
+  if (!interpreter_) {
+    std::cerr << "Error at InterpreterBuilder for model file " << model_path << std::endl;
+    return DS_ERR_FAIL_INTERPRETER;
+  }
+
+  interpreter_->AllocateTensors();
+  interpreter_->SetNumThreads(4);
+
+  // Query all the tensor indices once
+  input_node_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "input_node");
+  previous_state_c_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_c");
+  previous_state_h_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_h");
+  input_samples_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "input_samples");
+  logits_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "logits");
+  new_state_c_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_c");
+  new_state_h_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_h");
+  mfccs_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "mfccs");
+
+  // When we call Interpreter::Invoke, the whole graph is executed by default,
+  // which means every time compute_mfcc is called the entire acoustic model is
+  // also executed. To work around that problem, we walk up the dependency DAG
+  // from the mfccs output tensor to find all the relevant nodes required for
+  // feature computation, building an execution plan that runs just those nodes.
+ auto mfcc_plan = tflite_find_parent_node_ids(interpreter_.get(), mfccs_idx_); + auto orig_plan = interpreter_->execution_plan(); + + // Remove MFCC nodes from original plan (all nodes) to create the acoustic model plan + auto erase_begin = std::remove_if(orig_plan.begin(), orig_plan.end(), [&mfcc_plan](int elem) { + return std::find(mfcc_plan.begin(), mfcc_plan.end(), elem) != mfcc_plan.end(); + }); + orig_plan.erase(erase_begin, orig_plan.end()); + + acoustic_exec_plan_ = std::move(orig_plan); + mfcc_exec_plan_ = std::move(mfcc_plan); + + TfLiteIntArray* dims_input_node = interpreter_->tensor(input_node_idx_)->dims; + + n_steps_ = dims_input_node->data[1]; + n_context_ = (dims_input_node->data[2] - 1) / 2; + n_features_ = dims_input_node->data[3]; + mfcc_feats_per_timestep_ = dims_input_node->data[2] * dims_input_node->data[3]; + + TfLiteIntArray* dims_logits = interpreter_->tensor(logits_idx_)->dims; + const int final_dim_size = dims_logits->data[1] - 1; + if (final_dim_size != alphabet_->GetSize()) { + std::cerr << "Error: Alphabet size does not match loaded model: alphabet " + << "has size " << alphabet_->GetSize() + << ", but model has " << final_dim_size + << " classes in its output. Make sure you're passing an alphabet " + << "file with the same size as the one used for training." 
+ << std::endl; + return DS_ERR_INVALID_ALPHABET; + } + + TfLiteIntArray* dims_c = interpreter_->tensor(previous_state_c_idx_)->dims; + TfLiteIntArray* dims_h = interpreter_->tensor(previous_state_h_idx_)->dims; + assert(dims_c->data[1] == dims_h->data[1]); + + previous_state_size_ = dims_c->data[1]; + previous_state_c_.reset(new float[previous_state_size_]()); + previous_state_h_.reset(new float[previous_state_size_]()); + + // Set initial values for previous_state_c and previous_state_h + memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_); + memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_); + + return DS_ERR_OK; +} + +int +TFLiteModelState::initialize_state() +{ + /* Ensure previous_state_{c,h} are not holding previous stream value */ + memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_); + memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_); + + return DS_ERR_OK; +} + +void +TFLiteModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) +{ + const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank + + // Feeding input_node + float* input_node = interpreter_->typed_tensor(input_node_idx_); + { + int i; + for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) { + input_node[i] = aMfcc[i]; + } + for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) { + input_node[i] = 0; + } + } + + assert(previous_state_size_ > 0); + + // Feeding previous_state_c, previous_state_h + memcpy(interpreter_->typed_tensor(previous_state_c_idx_), previous_state_c_.get(), sizeof(float) * previous_state_size_); + memcpy(interpreter_->typed_tensor(previous_state_h_idx_), previous_state_h_.get(), sizeof(float) * previous_state_size_); + + interpreter_->SetExecutionPlan(acoustic_exec_plan_); + TfLiteStatus status = interpreter_->Invoke(); + if (status != kTfLiteOk) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + float* outputs = 
interpreter_->typed_tensor(logits_idx_); + + // The CTCDecoder works with log-probs. + for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { + logits_output.push_back(outputs[t]); + } + + memcpy(previous_state_c_.get(), interpreter_->typed_tensor(new_state_c_idx_), sizeof(float) * previous_state_size_); + memcpy(previous_state_h_.get(), interpreter_->typed_tensor(new_state_h_idx_), sizeof(float) * previous_state_size_); +} + +void +TFLiteModelState::compute_mfcc(const vector& samples, vector& mfcc_output) +{ + // Feeding input_node + float* input_samples = interpreter_->typed_tensor(input_samples_idx_); + for (int i = 0; i < samples.size(); ++i) { + input_samples[i] = samples[i]; + } + + interpreter_->SetExecutionPlan(mfcc_exec_plan_); + TfLiteStatus status = interpreter_->Invoke(); + if (status != kTfLiteOk) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + // The feature computation graph is hardcoded to one audio length for now + int n_windows = 1; + TfLiteIntArray* out_dims = interpreter_->tensor(mfccs_idx_)->dims; + int num_elements = 1; + for (int i = 0; i < out_dims->size; ++i) { + num_elements *= out_dims->data[i]; + } + assert(num_elements / n_features_ == n_windows); + + float* outputs = interpreter_->typed_tensor(mfccs_idx_); + for (int i = 0; i < n_windows * n_features_; ++i) { + mfcc_output.push_back(outputs[i]); + } +} diff --git a/native_client/tflitemodelstate.h b/native_client/tflitemodelstate.h new file mode 100644 index 00000000..de02074d --- /dev/null +++ b/native_client/tflitemodelstate.h @@ -0,0 +1,51 @@ +#ifndef TFLITEMODELSTATE_H +#define TFLITEMODELSTATE_H + +#include +#include + +#include "tensorflow/lite/model.h" +#include "tensorflow/lite/kernels/register.h" + +#include "modelstate.h" + +struct TFLiteModelState : public ModelState +{ + std::unique_ptr interpreter_; + std::unique_ptr fbmodel_; + + size_t previous_state_size_; + std::unique_ptr previous_state_c_; + std::unique_ptr previous_state_h_; 
+ + int input_node_idx_; + int previous_state_c_idx_; + int previous_state_h_idx_; + int input_samples_idx_; + + int logits_idx_; + int new_state_c_idx_; + int new_state_h_idx_; + int mfccs_idx_; + + std::vector acoustic_exec_plan_; + std::vector mfcc_exec_plan_; + + TFLiteModelState(); + + virtual int init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) override; + + virtual int initialize_state() override; + + virtual void compute_mfcc(const std::vector& audio_buffer, + std::vector& mfcc_output) override; + + virtual void infer(const float* mfcc, unsigned int n_frames, + std::vector& logits_output) override; +}; + +#endif // TFLITEMODELSTATE_H diff --git a/native_client/tfmodelstate.cc b/native_client/tfmodelstate.cc new file mode 100644 index 00000000..866775e4 --- /dev/null +++ b/native_client/tfmodelstate.cc @@ -0,0 +1,214 @@ +#include "tfmodelstate.h" + +#include "ds_graph_version.h" + +using namespace tensorflow; +using std::vector; + +TFModelState::TFModelState() + : ModelState() + , mmap_env_(nullptr) + , session_(nullptr) +{ +} + +TFModelState::~TFModelState() +{ + if (session_) { + Status status = session_->Close(); + if (!status.ok()) { + std::cerr << "Error closing TensorFlow session: " << status << std::endl; + } + } + delete mmap_env_; +} + +int +TFModelState::init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) +{ + int err = ModelState::init(model_path, n_features, n_context, alphabet_path, beam_width); + if (err != DS_ERR_OK) { + return err; + } + + Status status; + SessionOptions options; + + mmap_env_ = new MemmappedEnv(Env::Default()); + + bool is_mmap = std::string(model_path).find(".pbmm") != std::string::npos; + if (!is_mmap) { + std::cerr << "Warning: reading entire model file into memory. Transform model file into an mmapped graph to reduce heap usage." 
<< std::endl; + } else { + status = mmap_env_->InitializeFromFile(model_path); + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_INIT_MMAP; + } + + options.config.mutable_graph_options() + ->mutable_optimizer_options() + ->set_opt_level(::OptimizerOptions::L0); + options.env = mmap_env_; + } + + status = NewSession(options, &session_); + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_INIT_SESS; + } + + if (is_mmap) { + status = ReadBinaryProto(mmap_env_, + MemmappedFileSystem::kMemmappedPackageDefaultGraphDef, + &graph_def_); + } else { + status = ReadBinaryProto(Env::Default(), model_path, &graph_def_); + } + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_READ_PROTOBUF; + } + + status = session_->Create(graph_def_); + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_CREATE_SESS; + } + + int graph_version = graph_def_.version(); + if (graph_version < DS_GRAPH_VERSION) { + std::cerr << "Specified model file version (" << graph_version << ") is " + << "incompatible with minimum version supported by this client (" + << DS_GRAPH_VERSION << "). 
See " + << "https://github.com/mozilla/DeepSpeech/#model-compatibility " + << "for more information" << std::endl; + return DS_ERR_MODEL_INCOMPATIBLE; + } + + for (int i = 0; i < graph_def_.node_size(); ++i) { + NodeDef node = graph_def_.node(i); + if (node.name() == "input_node") { + const auto& shape = node.attr().at("shape").shape(); + n_steps_ = shape.dim(1).size(); + n_context_ = (shape.dim(2).size()-1)/2; + n_features_ = shape.dim(3).size(); + mfcc_feats_per_timestep_ = shape.dim(2).size() * shape.dim(3).size(); + } else if (node.name() == "logits_shape") { + Tensor logits_shape = Tensor(DT_INT32, TensorShape({3})); + if (!logits_shape.FromProto(node.attr().at("value").tensor())) { + continue; + } + + int final_dim_size = logits_shape.vec()(2) - 1; + if (final_dim_size != alphabet_->GetSize()) { + std::cerr << "Error: Alphabet size does not match loaded model: alphabet " + << "has size " << alphabet_->GetSize() + << ", but model has " << final_dim_size + << " classes in its output. Make sure you're passing an alphabet " + << "file with the same size as the one used for training." + << std::endl; + return DS_ERR_INVALID_ALPHABET; + } + } else if (node.name() == "model_metadata") { + sample_rate_ = node.attr().at("sample_rate").i(); + int win_len_ms = node.attr().at("feature_win_len").i(); + int win_step_ms = node.attr().at("feature_win_step").i(); + audio_win_len_ = sample_rate_ * (win_len_ms / 1000.0); + audio_win_step_ = sample_rate_ * (win_step_ms / 1000.0); + } + } + + if (n_context_ == -1 || n_features_ == -1) { + std::cerr << "Error: Could not infer input shape from model file. " + << "Make sure input_node is a 4D tensor with shape " + << "[batch_size=1, time, window_size, n_features]." 
+ << std::endl; + return DS_ERR_INVALID_SHAPE; + } + + return DS_ERR_OK; +} + +int +TFModelState::initialize_state() +{ + Status status = session_->Run({}, {}, {"initialize_state"}, nullptr); + if (!status.ok()) { + std::cerr << "Error running session: " << status << std::endl; + return DS_ERR_FAIL_RUN_SESS; + } + + return DS_ERR_OK; +} + +void +TFModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) +{ + const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank + + Tensor input(DT_FLOAT, TensorShape({BATCH_SIZE, n_steps_, 2*n_context_+1, n_features_})); + + auto input_mapped = input.flat(); + int i; + for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) { + input_mapped(i) = aMfcc[i]; + } + for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) { + input_mapped(i) = 0.; + } + + Tensor input_lengths(DT_INT32, TensorShape({1})); + input_lengths.scalar()() = n_frames; + + vector outputs; + Status status = session_->Run( + {{"input_node", input}, {"input_lengths", input_lengths}}, + {"logits"}, {}, &outputs); + + if (!status.ok()) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + auto logits_mapped = outputs[0].flat(); + // The CTCDecoder works with log-probs. 
+ for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { + logits_output.push_back(logits_mapped(t)); + } +} + +void +TFModelState::compute_mfcc(const vector& samples, vector& mfcc_output) +{ + Tensor input(DT_FLOAT, TensorShape({audio_win_len_})); + auto input_mapped = input.flat(); + int i; + for (i = 0; i < samples.size(); ++i) { + input_mapped(i) = samples[i]; + } + for (; i < audio_win_len_; ++i) { + input_mapped(i) = 0.f; + } + + vector outputs; + Status status = session_->Run({{"input_samples", input}}, {"mfccs"}, {}, &outputs); + + if (!status.ok()) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + // The feature computation graph is hardcoded to one audio length for now + const int n_windows = 1; + assert(outputs[0].shape().num_elements() / n_features_ == n_windows); + + auto mfcc_mapped = outputs[0].flat(); + for (int i = 0; i < n_windows * n_features_; ++i) { + mfcc_output.push_back(mfcc_mapped(i)); + } +} diff --git a/native_client/tfmodelstate.h b/native_client/tfmodelstate.h new file mode 100644 index 00000000..c3dc7708 --- /dev/null +++ b/native_client/tfmodelstate.h @@ -0,0 +1,37 @@ +#ifndef TFMODELSTATE_H +#define TFMODELSTATE_H + +#include + +#include "tensorflow/core/public/session.h" +#include "tensorflow/core/platform/env.h" +#include "tensorflow/core/util/memmapped_file_system.h" + +#include "modelstate.h" + +struct TFModelState : public ModelState +{ + tensorflow::MemmappedEnv* mmap_env_; + tensorflow::Session* session_; + tensorflow::GraphDef graph_def_; + + TFModelState(); + virtual ~TFModelState(); + + virtual int init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) override; + + virtual int initialize_state() override; + + virtual void infer(const float* mfcc, + unsigned int n_frames, + std::vector& logits_output) override; + + virtual void compute_mfcc(const std::vector& audio_buffer, + std::vector& mfcc_output) 
override; +}; + +#endif // TFMODELSTATE_H From 6e78bac799362516f029708c581e07de2c8fcf08 Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Tue, 4 Jun 2019 18:11:37 -0300 Subject: [PATCH 2/6] Address review comments --- native_client/deepspeech.cc | 8 +++++--- native_client/tflitemodelstate.cc | 16 +++++++++++++--- native_client/tflitemodelstate.h | 1 + 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/native_client/deepspeech.cc b/native_client/deepspeech.cc index 9955cf86..1ee22d58 100644 --- a/native_client/deepspeech.cc +++ b/native_client/deepspeech.cc @@ -271,11 +271,13 @@ DS_CreateModel(const char* aModelPath, return DS_ERR_NO_MODEL; } + std::unique_ptr model( #ifndef USE_TFLITE - std::unique_ptr model(new TFModelState()); + new TFModelState() #else - std::unique_ptr model(new TFLiteModelState()); -#endif // USE_TFLITE + new TFLiteModelState() +#endif + ); if (!model) { std::cerr << "Could not allocate model state." << std::endl; diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc index f1e97539..92d9c014 100644 --- a/native_client/tflitemodelstate.cc +++ b/native_client/tflitemodelstate.cc @@ -35,7 +35,8 @@ tflite_get_output_tensor_by_name(const Interpreter* interpreter, const char* nam return interpreter->outputs()[idx]; } -void push_back_if_not_present(std::deque& list, int value) +void +push_back_if_not_present(std::deque& list, int value) { if (std::find(list.begin(), list.end(), value) == list.end()) { list.push_back(value); @@ -86,6 +87,10 @@ TFLiteModelState::TFLiteModelState() { } +TFLiteModelState::~TFLiteModelState() +{ +} + int TFLiteModelState::init(const char* model_path, unsigned int n_features, @@ -235,8 +240,13 @@ TFLiteModelState::compute_mfcc(const vector& samples, vector& mfcc input_samples[i] = samples[i]; } - interpreter_->SetExecutionPlan(mfcc_exec_plan_); - TfLiteStatus status = interpreter_->Invoke(); + TfLiteStatus status = interpreter_->SetExecutionPlan(mfcc_exec_plan_); + if (status != 
kTfLiteOk) { + std::cerr << "Error setting execution plan: " << status << "\n"; + return; + } + + status = interpreter_->Invoke(); if (status != kTfLiteOk) { std::cerr << "Error running session: " << status << "\n"; return; diff --git a/native_client/tflitemodelstate.h b/native_client/tflitemodelstate.h index de02074d..ee5bfb6a 100644 --- a/native_client/tflitemodelstate.h +++ b/native_client/tflitemodelstate.h @@ -32,6 +32,7 @@ struct TFLiteModelState : public ModelState std::vector mfcc_exec_plan_; TFLiteModelState(); + virtual ~TFLiteModelState(); virtual int init(const char* model_path, unsigned int n_features, From e51b9d987d162bd4cbad0a0c94295ae7809ea086 Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Thu, 6 Jun 2019 16:40:19 -0300 Subject: [PATCH 3/6] Remove previous state model variable, track by hand in StreamingState instead --- DeepSpeech.py | 86 +++++++---------- GRAPH_VERSION | 2 +- native_client/BUILD | 14 +-- native_client/deepspeech.cc | 18 ++-- native_client/modelstate.cc | 1 + native_client/modelstate.h | 11 ++- native_client/tflitemodelstate.cc | 150 +++++++++++++++--------------- native_client/tflitemodelstate.h | 27 ++++-- native_client/tfmodelstate.cc | 96 +++++++++++-------- native_client/tfmodelstate.h | 10 +- 10 files changed, 213 insertions(+), 202 deletions(-) diff --git a/DeepSpeech.py b/DeepSpeech.py index 1883724d..7e92e202 100755 --- a/DeepSpeech.py +++ b/DeepSpeech.py @@ -574,12 +574,8 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False): # no state management since n_step is expected to be dynamic too (see below) previous_state = previous_state_c = previous_state_h = None else: - if tflite: - previous_state_c = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c') - previous_state_h = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_h') - else: - previous_state_c = variable_on_cpu('previous_state_c', [batch_size, Config.n_cell_dim], initializer=None) 
- previous_state_h = variable_on_cpu('previous_state_h', [batch_size, Config.n_cell_dim], initializer=None) + previous_state_c = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c') + previous_state_h = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_h') previous_state = tf.contrib.rnn.LSTMStateTuple(previous_state_c, previous_state_h) @@ -605,7 +601,7 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False): logits = tf.squeeze(logits, [1]) # Apply softmax for CTC decoder - logits = tf.nn.softmax(logits) + logits = tf.nn.softmax(logits, name='logits') if batch_size <= 0: if tflite: @@ -618,51 +614,31 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False): 'input_lengths': seq_length, }, { - 'outputs': tf.identity(logits, name='logits'), + 'outputs': logits, }, layers ) new_state_c, new_state_h = layers['rnn_output_state'] - if tflite: - logits = tf.identity(logits, name='logits') - new_state_c = tf.identity(new_state_c, name='new_state_c') - new_state_h = tf.identity(new_state_h, name='new_state_h') + new_state_c = tf.identity(new_state_c, name='new_state_c') + new_state_h = tf.identity(new_state_h, name='new_state_h') - inputs = { - 'input': input_tensor, - 'previous_state_c': previous_state_c, - 'previous_state_h': previous_state_h, - 'input_samples': input_samples, - } + inputs = { + 'input': input_tensor, + 'previous_state_c': previous_state_c, + 'previous_state_h': previous_state_h, + 'input_samples': input_samples, + } - if FLAGS.use_seq_length: - inputs.update({'input_lengths': seq_length}) + if FLAGS.use_seq_length: + inputs.update({'input_lengths': seq_length}) - outputs = { - 'outputs': logits, - 'new_state_c': new_state_c, - 'new_state_h': new_state_h, - 'mfccs': mfccs, - } - else: - zero_state = tf.zeros([batch_size, Config.n_cell_dim], tf.float32) - initialize_c = tf.assign(previous_state_c, zero_state) - initialize_h = tf.assign(previous_state_h, zero_state) 
- initialize_state = tf.group(initialize_c, initialize_h, name='initialize_state') - with tf.control_dependencies([tf.assign(previous_state_c, new_state_c), tf.assign(previous_state_h, new_state_h)]): - logits = tf.identity(logits, name='logits') - - inputs = { - 'input': input_tensor, - 'input_lengths': seq_length, - 'input_samples': input_samples, - } - outputs = { - 'outputs': logits, - 'initialize_state': initialize_state, - 'mfccs': mfccs, - } + outputs = { + 'outputs': logits, + 'new_state_c': new_state_c, + 'new_state_h': new_state_h, + 'mfccs': mfccs, + } return inputs, outputs, layers @@ -682,10 +658,12 @@ def export(): output_names_ops = [op.name for op in outputs.values() if isinstance(op, Operation)] output_names = ",".join(output_names_tensors + output_names_ops) - if not FLAGS.export_tflite: - mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')} - else: + mapping = None + if FLAGS.export_tflite: # Create a saver using variables from the above newly created graph + # Training graph uses LSTMFusedCell, but the TFLite inference graph uses + # a static RNN with a normal cell, so we need to rewrite the names to + # match the training weights when restoring. 
def fixup(name): if name.startswith('rnn/lstm_cell/'): return name.replace('rnn/lstm_cell/', 'lstm_fused_cell/') @@ -710,7 +688,7 @@ def export(): if not os.path.isdir(FLAGS.export_dir): os.makedirs(FLAGS.export_dir) - def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=None): + def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=''): frozen = freeze_graph.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=saver.as_saver_def(), @@ -731,7 +709,7 @@ def export(): placeholder_type_enum=tf.float32.as_datatype_enum) if not FLAGS.export_tflite: - frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='previous_state_c,previous_state_h') + frozen_graph = do_graph_freeze(output_node_names=output_names) frozen_graph.version = int(file_relative_read('GRAPH_VERSION').strip()) # Add a no-op node to the graph with metadata information to be loaded by the native client @@ -747,7 +725,7 @@ def export(): with open(output_graph_path, 'wb') as fout: fout.write(frozen_graph.SerializeToString()) else: - frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='') + frozen_graph = do_graph_freeze(output_node_names=output_names) output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite')) converter = tf.lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values()) @@ -771,8 +749,7 @@ def do_single_file_inference(input_file_path): inputs, outputs, _ = create_inference_graph(batch_size=1, n_steps=-1) # Create a saver using variables from the above newly created graph - mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')} - saver = tf.train.Saver(mapping) + saver = tf.train.Saver() # Restore variables from training checkpoint # TODO: This restores the most recent checkpoint, but if we use validation to 
counteract @@ -784,9 +761,10 @@ def do_single_file_inference(input_file_path): checkpoint_path = checkpoint.model_checkpoint_path saver.restore(session, checkpoint_path) - session.run(outputs['initialize_state']) features, features_len = audiofile_to_features(input_file_path) + previous_state_c = np.zeros([1, Config.n_cell_dim]) + previous_state_h = np.zeros([1, Config.n_cell_dim]) # Add batch dimension features = tf.expand_dims(features, 0) @@ -799,6 +777,8 @@ def do_single_file_inference(input_file_path): logits = outputs['outputs'].eval(feed_dict={ inputs['input']: features, inputs['input_lengths']: features_len, + inputs['previous_state_c']: previous_state_c, + inputs['previous_state_h']: previous_state_h, }, session=session) logits = np.squeeze(logits) diff --git a/GRAPH_VERSION b/GRAPH_VERSION index 56a6051c..d8263ee9 100644 --- a/GRAPH_VERSION +++ b/GRAPH_VERSION @@ -1 +1 @@ -1 \ No newline at end of file +2 \ No newline at end of file diff --git a/native_client/BUILD b/native_client/BUILD index d7813d29..5203eb47 100644 --- a/native_client/BUILD +++ b/native_client/BUILD @@ -114,34 +114,26 @@ tf_cc_shared_object( ### => Trying to be more fine-grained ### Use bin/ops_in_graph.py to list all the ops used by a frozen graph. 
### CPU only build, libdeepspeech.so file size reduced by ~50% - "//tensorflow/core/kernels:dense_update_ops", # Assign - "//tensorflow/core/kernels:constant_op", # Const - "//tensorflow/core/kernels:immutable_constant_op", # ImmutableConst + "//tensorflow/core/kernels:dense_update_ops", # Assign (remove once prod model no longer depends on it) + "//tensorflow/core/kernels:constant_op", # Placeholder + "//tensorflow/core/kernels:immutable_constant_op", # ImmutableConst (used in memmapped models) "//tensorflow/core/kernels:identity_op", # Identity "//tensorflow/core/kernels:softmax_op", # Softmax "//tensorflow/core/kernels:transpose_op", # Transpose "//tensorflow/core/kernels:reshape_op", # Reshape "//tensorflow/core/kernels:shape_ops", # Shape "//tensorflow/core/kernels:concat_op", # ConcatV2 - "//tensorflow/core/kernels:split_op", # Split - "//tensorflow/core/kernels:variable_ops", # VariableV2 "//tensorflow/core/kernels:relu_op", # Relu "//tensorflow/core/kernels:bias_op", # BiasAdd "//tensorflow/core/kernels:math", # Range, MatMul - "//tensorflow/core/kernels:control_flow_ops", # Enter "//tensorflow/core/kernels:tile_ops", # Tile - "//tensorflow/core/kernels:gather_op", # Gather "//tensorflow/core/kernels:mfcc_op", # Mfcc "//tensorflow/core/kernels:spectrogram_op", # AudioSpectrogram "//tensorflow/core/kernels:strided_slice_op", # StridedSlice "//tensorflow/core/kernels:slice_op", # Slice, needed by StridedSlice "//tensorflow/contrib/rnn:lstm_ops_kernels", # BlockLSTM - "//tensorflow/core/kernels:random_ops", # RandomGammaGrad "//tensorflow/core/kernels:pack_op", # Pack "//tensorflow/core/kernels:gather_nd_op", # GatherNd - #### Needed by production model produced without "--use_seq_length False" - #"//tensorflow/core/kernels:logging_ops", # Assert - #"//tensorflow/core/kernels:reverse_sequence_op", # ReverseSequence ], }) + if_cuda([ "//tensorflow/core:core", diff --git a/native_client/deepspeech.cc b/native_client/deepspeech.cc index 1ee22d58..7dd96574 100644 
--- a/native_client/deepspeech.cc +++ b/native_client/deepspeech.cc @@ -67,6 +67,9 @@ struct StreamingState { vector audio_buffer_; vector mfcc_buffer_; vector batch_buffer_; + vector previous_state_c_; + vector previous_state_h_; + ModelState* model_; std::unique_ptr decoder_state_; @@ -233,7 +236,13 @@ void StreamingState::processBatch(const vector& buf, unsigned int n_steps) { vector logits; - model_->infer(buf.data(), n_steps, logits); + model_->infer(buf, + n_steps, + previous_state_c_, + previous_state_h_, + logits, + previous_state_c_, + previous_state_h_); const int cutoff_top_n = 40; const double cutoff_prob = 1.0; @@ -326,11 +335,6 @@ DS_SetupStream(ModelState* aCtx, { *retval = nullptr; - int err = aCtx->initialize_state(); - if (err != DS_ERR_OK) { - return err; - } - std::unique_ptr ctx(new StreamingState()); if (!ctx) { std::cerr << "Could not allocate streaming state." << std::endl; @@ -348,6 +352,8 @@ DS_SetupStream(ModelState* aCtx, ctx->mfcc_buffer_.reserve(aCtx->mfcc_feats_per_timestep_); ctx->mfcc_buffer_.resize(aCtx->n_features_*aCtx->n_context_, 0.f); ctx->batch_buffer_.reserve(aCtx->n_steps_ * aCtx->mfcc_feats_per_timestep_); + ctx->previous_state_c_.resize(aCtx->state_size_, 0.f); + ctx->previous_state_h_.resize(aCtx->state_size_, 0.f); ctx->model_ = aCtx; ctx->decoder_state_.reset(decoder_init(*aCtx->alphabet_, num_classes, aCtx->scorer_)); diff --git a/native_client/modelstate.cc b/native_client/modelstate.cc index c3fda2b9..7bb7f073 100644 --- a/native_client/modelstate.cc +++ b/native_client/modelstate.cc @@ -17,6 +17,7 @@ ModelState::ModelState() , sample_rate_(DEFAULT_SAMPLE_RATE) , audio_win_len_(DEFAULT_WINDOW_LENGTH) , audio_win_step_(DEFAULT_WINDOW_STEP) + , state_size_(-1) { } diff --git a/native_client/modelstate.h b/native_client/modelstate.h index 7f53c63e..71799421 100644 --- a/native_client/modelstate.h +++ b/native_client/modelstate.h @@ -28,6 +28,7 @@ struct ModelState { unsigned int sample_rate_; unsigned int 
audio_win_len_; unsigned int audio_win_step_; + unsigned int state_size_; ModelState(); virtual ~ModelState(); @@ -38,8 +39,6 @@ struct ModelState { const char* alphabet_path, unsigned int beam_width); - virtual int initialize_state() = 0; - virtual void compute_mfcc(const std::vector& audio_buffer, std::vector& mfcc_output) = 0; /** @@ -52,7 +51,13 @@ struct ModelState { * * @param[out] output_logits Where to store computed logits. */ - virtual void infer(const float* mfcc, unsigned int n_frames, std::vector& logits_output) = 0; + virtual void infer(const std::vector& mfcc, + unsigned int n_frames, + const std::vector& previous_state_c, + const std::vector& previous_state_h, + std::vector& logits_output, + std::vector& state_c_output, + std::vector& state_h_output) = 0; /** * @brief Perform decoding of the logits, using basic CTC decoder or diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc index 92d9c014..9af0ae86 100644 --- a/native_client/tflitemodelstate.cc +++ b/native_client/tflitemodelstate.cc @@ -4,14 +4,13 @@ using namespace tflite; using std::vector; int -tflite_get_tensor_by_name(const Interpreter* interpreter, - const vector& list, - const char* name) +TFLiteModelState::get_tensor_by_name(const vector& list, + const char* name) { int rv = -1; for (int i = 0; i < list.size(); ++i) { - const string& node_name = interpreter->tensor(list[i])->name; + const string& node_name = interpreter_->tensor(list[i])->name; if (node_name.compare(string(name)) == 0) { rv = i; } @@ -22,17 +21,17 @@ tflite_get_tensor_by_name(const Interpreter* interpreter, } int -tflite_get_input_tensor_by_name(const Interpreter* interpreter, const char* name) +TFLiteModelState::get_input_tensor_by_name(const char* name) { - int idx = tflite_get_tensor_by_name(interpreter, interpreter->inputs(), name); - return interpreter->inputs()[idx]; + int idx = get_tensor_by_name(interpreter_->inputs(), name); + return interpreter_->inputs()[idx]; } int 
-tflite_get_output_tensor_by_name(const Interpreter* interpreter, const char* name) +TFLiteModelState::get_output_tensor_by_name(const char* name) { - int idx = tflite_get_tensor_by_name(interpreter, interpreter->outputs(), name); - return interpreter->outputs()[idx]; + int idx = get_tensor_by_name(interpreter_->outputs(), name); + return interpreter_->outputs()[idx]; } void @@ -48,8 +47,8 @@ push_back_if_not_present(std::deque& list, int value) // output, add it to the parent list, and add its input tensors to the frontier // list. Because we start from the final tensor and work backwards to the inputs, // the parents list is constructed in reverse, adding elements to its front. -std::vector -tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id) +vector +TFLiteModelState::find_parent_node_ids(int tensor_id) { std::deque parents; std::deque frontier; @@ -58,8 +57,8 @@ tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id) int next_tensor_id = frontier.front(); frontier.pop_front(); // Find all nodes that have next_tensor_id as an output - for (int node_id = 0; node_id < interpreter->nodes_size(); ++node_id) { - TfLiteNode node = interpreter->node_and_registration(node_id)->first; + for (int node_id = 0; node_id < interpreter_->nodes_size(); ++node_id) { + TfLiteNode node = interpreter_->node_and_registration(node_id)->first; // Search node outputs for the tensor we're looking for for (int i = 0; i < node.outputs->size; ++i) { if (node.outputs->data[i] == next_tensor_id) { @@ -74,16 +73,13 @@ tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id) } } - return std::vector(parents.begin(), parents.end()); + return vector(parents.begin(), parents.end()); } TFLiteModelState::TFLiteModelState() : ModelState() , interpreter_(nullptr) , fbmodel_(nullptr) - , previous_state_size_(0) - , previous_state_c_(nullptr) - , previous_state_h_(nullptr) { } @@ -120,21 +116,21 @@ TFLiteModelState::init(const char* model_path, 
interpreter_->SetNumThreads(4); // Query all the index once - input_node_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "input_node"); - previous_state_c_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_c"); - previous_state_h_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_h"); - input_samples_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "input_samples"); - logits_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "logits"); - new_state_c_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_c"); - new_state_h_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_h"); - mfccs_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "mfccs"); + input_node_idx_ = get_input_tensor_by_name("input_node"); + previous_state_c_idx_ = get_input_tensor_by_name("previous_state_c"); + previous_state_h_idx_ = get_input_tensor_by_name("previous_state_h"); + input_samples_idx_ = get_input_tensor_by_name("input_samples"); + logits_idx_ = get_output_tensor_by_name("logits"); + new_state_c_idx_ = get_output_tensor_by_name("new_state_c"); + new_state_h_idx_ = get_output_tensor_by_name("new_state_h"); + mfccs_idx_ = get_output_tensor_by_name("mfccs"); // When we call Interpreter::Invoke, the whole graph is executed by default, // which means every time compute_mfcc is called the entire acoustic model is // also executed. To workaround that problem, we walk up the dependency DAG // from the mfccs output tensor to find all the relevant nodes required for // feature computation, building an execution plan that runs just those nodes. 
- auto mfcc_plan = tflite_find_parent_node_ids(interpreter_.get(), mfccs_idx_); + auto mfcc_plan = find_parent_node_ids(mfccs_idx_); auto orig_plan = interpreter_->execution_plan(); // Remove MFCC nodes from original plan (all nodes) to create the acoustic model plan @@ -168,50 +164,57 @@ TFLiteModelState::init(const char* model_path, TfLiteIntArray* dims_c = interpreter_->tensor(previous_state_c_idx_)->dims; TfLiteIntArray* dims_h = interpreter_->tensor(previous_state_h_idx_)->dims; assert(dims_c->data[1] == dims_h->data[1]); - - previous_state_size_ = dims_c->data[1]; - previous_state_c_.reset(new float[previous_state_size_]()); - previous_state_h_.reset(new float[previous_state_size_]()); - - // Set initial values for previous_state_c and previous_state_h - memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_); - memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_); - - return DS_ERR_OK; -} - -int -TFLiteModelState::initialize_state() -{ - /* Ensure previous_state_{c,h} are not holding previous stream value */ - memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_); - memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_); + assert(state_size_ > 0); + state_size_ = dims_c->data[1]; return DS_ERR_OK; } void -TFLiteModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) +TFLiteModelState::copy_vector_to_tensor(const vector& vec, + int tensor_idx, + int num_elements) +{ + float* tensor = interpreter_->typed_tensor(tensor_idx); + int i; + for (i = 0; i < vec.size(); ++i) { + tensor[i] = vec[i]; + } + for (; i < num_elements; ++i) { + tensor[i] = 0.f; + } +} + +void +TFLiteModelState::copy_tensor_to_vector(int tensor_idx, + int num_elements, + vector& vec) +{ + float* tensor = interpreter_->typed_tensor(tensor_idx); + for (int i = 0; i < num_elements; ++i) { + vec.push_back(tensor[i]); + } +} + +void +TFLiteModelState::infer(const vector& mfcc, + unsigned int 
n_frames, + const vector& previous_state_c, + const vector& previous_state_h, + vector& logits_output, + vector& state_c_output, + vector& state_h_output) { const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank // Feeding input_node - float* input_node = interpreter_->typed_tensor(input_node_idx_); - { - int i; - for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) { - input_node[i] = aMfcc[i]; - } - for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) { - input_node[i] = 0; - } - } - - assert(previous_state_size_ > 0); + copy_vector_to_tensor(mfcc, input_node_idx_, n_frames*mfcc_feats_per_timestep_); // Feeding previous_state_c, previous_state_h - memcpy(interpreter_->typed_tensor(previous_state_c_idx_), previous_state_c_.get(), sizeof(float) * previous_state_size_); - memcpy(interpreter_->typed_tensor(previous_state_h_idx_), previous_state_h_.get(), sizeof(float) * previous_state_size_); + assert(previous_state_c.size() == state_size_); + copy_vector_to_tensor(previous_state_c, previous_state_c_idx_, state_size_); + assert(previous_state_h.size() == state_size_); + copy_vector_to_tensor(previous_state_h, previous_state_h_idx_, state_size_); interpreter_->SetExecutionPlan(acoustic_exec_plan_); TfLiteStatus status = interpreter_->Invoke(); @@ -220,25 +223,23 @@ TFLiteModelState::infer(const float* aMfcc, unsigned int n_frames, vector return; } - float* outputs = interpreter_->typed_tensor(logits_idx_); + copy_tensor_to_vector(logits_idx_, n_frames * BATCH_SIZE * num_classes, logits_output); - // The CTCDecoder works with log-probs. 
- for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { - logits_output.push_back(outputs[t]); - } + state_c_output.clear(); + state_c_output.reserve(state_size_); + copy_tensor_to_vector(new_state_c_idx_, state_size_, state_c_output); - memcpy(previous_state_c_.get(), interpreter_->typed_tensor(new_state_c_idx_), sizeof(float) * previous_state_size_); - memcpy(previous_state_h_.get(), interpreter_->typed_tensor(new_state_h_idx_), sizeof(float) * previous_state_size_); + state_h_output.clear(); + state_h_output.reserve(state_size_); + copy_tensor_to_vector(new_state_h_idx_, state_size_, state_h_output); } void -TFLiteModelState::compute_mfcc(const vector& samples, vector& mfcc_output) +TFLiteModelState::compute_mfcc(const vector& samples, + vector& mfcc_output) { // Feeding input_node - float* input_samples = interpreter_->typed_tensor(input_samples_idx_); - for (int i = 0; i < samples.size(); ++i) { - input_samples[i] = samples[i]; - } + copy_vector_to_tensor(samples, input_samples_idx_, samples.size()); TfLiteStatus status = interpreter_->SetExecutionPlan(mfcc_exec_plan_); if (status != kTfLiteOk) { @@ -261,8 +262,5 @@ TFLiteModelState::compute_mfcc(const vector& samples, vector& mfcc } assert(num_elements / n_features_ == n_windows); - float* outputs = interpreter_->typed_tensor(mfccs_idx_); - for (int i = 0; i < n_windows * n_features_; ++i) { - mfcc_output.push_back(outputs[i]); - } + copy_tensor_to_vector(mfccs_idx_, n_windows * n_features_, mfcc_output); } diff --git a/native_client/tflitemodelstate.h b/native_client/tflitemodelstate.h index ee5bfb6a..3a6d4971 100644 --- a/native_client/tflitemodelstate.h +++ b/native_client/tflitemodelstate.h @@ -14,10 +14,6 @@ struct TFLiteModelState : public ModelState std::unique_ptr interpreter_; std::unique_ptr fbmodel_; - size_t previous_state_size_; - std::unique_ptr previous_state_c_; - std::unique_ptr previous_state_h_; - int input_node_idx_; int previous_state_c_idx_; int previous_state_h_idx_; @@ -40,13 
+36,28 @@ struct TFLiteModelState : public ModelState const char* alphabet_path, unsigned int beam_width) override; - virtual int initialize_state() override; - virtual void compute_mfcc(const std::vector& audio_buffer, std::vector& mfcc_output) override; - virtual void infer(const float* mfcc, unsigned int n_frames, - std::vector& logits_output) override; + virtual void infer(const std::vector& mfcc, + unsigned int n_frames, + const std::vector& previous_state_c, + const std::vector& previous_state_h, + std::vector& logits_output, + std::vector& state_c_output, + std::vector& state_h_output) override; + +private: + int get_tensor_by_name(const std::vector& list, const char* name); + int get_input_tensor_by_name(const char* name); + int get_output_tensor_by_name(const char* name); + std::vector find_parent_node_ids(int tensor_id); + void copy_vector_to_tensor(const std::vector& vec, + int tensor_idx, + int num_elements); + void copy_tensor_to_vector(int tensor_idx, + int num_elements, + std::vector& vec); }; #endif // TFLITEMODELSTATE_H diff --git a/native_client/tfmodelstate.cc b/native_client/tfmodelstate.cc index 866775e4..5393ed40 100644 --- a/native_client/tfmodelstate.cc +++ b/native_client/tfmodelstate.cc @@ -98,6 +98,9 @@ TFModelState::init(const char* model_path, n_context_ = (shape.dim(2).size()-1)/2; n_features_ = shape.dim(3).size(); mfcc_feats_per_timestep_ = shape.dim(2).size() * shape.dim(3).size(); + } else if (node.name() == "previous_state_c") { + const auto& shape = node.attr().at("shape").shape(); + state_size_ = shape.dim(1).size(); } else if (node.name() == "logits_shape") { Tensor logits_shape = Tensor(DT_INT32, TensorShape({3})); if (!logits_shape.FromProto(node.attr().at("value").tensor())) { @@ -134,66 +137,83 @@ TFModelState::init(const char* model_path, return DS_ERR_OK; } -int -TFModelState::initialize_state() +Tensor +tensor_from_vector(const std::vector& vec, const TensorShape& shape) { - Status status = session_->Run({}, {}, 
{"initialize_state"}, nullptr); - if (!status.ok()) { - std::cerr << "Error running session: " << status << std::endl; - return DS_ERR_FAIL_RUN_SESS; + Tensor ret(DT_FLOAT, shape); + auto ret_mapped = ret.flat(); + int i; + for (i = 0; i < vec.size(); ++i) { + ret_mapped(i) = vec[i]; } - - return DS_ERR_OK; + for (; i < shape.num_elements(); ++i) { + ret_mapped(i) = 0.f; + } + return ret; } void -TFModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) +copy_tensor_to_vector(const Tensor& tensor, vector& vec, int num_elements = -1) +{ + auto tensor_mapped = tensor.flat(); + if (num_elements == -1) { + num_elements = tensor.shape().num_elements(); + } + for (int i = 0; i < num_elements; ++i) { + vec.push_back(tensor_mapped(i)); + } +} + +void +TFModelState::infer(const std::vector& mfcc, + unsigned int n_frames, + const std::vector& previous_state_c, + const std::vector& previous_state_h, + vector& logits_output, + vector& state_c_output, + vector& state_h_output) { const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank - Tensor input(DT_FLOAT, TensorShape({BATCH_SIZE, n_steps_, 2*n_context_+1, n_features_})); - - auto input_mapped = input.flat(); - int i; - for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) { - input_mapped(i) = aMfcc[i]; - } - for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) { - input_mapped(i) = 0.; - } + Tensor input = tensor_from_vector(mfcc, TensorShape({BATCH_SIZE, n_steps_, 2*n_context_+1, n_features_})); + Tensor previous_state_c_t = tensor_from_vector(previous_state_c, TensorShape({BATCH_SIZE, (long long)state_size_})); + Tensor previous_state_h_t = tensor_from_vector(previous_state_h, TensorShape({BATCH_SIZE, (long long)state_size_})); Tensor input_lengths(DT_INT32, TensorShape({1})); input_lengths.scalar()() = n_frames; vector outputs; Status status = session_->Run( - {{"input_node", input}, {"input_lengths", input_lengths}}, - {"logits"}, {}, &outputs); + { + {"input_node", input}, + 
{"input_lengths", input_lengths}, + {"previous_state_c", previous_state_c_t}, + {"previous_state_h", previous_state_h_t} + }, + {"logits", "new_state_c", "new_state_h"}, + {}, + &outputs); if (!status.ok()) { std::cerr << "Error running session: " << status << "\n"; return; } - auto logits_mapped = outputs[0].flat(); - // The CTCDecoder works with log-probs. - for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { - logits_output.push_back(logits_mapped(t)); - } + copy_tensor_to_vector(outputs[0], logits_output, n_frames * BATCH_SIZE * num_classes); + + state_c_output.clear(); + state_c_output.reserve(state_size_); + copy_tensor_to_vector(outputs[1], state_c_output); + + state_h_output.clear(); + state_h_output.reserve(state_size_); + copy_tensor_to_vector(outputs[2], state_h_output); } void TFModelState::compute_mfcc(const vector& samples, vector& mfcc_output) { - Tensor input(DT_FLOAT, TensorShape({audio_win_len_})); - auto input_mapped = input.flat(); - int i; - for (i = 0; i < samples.size(); ++i) { - input_mapped(i) = samples[i]; - } - for (; i < audio_win_len_; ++i) { - input_mapped(i) = 0.f; - } + Tensor input = tensor_from_vector(samples, TensorShape({audio_win_len_})); vector outputs; Status status = session_->Run({{"input_samples", input}}, {"mfccs"}, {}, &outputs); @@ -206,9 +226,5 @@ TFModelState::compute_mfcc(const vector& samples, vector& mfcc_out // The feature computation graph is hardcoded to one audio length for now const int n_windows = 1; assert(outputs[0].shape().num_elements() / n_features_ == n_windows); - - auto mfcc_mapped = outputs[0].flat(); - for (int i = 0; i < n_windows * n_features_; ++i) { - mfcc_output.push_back(mfcc_mapped(i)); - } + copy_tensor_to_vector(outputs[0], mfcc_output); } diff --git a/native_client/tfmodelstate.h b/native_client/tfmodelstate.h index c3dc7708..0ef7dcfe 100644 --- a/native_client/tfmodelstate.h +++ b/native_client/tfmodelstate.h @@ -24,11 +24,13 @@ struct TFModelState : public ModelState const 
char* alphabet_path, unsigned int beam_width) override; - virtual int initialize_state() override; - - virtual void infer(const float* mfcc, + virtual void infer(const std::vector& mfcc, unsigned int n_frames, - std::vector& logits_output) override; + const std::vector& previous_state_c, + const std::vector& previous_state_h, + std::vector& logits_output, + std::vector& state_c_output, + std::vector& state_h_output) override; virtual void compute_mfcc(const std::vector& audio_buffer, std::vector& mfcc_output) override; From 4b305d2f5ef409f129c4638561f55b3707d9e8dd Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Fri, 14 Jun 2019 15:11:21 -0300 Subject: [PATCH 4/6] Remove --use_seq_length flag --- DeepSpeech.py | 4 ++-- README.md | 2 +- bin/run-tc-ldc93s1_tflite.sh | 2 +- util/flags.py | 1 - 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/DeepSpeech.py b/DeepSpeech.py index 7e92e202..400a5067 100755 --- a/DeepSpeech.py +++ b/DeepSpeech.py @@ -588,7 +588,7 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False): rnn_impl = rnn_impl_lstmblockfusedcell logits, layers = create_model(batch_x=input_tensor, - seq_length=seq_length if FLAGS.use_seq_length else None, + seq_length=seq_length if not FLAGS.export_tflite else None, dropout=no_dropout, previous_state=previous_state, overlap=False, @@ -630,7 +630,7 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False): 'input_samples': input_samples, } - if FLAGS.use_seq_length: + if not FLAGS.export_tflite: inputs.update({'input_lengths': seq_length}) outputs = { diff --git a/README.md b/README.md index bf593a27..91447cfa 100644 --- a/README.md +++ b/README.md @@ -343,7 +343,7 @@ Refer to the corresponding [README.md](native_client/README.md) for information ### Exporting a model for TFLite -If you want to experiment with the TF Lite engine, you need to export a model that is compatible with it, then use the `--nouse_seq_length --export_tflite` flags. 
If you already have a trained model, you can re-export it for TFLite by running `DeepSpeech.py` again and specifying the same `checkpoint_dir` that you used for training, as well as passing `--nouse_seq_length --export_tflite --export_dir /model/export/destination`. +If you want to experiment with the TF Lite engine, you need to export a model that is compatible with it, then use the `--export_tflite` flags. If you already have a trained model, you can re-export it for TFLite by running `DeepSpeech.py` again and specifying the same `checkpoint_dir` that you used for training, as well as passing `--export_tflite --export_dir /model/export/destination`. ### Making a mmap-able model for inference diff --git a/bin/run-tc-ldc93s1_tflite.sh b/bin/run-tc-ldc93s1_tflite.sh index bab6d7b0..b402d7d9 100755 --- a/bin/run-tc-ldc93s1_tflite.sh +++ b/bin/run-tc-ldc93s1_tflite.sh @@ -20,4 +20,4 @@ python -u DeepSpeech.py --noshow_progressbar \ --export_dir '/tmp/train_tflite' \ --lm_binary_path 'data/smoke_test/vocab.pruned.lm' \ --lm_trie_path 'data/smoke_test/vocab.trie' \ - --export_tflite --nouse_seq_length + --export_tflite diff --git a/util/flags.py b/util/flags.py index b0f824ff..a4cb4979 100644 --- a/util/flags.py +++ b/util/flags.py @@ -73,7 +73,6 @@ def create_flags(): f.DEFINE_string('export_dir', '', 'directory in which exported models are stored - if omitted, the model won\'t get exported') f.DEFINE_boolean('remove_export', False, 'whether to remove old exported models') f.DEFINE_boolean('export_tflite', False, 'export a graph ready for TF Lite engine') - f.DEFINE_boolean('use_seq_length', True, 'have sequence_length in the exported graph(will make tfcompile unhappy)') f.DEFINE_integer('n_steps', 16, 'how many timesteps to process at once by the export graph, higher values mean more latency') f.DEFINE_string('export_language', '', 'language the model was trained on e.g. "en" or "English". 
Gets embedded into exported model.') From ea1422d47b0ba5736ccda19d4bbcabc51c93b70a Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Mon, 17 Jun 2019 08:42:18 -0300 Subject: [PATCH 5/6] Document vector/tensor copy functions --- native_client/tflitemodelstate.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc index 9af0ae86..8c61a83f 100644 --- a/native_client/tflitemodelstate.cc +++ b/native_client/tflitemodelstate.cc @@ -170,6 +170,8 @@ TFLiteModelState::init(const char* model_path, return DS_ERR_OK; } +// Copy contents of vec into the tensor with index tensor_idx. +// If vec.size() < num_elements, set the remainder of the tensor values to zero. void TFLiteModelState::copy_vector_to_tensor(const vector& vec, int tensor_idx, @@ -185,6 +187,7 @@ TFLiteModelState::copy_vector_to_tensor(const vector& vec, } } +// Copy num_elements elements from the tensor with index tensor_idx into vec void TFLiteModelState::copy_tensor_to_vector(int tensor_idx, int num_elements, From f12ea5e958716ea9648833fb25cd6b204b45b4e0 Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Tue, 18 Jun 2019 19:23:16 -0300 Subject: [PATCH 6/6] Add a test for interleaved/concurrent streams with a single model instance --- data/smoke_test/new-home-in-the-stars-16k.wav | Bin 0 -> 114794 bytes native_client/test/concurrent_streams.py | 78 ++++++++++++++++++ taskcluster/tc-python-tests-prod.sh | 2 + taskcluster/tc-tests-utils.sh | 21 +++++ 4 files changed, 101 insertions(+) create mode 100644 data/smoke_test/new-home-in-the-stars-16k.wav create mode 100644 native_client/test/concurrent_streams.py diff --git a/data/smoke_test/new-home-in-the-stars-16k.wav b/data/smoke_test/new-home-in-the-stars-16k.wav new file mode 100644 index 0000000000000000000000000000000000000000..bbefd166f5d93f3634d1b3c2a2af90b13081fc04 GIT binary patch literal 114794 zcmWIYbaP8Oz{n8p80MOmTcRMqz`(%Bz{t>G$H2f)%fP_IAi$84SdwUWfRTZL;s5`C 
z|3Tp2|G)qL{AXbJ^Z)n%zyBE-{{R2^|M&mz|G)qL{Qv9!@BiQb|MLI)|8M_4{QnG9 z3DV8X@c%zIg9HON11rPt|Evs>3_=XT3>*v+3~CG-45kdK3~~&T4EzkL3~|D*qJ|G)kJ?*Fa-cmJRJf9U_^|M&mj`G4pC zmH$`%GcxcoC^FbHC@`opNHGX8y!`*}KM#W_0|Ud~|I7>m3}Orq{B>WRPO;WN=`xV$fxfWpHM&W-w+@ zXW(OyXAoo%W)Ng}{r}_tcmH4hzxx0F|8M_~{J--5`Ty(xcl@98fA0Ss|4;ni{D12I zTmKLI&;4KgKj;6Z|J(nU{_p-@`ak9WtpCgYFZtj8f5QL6|9Kfc{{Q;lib0rxmEraO zNB_V7zxn_De|ZK*hIju(82A|&7+(KB^#A(*|Nli9E{?}pfVJK!;#jt^427@<407E*%Y=$`u@eHX9SqwT1LJT$x zVGMo@attC2ObqM{ARqkv|M35%|I`1^|G)SD)Bhj9p>y~Do&S6cMhuz^iVUF)X$+QI{=fNu`Tv#wpZtINUx0y~L4m=8 z!GPh(e<=ojhOhts|Nrw}oZ-&@{r_+LS7ZohC}J>Y@MiE}`2PR-|L^~08H^Y>7`PcU z7=Hb~@_+6BL*BnEwk7zTZYAOFAozxZF9;otus|3w+h7#tWv zz;PeLkilTi@caLR|8@*^3=#}_34Au-xU=};W@BbhFb1?`oJo`WW|Lp%O{-623_W#lU z5B~Eo-2H#^KRd$*aNb~Gxb*+P|2zNp{r~g-|9@o$H--oXH->lrKmX@s;9_|2|L1=> z20aE&h9Cd${NMS1*8j%;`~P46fARm$|G)qL`2XVn{{J`s|M`Fa|GEDs{y+Ku?*G~U z2mUiKurly7y!`+9KPb&zEQU^oQidvqe1;4LT?R)67X}LkF9uZxHU=gJ83s*;H~;T~ z%jK>Ar~Tjk|G@uS|5yCK^#9X;UIsn}E`~S%ul|4ZA5;cC`p?Pm=l{9?r~hyL-}is> z|GNLn{%`od>;Ja@$N%5^ugMU?pv}O|@ZrA;Jz0SN}izfBAp^ z|2_YY|8M(W`#=Bx_WuX}&->r{fA;@%|5yCK{C~^;MgJH4pZbWYAz> zVUT9pmD3^oit4Au;$48Q*0`Tr6e3rygePl@5n{}2CH|G)KLiGh=Wje(Wn z!T%%wXa1l6fBXMK{~!Fn{D1lX#s3%lU--|+v*|2O~l|KIih$p7vCxBfr!|Ji>921N!%1|tR+hAM_? z27d+}hA;nR7<3u*7=Hb~@n4+5kU@dr@BbJ7MH%E7Zv0>Q|MLGo|KI|KI=b{eS%b_kTWc>6^ol$WY8+%%H>&$`HU{%V5h8%#g^S!63%q!BE8D#Gt_N z>pwfg|Nrm)|NZ~+zb=CugA#)}gA&90{}=!N{cpj*!T^fTC;tT)4*oy)pNWB=frUYW z!HL10L4)DL|NH+R{(tcQ!T-nLTJHS+)BivH7h@1-5MnT4kYW&I;AFV_|IL3;dgNvJ z{$GT_j=`BBm?4!Rkim+35IT$|vzw`g%|6Bis89x1g`2Xeq)Bn$c=^J1dvokOV})&IZ$gIZ5I41o-041NrD48{x^40a6W4E79R48{!k43iiV8GIRJ z7~&cF85S}mGJr}cMFu5?&;LLFfBv77fsf(W|9}7A{eSTP&3{IQi~sljKlcCn|C9gk z{6F%4-Txc^AO2@y`1}9U|DXQ_7(lJ8um43DK>0+5L6O0XL5e|&;qL!a|1bSN{QviV zb_PKP9)|b-_x23Cfv z|8M@k^Z(}mt^ZH_zx;pS|9Ah@8B7_37(gu_9)@RNm#Qo@lGyMH83U;SDg9C#w z0}F#NgC#=}gAs!U0~5o||BwIcFoZE^GVn9p|Ifg{z;OKk#s8=O-}wLj|NH-+|Fbas z{_n!zz+l6m$nfO*fFRvgfVn5crn;B=rK4lBr@1BgfIj$ zWH5v<ii0{{P@VBLhE! 
zI)gNW9)mH1J~-DpFeo!HGd%qN`2Vf{xBg%JfAs&}{|ElR`Y*un|Np1|SNl3X>e)(?*HxoAO1i2fAat9|4a-ZeV|;$%b?Go#lXS9$)Li(&+z%bGJ`k+ zC&RPA)g_S!Ic5j_EKO_WcUuwt3SbcSA^m1e>ny*20;cx21$ks;1co1 z|0DmW{-5@L;s2Tc%l~iuzwQ6o|Ihy4{r~v?ng4J9U;h8|5yG${r|xK$NxcXL3sv222lnH21T&Tg&CL_ z7#N=X{|NRWsMO+P`1+rjL6ISVVG=_bLjr>%LoLHXhE#?$h75*Yh9U+n23Lj*h8%`0 zhEj$J49gfMGb~_eWr$&DWXNF%WN>8&V2EajWiVinWZ+{EXOL%*U;y>@p8wzd|K|Ud z|5yH>@&Ca8rT_c>FZjRZ|AGHg{vZ7R;QxpJul_&$4~i=Z1_uU7hCqf?hGK?E;PNPv zA&kMC!IZ&^!Hq$WL4(1P!Hz+W!JWaDfrsJw|7+m3*CwzZSNxy;zu|xC|MLIE|I7cE z{-68*(EqLfkN#ivfBpYy|NH-+_S8{y+Nv;y(++pZ}oNy$FLI124F5^7uao!`uI#{%bL~FmNz@_|L!q zY6S{01Ttta{QS?$5X6wf;KUHg;K5+VAkOgOzYv2YgC&Cng9d{FxPL0jV8Wozpvhpx zV8!tBKd7b5!obb&=>Nn2@Bjb(FUz3EAkDzT@b>@v|11oG3``8H3?Kh9g6n%ehPVGe zKzqv9{(t)a`TyDfcmKZ!x3fjTJy1~wBL-ataRw0vDF!wM1_l`hK?W`cZU!cX-~R=` zb&)iKAcGhKFT?Hs@BT|LNHaYAf9n6<|JVM1`_BPZ%g(^U@cIAG|56MJU|oU?@Bg3p zf9(IS|6l*#`+wvAhyQ{M>fo6D@&C<#P+9rmKRW{tgCw|jcKZM6|Ns8K`v3m_i~pbg zzXZp~;r}20|NVdC|M~xK|6lum;s2%oZ~wpg|N1}3?;u}(2e*X({Fi39`v3X=Z~wpk zKmPy7{|EoM88R6v8JZYE8HyQV7z`LZ7y=m#8RQwr}{~!F{_5alWFaN*%KlK0N|0n-h7!LnG{-2G3kpa|al4J;G2xhQlP+%}*aA2@t zP-Wm^P-A%i|JeV#|9|`!V0inVkpa}-2bKB%{@?w7{r}PbZ~uS&|KvX?9yAz!|Nrt| zl;Pk1kN>~_2el$V>EQeSYyY4AfByg3|JVN+7(g}Rm;dMgzyHs{V8W2j;K~rn;KQK9 zV8-CfkjxOm;Lc#pV921tAj+V@AjiPQAi&_v5XE4^AjF^vZrA?%FU}yy@Z|p|@YuxR z|J(l``2YI@A|*{|AzlR|1&bY0QYY0|3CPD+5ZLqH~fF}|Kk70{~!K8^Z(KR zv;WWjKlT6M|God$|G)76{r_wKKmO-o0QFWL{eSoW@&60|AN~LT-;}|aL56{yL6kv? 
z;q!l124My@1}O$U21^D|zuy&HH>rWgRx}v?{}*TY_@9|UltG6;`OyFU|4;ls^Z)AqH~&FBw`cz!|Ns2|+y9sU@BC+H`0@Yq|F{2t|Nr^_ z{r^Y*U;Tglf5-px|BwFP{eR>CHUF>tKl}g2{|*0lg4?>*46+P%4E_vZ3`Gp7;FhTY zgA0QtgA+pxgF8b8LoB$RC;;xO7%@mPC@~l?eEWa(|C#^S{y+Tx{QsN(&;OtLfA9ax z|IffeELp?(c zLo9<6gB61ugAqd*gA0Q#gEoT^g9QVq6|Bsl$>7AG$MEMrC&R!0a^Q9^7sKEGObj3Y zKl=al|LOlH|8MzU{(s8=+^#(ioB%tQfxk|MmaH|2zLdwZQKGul|cNa5C^R z2r_7bOBy}~P~&z!~aYS|Np=K zfA#-Ua9?EE|IPo`{one3)&Ir+%l=pWFa2Nnzw3YF|26*?{y*@4&HpL?*Z*Jif6xC* z|8M+%^Z(2Lm;c}WKl*>u{{#Qe{D1!c(f?onWf^oBR2VcEd>LFAlo)szxEc5ucp11E zlo|XO!Wk?WbQny)eN0b=CjP)!WsM+ycp6Lsu@ZdY8mPoIvHviY8f&a zQW-#FC20&k3?dAF|NjHmK@#BcUIhkG1`&qy|DS=!kd+zu8Tc9g{TF4>Vqju;0q#wK z>R!;uu`sxm?akoIV8LL`;J_fx@aF%u|Db-{_Wuk2Pye6%Kj?qd|I+^j|C9g6|4;p2 z`ako3=l`Yum;YY^4(nb2*ZiOVf5!jg|1bT21@`av|7ZT+`2XuaD+4IsfZBQ&|6l)q z>i_NkAO9cuzxV(4|EK;x_<#NXwf|fUPyX-uf8qbR|NH;1`M>o4s{hCTul~OjoQ^>K zZctzL|Nj^NKmV6xkYUhcP-id)r;i4PK8A@5s~F}pG%*w~L4?5sod2{LOc=ZvlECGTKSMS{ z4MQ|TB7+lyBZDS`E`u%u7Xu5pga!5I@BjbyUj!^7&7i>`z`(`;8jJO1@Mmym@M2J6 zkYz9gkBV|JfO>%-%*SvOJW9vRz{GInKPaEQ`+wm7&i_mP&-=gj|5kANdH4U%e=!C# z25pA-|4;uv_x~xlmfHJ&7r4jv0Gz%U7+(E<^PiuAgTaVF2b^-07-Sg48T1*98B7@z z80;AI7_`8(X9+_RLo9;}Ln4DWgAIcpgD!(P1897UiQ(M;ZQwE;H1@Ri|Em8>{;vb~ zNLT;g^#9oZtN-u)e*&Ia0QHsM{(toU{{JukIT%zKKrI(_1|bG@24)6U23ZDvhWGz> z{onlm;Q!tC4)VKA%h~rqyJa`gT@bk{y+ME z-v4F)Pyavkf5ZRP|M&dg_aD^5Is5;?{|o=O|KIrk{Qo!q8NlO_tPCOynhe4Wstno; zstj5T3JkgoDh#X)piyQ}3(SncfI*Jo-+w*^4u+@yPy9dr|H}V!|9Adh@c;0CW(IKv zWd=P4H3k-j%iviOH3kNTYyXe`KldLrGIjU=lK-#&zyE*h|LOno3<2QLn}`4T7+(Ls z^8eER2nIa{P#R5U@M8e=zLgnL8AKRBBdV$lpz_Uw!JR>v;m3bdhFAtu2GIC{00Tb* zJHxC0;ta+NS`0A^{tTd+RGT56p%FY%X2Vdx;KX3ckj~)CpvJ(@V9nsiV8x&VuHCE| z^r2=mGkpIKN}(JKZ~j01|LXtS|3CkK`hNrLCQ$2wl|hg}f+j%?; zdJF~(ppoO&aw}4YRNZtGY zSN2d{{U!B2=!J0vg!I;5?!IeRSL5e|*L5u-3 zA11>f!XV9H!63@;=sz=qGJ_F=I>Z0}AOGL@f9F4_JkeoL1kZ^{GRQHoGsrTiGk|(s z_y6-VeEBcJ0BRGlfYa;$|B4L!3`z_}42}$1;93(jPRkAM6>%_tTHP1^pZI_H|IYsh z|G)Zw3*3)*_#c$3zW#sn|K9)G|DXH^&Axs6|MdT_{~QdUk*RC 
zz5iGI-}xVuCqSbw=l_F7LLdMC1D+KWVDM+iWC&s~XRu@lVX$RTVc=%a2lq-e86?0Z zw=~!WP)h<-E`#QKj{pDmUw}cLK?R&b6c~gVKK~bE5M}_)uz+S$|NQ^(|J#2Ch9Cby z^Qx>2PyZkIf9n6O|BwD3`rq+?{r@xnPyD|No}v8m|NVbZ`|R8QZ{RclYFC3w04oN4 z1`P%!hEM-l7`PZfa}vM*gJ${Uz6^Sq4)EGX_Bhb_Q|q{3aKJCb%p$Wbk0{WYA^+%_Yl%NBPwltQgD~ zlo_NMe*ORS|NH+R|3N8&nc>ZUMFw>SDF!VDO9nFreg+|MFPDcwiy@UEk|B{Hh9Q=r ziJ_5UF2gj28ip!{5QZ>@GzNDDPX;pvGX_vecK!e9|9AdB`Om>13N8fBXN7;PT+c{}caD{XhHv^#7yaw(rCLbN^5N zzw7`0{}=z?{{Qwr4+Ch_osq$w!HJg95|<|G)o#`~Ug>jsK_q?*aEfum9imf9L;` z|Bw7%^#8#BOaJ%(zXWb|aWnh`kAA-RFTfzhpu`}+pvGX$;LP9!o-q+-Fl8`jkY;%E z|KEQehL_+_`0)SBe?|rY1|Ej%|M!At$I|HbN0h*-*&8Oe~f8hV=|BwD(2d@;k_W%C>UEp36Xl?*BJMi>BsGb7N8-U8F zWB(t3Tk{Oy(v63Kp8-@>OEG}faeVy$Hoz4J^#D^H~wGrfARmB;L&7I z{j(E1SAXsQhyNe{gG&4N|D_pB84STQDnI_qGbn&-bY%uraJY&wfX4ge8O#~X8H5-f z{lD`6)&E!j@BimwP-YNj5MYpHkYzAoaAfdeaAq)KaAqh3kL+4As4!?V@PkVPQHJmT zPk?(w=l_5B|K~p=!{`4e|AXezeuBk8vu5Z1Km7myKR<&!!-xNO|Ns0i4<6gR@gLMr z;a~ujP@oZ5X7KE*40r}s13V_p$RNX@%^<+Q%)rZ_49=Ya48aU?3``6k{{Q%Y>;JL; zXa4{A&&~i^$MEkzKZ7WPHiH)fXbw<>L50DF!GIx%A&|k1L5qQ(K^nYX2(;n@RJuai z1E4nD$^ZNQF97#dcKl!Pzu)f$Y*F`n7~lW;KSh05W^785Y3RmP{I(yV8)=&pv$1c@ZyA|KI-~|G)PC26!&- zE_lS`%Kt2orm>EE&>CgWs|Ih!w`Tybn7ylpl zKkxsH|Fi$E|9{~B{QvF$+rV@9>;G^3zxe;I|L6X{1GfM`BbZDKQsCZ z3EVOUwTG{Q$EUCTfBT;SoI^pQn8pm@3<2Qz`A`M}1`Y<$=%XrlRL70Mogs`Nfx(YK zo`H!$5Zpe8jB@cZXflX1C^0B9NHQ2PgfiGMKw52l450C25e86NS7vZx2w<=Rmsp?? 
z=7-?=L5_i$;nn{S|3M?m&%yQn{r{K2bv~%a&%>a=puq6&{~vHo@$Ns!y<7iZ`v2_z zeQ;K*VXTj@EPX53A|H=QW|JVON@_+OHo&T@@zw`gz|M&mj{Xh5r=>Nn2FZ~Dg`ax@k zuKfqiH{JgK`u~srZ~ueREkDEK|6Bea|G)SD)&E!i@A`k~|6cI?*4zKz{{H~aHt>Mw zJ&YL?z~u;N^v8ffiNTP;he4YGH2=rN@C`g4EX)AP(*_I*3^L$W#;5C74rW-F9T>5!T_DXfdcTFf;u4ufm`LUh#49{}%9?g$w^L{J#zkgS-DX z|KI)p`2XAgFZ^Hnf8zhe|F8Xj^Z(WVKmS1^QlQlx)(jd9nhX};wRmm}aSWaeUJRKG zH4Mq%de@4lwAu(Xi!8?=%JBEUFoQ9J6a#3^Mvj4z;o*PKjP{}b@BY90|Np-L zg9L*NgBF7_187WEfB`fX51c)cWO76h~+Nsz&T!2w(&XfYTu_%ft2R54UC z*f6L<$A^s>Oc=r$A{bo2{TW>bF9t6LeFixO&}t-5uM||0fW{a>^PB7b?*fm2zx@wt zp$IWZFc>hHf>*DB(u6d4#!nPHXAN3C%D})6?mvNAY@iiHU;l%~=RhkyY#AgOco;w> z0~^D`|Hr}Yv)lhcGcceTx%dB{{Xg~p=KuZwyZ@*EFa5vZ|C#@1{y+YI=l|LNxBl<^ zf9e0J|A+r?`@jA_sIRo`|F!?$|Ns0C>RW?Wr|tW{`2WiP^ZqaSfB64(@T|zw{}2D) z`wv=0@ZkTw|9AgC|9|`cga4=gKLf8`m18htFlVq}5McQ6AJn1)%?*O)&|dt1^q+x& zgW*4To(Ht@NR+{f!Gl4G;phKX;5jB;aNkge!IQz3L5;zJA%P(SyoSYq!Hq$b0W>20 z;s5Rbcm96_uazrd@WAO8pSzc?897;b>uh@iDD5)4iZ2@L5B0Sqw= zUJStuAq?>hNenIw{tST(J`6z&KHw2QMFvoNL4iS>0W@mGz`)J$@4o;8XeJmm4++rT_4b&{|nr@2f66{|11ANzF=qI zW-wv^111AFmgFbYYR2kYya5=!l z@bv$o{~N)h4s-rb`oHb}=KquaulTp)WiAqAJjeqt#<~^qJR7U{QuMcU;lsj51OS1 zwXns(ElooPSq3QvBL)u!&}s;Oa7!?XL4kpl0kmR^n}Lx5GzzB&ZjV|rXfrr5crrLL zh%IP4H^nOW?W+G|CNXSAtgBfLd`% z46F{!#{}cbu{lD!0mj8ReqvfErR-l?the4TvmEjb4ec=E9zy4nZ&r5CozvlnJ z|0n)G0?(j=X3M_(e+o|LpcvTp{{*<4c=-Rw|I`03{D1l%v`Xd#xZeHzUy&h#A)BF) zA&nu5A&bF@0aQmNFcdH(fLCFMF_ePmh(NO$3Jjo;*LVNf7(neS(5x?LEfJ_B;bM?u zuwigzaA$A?w>Zrh92soEbrNWF5;Xb-T4D9z|Be4Q{@?l!nwdHJ|JwiS|1bT&|NqSY z%m1H(+e9z_@A$vx|Hc0g|9=LD#Y=Fj29&Db{1;-7VUT7pX0T&$Vz6Y;XK-L}W-w$h zV6bJdVlZZ~U~pt`U;wQZ|NsB%|BK*~5Y)EY4_?=O^#6wc3;x&rPyJu_fARmd;MI~V z|L^{P{r{8yXTkdnuKx$s;y?d`T7L%MkqywCoEn2UgCqkZc>es$e^76eg@K2G3%vFk zv<~R?e?|s5204bm|6lzFt!$QKPyvsQfmS_;G8i+2Gq^DrFsLyYg6HXZ88{f^82A`| zfNR?i|F8W&0bXSXS~I!t|GNMC|6lzN3YEkE&;36IhO_>!1h?o9{@($fdHnSM`TwK; zkAi0mZvFrIpAEdeTZzGf0koa~G&T(C|AJOo&SIB|God9zW7V7#Toy7O0g83S}z>cLpm4&@K-# z1{DT*hOgjtDWI_t&`8Oy{~Q1B1CKs~S`45yJdzBcSt3;iUWQ-bI`uxdy$+h~xc~na 
zc%7{lgBOD>g8&0)B@4(d9tK8+@BiQZ2dzy9%~ss~|K~qw^#rKhr^lejpu^z7kj9YB z5W^6_kjxMYZh1H{=rVxT@d$ud_kn6LF7O-!sE+ydpP2zPF3ZXA_5a`hQs5auPaTqL&%hwf@DIG^05l)`=>LuX*Z;r&|KmTX{{b3>0gd;8Mk_(H zeH;v+RdoCR@A-f1|MCBz8NMgr`O2H%b&<^Aoi(6!kDyUk7X~K=V+Laedj>fM76#Bt z$+!POra$=4%plDm$^hzD+y}=HH^YzrptUPU{yzn;fB~(^0r`Rly!xAqfd||I;s&q& z2xl+>?~nnl>j3rrKrPAL|JVQD^Z)e!Yya0XTnxGlpcUG- z3>FNaSuhm_IRHpZDK_;l%%?|0n!k`G4pCegAj-zxsdc{~iB3|Iht@;D7J` zsQ=gh%P?F9mr$?%-~BJbpamYu2j#XHhFXSnhGd3BhG_7b=mdsD1`h^51_uVv+7%Xt z+y9^aXJP=2)oL(UG01|)y<`|T7(o53SKyUppwbPr=JW-)<@WvmqyI1e|M(AD7p}_y zT5Wpt|Hl9O!8Hb`+y|`(2F;m)W|Kf84<_I>PJ9fYF(oDj(E4>-h5!Zy22gw7jKK)J zj|;SRjEO;;0klq9lfjlDmLZ>^h#`-mlEIMyw8lgg+%D#3fV7(){{Q&@*ME?UU;aPw z|IUBVXo@L#zT^XV<=n0RPr$7#(7vu~|M!E}Ykv>Is~`S<2e-&T zEz&3dKmP}fS%OxgNHF~Q{}S9nJNtj*|2hBL{!jWp`Twf_YyWrtZv?Lp0qtU1`+v*- zz2LF*58$;YzyE_)7>P1~W-3g;D`cG*{1`&Pc9T;Fff7F5s5I!F&Hw~gZEyDG4O+DYy=oS{{Q!aY=xzGO}{=e{l%m11GoBsFwpZkC5{}um1 ztDolmpYwm)|4;uxdmKS?TA+38ptXyj@+JzrddPyooWX{{hyk=m3N(Vk#vs7}8bbr^ zsQd6=j6s$`fx&>mhQSOxPh!L%#Bl%r$^R$8JIN#%K(lEv3@HqO40a4Y3@PBY6sW%o zT3y4<02;Fdt$lk1?h`)vF9_a;!pR`O0BSA#`w!Xy#lXPE@csXPaBUCTaR#dY)EM}{ zElSYJOVFC*)&CcQM+J|8M>-Gx-}-+Oc%97p|11A*{(tuWrT>rq|Nak}^Zfe%6}aUE z+Fb;i0bpVH1s<3F`d@&-h{1`$iNTj4fFX<_ogtec72N*=mDd{Jb*`X(=u_}|^xa_7 z_xwNnf5ZQc;9UZj!LKz(RXNv#K72PMJ)S`P)`@t&?cmH4Ue>u3cU;BUe z|I7bh{=f16A-F{T^&hlL0<`N2v{y-nL5=~`YT;%OW#9+z9RbZ2cr68JWr_=fIk@!z8pU#E0Ht3K@UE3KhBSsi zh7fQ&3ba}Uw3h1Y|1aSAyYK%&>*LuOKoHbF0`&wztI63IZh}{*fkp=|fLFwV)`syi z2rwuxfX09%!8L$9*yUp2@ntsfI&+YIP>U0^mKikrarpn@|I5JhZ%_W8`hWNT%l{|; z@Be?`KWGQj%m1Kt7{9@@_xuc?`4~vJgI2#>{m;T6#K6V?G6OV<0NPJC>;HuRZU4Lf z_y6ztzw!U!|6Bea`hVd6Ver1XZ~s{sWEi9vlo{l}J4rz`7-&6_7=sdnJ3}%<51)_Xenc?8)HH09tb{1>OY<+FNGMpur%)02)h_W>5mJJJe+W^+z#h}Rm+Fi`b09yC82fRAz=>KEjT@$PRFaE#c|MmZ%xvtCLR^Z3~{0yJK zH3VoyDQNEOAG9hFw5tZRMg}xq_VoX=|8M`l`G4{M*8dy+ANj1nw6KF@SbM zgT^&Mv)$_8_ydhFef|IJ|1I#S(9Qn`{~!6k4ZH?o!T;6&_x|7eA2iao_5afU$Npae z?@v1R|LK252GFf5<87%@mQNPtJF3>dT- zWEkFn_iEq%4;oq#qs+r9hL>NFTAV8~YKr2N+E7k7)|MmY5c*hiIMn#iB2Rv&8Y6Gi*M}>62 
zJtR=6`tv^<0~-Tqy@D8nJcAMgFF5u=`!i4e-vRddPVnlJtN%gnJqO-Pdj9{d|9`;i zQ9gp#^FI84|Np!Hq6|U|pmiu*44|B#$RNPL&G6#??f-lKZ~MRd|L*?_|E~kD;$HB- z^?&F8uK%YxpnaI34Al%p3>gfW z47m)2;8tBQgDXQYgD(T9&jWG`Xx#@F18DBkh9Qa}lfi>Qn}HWR(x$}#+J_3-eeoRJ zwgL4Yt-!0J9{vaIB-IADdv5=K^dB@%@#X)=|M&i1_C+E4ZZ0V#s6&VMt==W~gHD1FzxEWr$+10nbfpg7cIL11LR%cFn~wIDyyBfp&lX z{Qvm>+5ZRsAN;@lKWH^B=mZ1MsO+ErtPCIigZ7$$PA=eJ0Ihh^VgRjQ2hEQj0?)6U z{J$MM4shiEz5oCIgJSa2e=hJW7|3Uky^x?a`P>Yk7BT3AfIHyx4Q_x}#`81qGJsB3 z0IkFYoksv#vkqDv#K|DZAk6UQ|Aqgc-pAelpgF^<;1zzL5tBdvKZ4iB{r)e(09x}1 z8fOQ&>lb+S6lm`~XdM3Uf6%-OC=LAm|MvgW|JVNC{D18K{QuMc?*PvcUHHG@|H=QL z(*Qu{J%HA|fySCX|L0%;ty6mWpNYYML4&~p+(VXPP-Tz@k2df#h%>M;2tw160eGz> zXqL->!G$53A%?+|!52I(30l8y!eGu|#vsE0n#%*NVh63)0Ah!&L7Yk#%KTUf%j28_zzk+`{zG5!=L|e!Q(5J{(t)q%Gsb5c%TtGE^vJS zs<}b^UO@&>2!Pgsax#GC9T^ym83Gtm8R8iH8KM{x844M)88R6v7}^=~F1 z@(E}K24t-1_WzUM-H)sOpZEf8GD3 z|7ZN4`+xKQ>)=!Y+8=uC|7GxsjfeklgI6DY|1Zb@T0#96JW2yPy8_f=(PWTiPz29o zNHSf81%sH z4HE_(21f>e@Oli;2pDMhizRr)jV-i3)nhPaFaeMFsxat*XIecOEEycYBb%UgM4Aj{ z44^qnW(H8X4mzSbzbN@lBdGGuO)dHZkUvl6%LeSc*Z{RZ$K>L}_{D1Zz zl(#@@IY4J+fL2-E_<#KW`v05%FZkc_zvut7|BL_c_> z-k|{+bp*9wK>JkG7_`Cj4hR1q`hV>It^c4A7tmTOP%j$PH@*qp`^C%v>bIQvfBpZf z|5yH>_lPgVF+M{UB(b)RZBHp_n0`A&9}3!JI*kK?}UQ z1>}O~|KEaF$TEO;0Daj22aW~Kk)Vtu-J)o80ul~OS_ZD~jKk)y| zf6zHEpw(HR9ekjf(=Y!)yOQ7k=VDM|uwk$O*AFoaxeVnD1q>13U1h8cpq2z^&7m0s zXr+rZ187DHwC4siQpd*d=Rc_DB+DSf@D;pIUV_0Cyy6|?*RTI|7>pPkz&$z82qtJw zNDF+9j1+?ggEE5%xORO1|NQ?W|4;ls`hVg7HUHOx$BGyKpZ>r8|GfX8bbR9f%l~W) z`ry@+`V5xfJ^Y~k{Nmu=pALf>cvbWH|Ih#5`TzU><^Nm^H~+uTz_3!7J-@a_O{?GdLi}5D2%bzX()-qN8PhyB;3jCM-KZNlXL*l=Q48n}X|7ZQ* z#h}Ab`@f1|14GpRfB&l)Vi?Z+FZwUc=*TeX|J?s_4DbHCGsrV6_@Bk_fZ^tUF@|o2 z90pc~3;(Sd-hfBD1Q`zh|M%aT;rRcq|7Hxq44|y$&+zL%XcX!7|2O}S{0Hrg1Fcp)@PFI?ZT~@gDwh4< z2wpev>i=KxE<(^4(c}N1KF^!~KmUVP0ABt7?Y|hqi~leG%QApg1hX;dg3p=JX3%B; zou=^m|Hc3I42%q*6{VncjfM|K4H!<&qGiFp792g=QKx3O9|AW?8zyE*ezY7B^!~6dd46F=p z3<3=6|ATh=gVx8~`Om}f_dh?w%m1K#6Cf67OvZtsiXo1{mSGY@ECU-u8bcVv&HuOm 
zD=`Q$yaUgJUHX6G|F{3A{^$Si{=f47^#5D_zXq=e;bOS;{{i^?6VQ6wlm8d~{{-Hd z`s+U^9liU1>Hqfs5C0$gzXLog0vd?~omg_`|N8&8|AY42gH{lL)}?@US%B8`*??2I zH+a4qv|-@fYqOmKgB-Tw#wPyavu|1NkG@F#d)9<;{J4!rN)ogthdjKPP&3p_WY z%HYWm$&kws!(hr_$Kc4|3a)vq7_=F97(k`Q<^Q1e)TRG>{$Kh3?LTNV6tsSdj{&r9 z7Zg^Y-SGkp`V0XKVhlI_OES1KeEt9UKWOLC&;JPw4h%>CfBbL9;L5=l`SsAOC*??+oE$xbXkff6zHtfBrxH4_c`p$e_>Q$N*}Qefq!m|4MMlmhivj z|K0yH{(Jpz`9J@E)Bo20$G~UWT>LM_06M|v)c^1QZ~veD{|b1Q7pN8a;{UDx=l`Go zzx@Bq|6Bjh{l5mh!~ZdO4)5N7Rt6RZW`;li-~E63AGB5%G^%muKPSV7|5yIAGd%cz z_dhcOXvMuE!}b4u|Bw6^Wmx>b61;l4_Wz0hd<@V3FZmB@Iqv%Z6nyfN8UyHrj1Yz* zhDqR6*|`io4Eq>%F*Gn_G8|@@!H~%?i=l?Wl);A~hQXMDmqC|7pTUj68N4$Nw9~_$ z!J8qNAp+craAa_2FlDf0Pymlh2{A~4*O7x-s~ilE{$B#`OZ)Mki2<}56tqVIG%K+b zysxM0fA9ZJhAM`A|IhvxW#DDF@E@|SS(xF$e`kg*48jay|J(j^F=R2^|L^>N<$ut= z-TnWo{`dU9^`Di&k->=J_Wy1FQ~xjf4?1~l?*ID#x&JNy7yU2&Kl6X*|9k&IXOCO} zpF#BF|JncB{xgD4S+QX-XE^r%$NzYS7>4!#`~PP!tYk=KP-oC%X!+mpUy5NiLkz>q z{|O9B8Oj(g{%`yL^1n3$Xr|VdA&^0t;nV*=|3N#IG#Ef<1%xszVTb~cvt}|RFoZCG zRv`N@Xfc3RfPMz2#0Un^tdTrJFvGk5wf|TBul(=+-}3*e|5N@?{vZEe<-hfR@Ba_~ zANs%O|B3(U3|0(B|F{3Y{vQI_^AYz!$3r3|qQfB&!juguWQV8rnAzbZo^Ln_0s z{}=!JGt@9NF{Cl%Gej`BGWauaGTi)s=09jf&65A;{%`#s#1Q^p=U>o&HpU=Eeuk<4 zeL!oR{)RG0Gu>mD|IhM&^8aK1K>M}Y|C{`0W4ifYmSNJbEC05$Pi9^9zxQ|1-&6)S z=9dgN{`~$U!{p6kz!381%D)>-6PSzsrTkI-cY^T?lOSX6ztevc8S}l|EI)c z&fLWy^WXaaVTK-t1cuZ9Z~p(qP{^pqF#VtF|HBL)7hR^?F7?v@_GJO1h@c$EVTLsjI2krh=W6)u^4{k>u_}}_}@Bh31kAeG~=l?(a zzyANp|NIQ^{y+c!=D#?@tN%<4pmy4q|DZagg8{Ug4YbD;RO1^lC@|PCs58_vNPufI zA%++c?_!=mNBF;lrU5pCPg3|CIl8 z{%-~M2M+(g|6i8Dg+Yx0be;jIMQ_ajT7_55(8l1(kiZbepu^zH5W|qb;Kcx%9WVr+ zWC9uo2hFQ;GJtknflij#^Z)$+v;UX=pZEXT|Cj$a{9pC|#Q#bEr~Y5}f7Smj|3NEc zmi#~efBXN2{|o+~`+xHP=Kl--H~yas-lg#D|Be4Y|7$X+F+BQz;XkM~4qB=C)`To||+1Q>J~{JHkykZYwqh(7Zb{cm`Dz-1`8X)N}d&yZ_(+TQNNNFU8RFzwQ4e@X2Q-|F8dF z@SmOG(f{@TQ~vM&|KR_E|25##J*NCG{vZ2)%Kx4JKmLFIf93!B|NH-+{Qu*Z>q&$2Hqs@|HJ>FUHJFFD{KG#|MLIOe?{<|4rtXdXzdl~WEjv~EH49S_utq5pz#bL1_SU6 zP7T8vh9wN83=Ise4DAdZ3}p;H3~mfT3{ea&3?>Y2;1g8r!Sh>+3~UUbagbO4-~0#d 
zmLgn^R*6pozY1P;1zPQR2fR-GD0mMTs4WFr_4MXH=(HEm-ZW6^1MN4{2lw>9gIm#{Rlx85 zgU*z?{{Pwkum9N@K)bm?{d@xkP|se4fd_n=Avc2z_>4)=ZfRC%D@U<#Vi0mzX?>EDKc0x7&BNh1cO)Gf>y5RGFUQz zR&q;#$G|~*5TF0w|9{K>RsZMzZ~0&Sf5HDX|7ZVS^nU`FoCuzU-uwT|f6(j+XrJ8E z|Df}6{((<+0_`$C`TxfMPya<2zJO0fdJEn^1KOV^!2s$>gI4W>i;$W7lKa)-~gYb06Mn{ zG$$9tkjRk1(7`Z=p_^e2LpwtaLoY);Lj;2zgEvDOLlHv>Ljr>*gA;=*gC>I{gE{!j zThIx8pp`%p;2IB<^O;DC;6Y{ zKlA^u|E~Od^6%BZ7yo|zQ~q!G-|D}_f204t|6TtZ{a60~^Pj|jhyNb`jsNrgH~(+( z-}1k~fA0V9{;~bP`|r}f^Z)k#yZZ0czt{h`{#*Qy`Jef}hHX1r8(R(=FWW}eG*&g% z4J@84*O~p8Z!zgJ^)f0lE?`h&nE9XK|Ac>9|Gxa){I}|F@L$EhpZ*;Ev+2*|Kk0vx z{sjJU{Nwf~^-txW(m&;Y3jS38nfhnSpJjhG{#o|t$e;axj{Q0JXWO6lKZ$>=|5*P? z{uBL2?+@1>u0Pj)@A9@;o z#otc9fBy3LZSXttckJ)%-%Ee5{hjw)?>F;ruHR|D7yh>RbMViyzl;9f`ftyCn?r*8 z9)}c1Ki6A6H32=oO5S1tR-uK0#sWVDRD>rBJ>omWo z|Nj4H-M_YfPygKdeesXmzw3Wj{7L<@<i~r0FIt(!kb`0(RdH*l@4?3GO@c+(#eE&E82hEML zFgP)&F#P*p$Y8@bl`)G^jZv8~mT?~AWyZ6NGa0=ZIT$$@wHWysH!=h>fcDq@1h0qQ z^k3+I@4rd^-v49yzvW-izleWf{}TVr`X~Nh`~Qi5oBs*^U-KVS`WY~=F?cZSXUJi^ z&uGBp#FWHzl_`n&C$l|^CyOUb9?N+a6IOXvPS(9FvsmV_)Uw#HoMg^q{>QY6DT~R4 zX$PY)V>QFu{}=!F{5SgF{_or0xqm19_5OR~PuU;kKbL+N{jT~Q^V{h6$6vdCRsU-J zRrjmtm*X#%U+;c0{F3_>^DFC@&9DDIkN!OS^WD#9KX3fJ{PW_^4?k!9eE9RxPvu{- zzi#~G{#EqL@AstNdw#F~UH$vjZ>~RIetY~0{ge17^iSBITYnDx>HM?dPyZi}zi<9d z{x|g>Gw2j%hK~%dnL^l4a^L4Y&*jOvoqLafwQ#A>bwMkU^P>487Q*eq#-hO@hJsA| zpZLECFbO2`FmYD1+p@8IxAITX@0q_WezE+T{NwPq|6gssYyK?z`Qy9Q z_r@P_zqEfp{2tcgLTOKW2a2{@ndD z_wVGt+S3~Y@2jGYX>|F8L<_rL0Y@_&*4GyWO=d-=EbZ_r=azkmL$_+$3x`S0z&C;Sfj zeePHDFWXA z%e=Betm6B`t;8C{mP%fba+aJd-Y4-=(n_LEWUJ78;a9?1g55kiTrHdt95Y$3GWj#c zGfexZ`{(7x0T;re{lY2|E}<3?a$4>x_`<2dj4zA@5XMd;vJ@a?jAC5m;{v`a>|NHdM#J|1&=KOp5clF=oe@6dJ{-6Fg_urO( zqW_!!@BcsXf7Ji-|LeeOwn4MToBk{RXZ=60!5qt+!90n1F7ry}&CK!4+|1XRt}$I@n$4ucw2m>EQHk*xLlXlB!@U2x z|BwH3`FH2)L^S__^uK1(;N9vEY zKOBE9`o;Wb!QYwxJ~DK(Eauq6ZOOyLw?@E0__wf%aF1}DXr|aD(G<~pqLJdW#4W{~ zM6ZeXi}VRCzMmRDRezrQ+52n#ua;jWzs~&<`hD})ieHa^IsVT2t@E4j_uSv&f2{uK{)ztc z`cLTJroXHH{`>3xFaDqHKfizT|9$u;^Izk?$^V4^YyWdHfZ7>(3>gf*3_9Qy|Ns7< 
z{NMCH@Bh62%l~)$ul~RC|I`1h3>pj(4C@)rF&tvJ#lXR+&FIY-!x+OD#8}R_mhlqf zS;qa0{}>&ZGMU<#wlIBS;$xO)PGX+Oe4Y6_^LJ)OmUqm*nORsgS!`JxSxi}ESpG0S zV7|;;$*jwKm8q0ThG_?*E8}j4K!&6L9saNWC-AS}@9jTcf9Cua`Mvm;X<->tq&fB*Vz{7W-|zp}@H6t)qhAfb&HpU?^XgCiU-o~c|7QGK@=xgh z)c<$?D=;)Nd}7FB+`_n@@gd_A#$AjH7!w$k81FG`Vwl5_$q>WP#ITy-D8n{}O$;{~ zzA$`bxXf^#;Vi>8h6xN?8U8XDGNv*1GX7&sW;(*8#2m|fkXe+)gQbt!@7ueCp8e=_`>_QUQ6*>#zl-X7^g9&Fe)+% zF$yp;G2Uf(!tjpa4Z|mf^9%O@xAVQ+IN@lHs58w3w;;( z{_5N9Z&$x9{nq?#$+rjJ*uQ`O_W2vb_h;Y4zWaaA`@ZaZ%lFjpao-cZPyW95`_Avv zzo&nn|9#evYd_U~AN%9~ZxO>ira+eKtW($yv!CK<+O}}b>{q?2oi|`lrFCkz4f0^`^<=dHW zyS|_L@!;q6UweNi{5kxG?QhCo#eetz@&A|oKl6V$gB0U>MkA&zOwX7OG4(QaFzsdP zVS3A0#u&<&#F)jH!I;4q#puE4$!N#u<=o#<-WEf&p}f^V|QRGe!3Pzw_Uap_ySb z!)XR##tg&S7R|p2~EYQHXH{0~ypZ@oU=`?Jv$>N$M46#H~o?M_xC?H%T5koK0~qdvYe{( z)sL$`Q2(v=SNV<1EKwK1qx`3M6Sy~WS@VVnY~|PD)!_ck$;N({LGyR*&(>e!zm7(Q#$S(rn0`M`6uUh!0$!B&;RcD^YyR$|K|Va{+a%J z@z?C{GjJ=x{_pjFJ`A2rQ(1i3-mtx9o5MDZ^%(OJMn4AM|Be5y{5|+r=U>;qlmFiS z`}wc;-^;&of3^RL{^j~x^LO<>A%;jscP2aL@61^&OIg0Ll(L$#&SSa8e2@7r^L=I( z=1`_urejP7%$u0JnO-wqWxT?;fpIFBM<@eNp>b`_=4g!dLOH@4m2q zx&Ha(=UtzdeJ=j=<&(#!6QAlnZu;>5!;6o{KHvB}?JLjsXFu-!wED&SC+Pn}w(q=! 
zqJDBq)naw-YBgyTsRk=~N*@sT%dwj+n8Tj^0;?bUZEjn>o!pz*&#_El>iEa|lkdCc zue1NwFeb1xak}wK2(<{T;+et`#?tm*>DT^mHD61<>U~T4p7Qg-ue{&Rzn*-z`?~qF z?&lAm>OZgl>h|l;Llx5=7EQJgwl%ENS(KP<7-Rm=|M&ji1O`c_ z3C!nMwy_$rEn;2DGM9NZ(|JY>#`g@NjMhvE%$zI{EL&LquuNdt!n~L17Gn$}E8|Lr zQigB_ONNX89sY0s_xbOkKV`pteiiFNe*O8y zgVZS zcYX!^dHb)3)s!bt_^F(&<|*Binq3-8G}P7ZNeA=WaJX~a;{3}ti?xrFf!~-7f|>k(xsBLwFzo-$@I&a^jIVs(tiEsmvGmuT z-zR>D{M_~B&Igwd8Xy0BX!zLn)&Gw?(-zh?j)z=K+xze{}E`Zf0ZqhGfFUoly*RdBd+{^MZh;9z59W@0G*m;3L* zzv}-543ii+nYtJo8MOYZ|6TR#$Pc$48b1&ITKMO}KMqDw<{}nO);^YK<_(Oy7|t+M zF*Y*su_Uo|a&F-f;xpoX&$XE2DQg7t1BS`}82<+TUH{LJVLGD~6DQLw#>tHTKyx7f zRR3-JTl&xAfA0Th|2+ON{$2RH>X+J2-|tVqT6}%;MepnDuQ}fozWw+*`)k41n_tYn z>VC`m-ta@{XX{VnpLc(X|N8u`@$2R93w}QOzV|!-uarNze@gx=`qTgC!e6yNFMdh> z(*48y$LBZm@3=qm|HGIbvedKwW$R+kVt>rS!t#gtF6&1wPQjDHMj~2bt&-Oyd&L$C zzY}QZ3*fcm4d;8rw~bGb*M)lzXA%1h=Jfv${~Y+e><`P|HGdoao&Mj-kjilO|J{GJ ze=2_M{<-Yu&Y#zQCjIjK75eMQFNfcYe!2cS@bmUhhhM*c>Hkjs9sWnN|trj7snekcCy_%Y$<{@-8!#Q%Nx57g?K^8exAs6Quu zr~G;PXX@Xqe~`_5TrupA7pL%ozA;({O9&R?_bAX!+*;E8vh0T%laqx_v0VezjyzX|331^{Qv5IQ~rNo z&|vKUKmULI{}uma{v`hU@W=I+%D?JAI{#Y#fBEOk81#Sc|4$6kzrX*P@z3|~w!eQE zJQ=P3{$ny`VEOm@ z{@VZB&+PYKnRyZ8T88V)|CxOMFJ#=zT+U#}WXrsW=_AWQW;G@$)(Yk)%tx8eGwHLM zvsbgKusvnZV^d+f#r%P}lEshd5OX#2RhFYHH<%n)MVP#pyO?G(lru>(GBYe^n8CP{ zv4~;be~tgs{?Gdt|5t^%^IzHjPmFB;{25O(YBKC%{QG|o<6Q=x|J97r|5yH({hz>e zwiz{#pGu_;=N>gul)Imi;{P&*JZ`-(i1qe+x0# z{k8c$`FGs^OMi?1{Q5ibpTqz5e>WMP{I6jNVX*wWnn9aUmZ9!{0886{4#x8g%?#WA zg)*}<@BIIl#gn;(>8+e{7gze{KB5$6WSP z^q=bA$A8KgguW;KQ~BHXtC?{>L;bJ2|1PoV|333$H_ObwX@9Q%VPX2tAo#EPKhOU+ z|1wJVi`{|Sh8&W|CZq*XWhR?j9XaPna?oAGJj?) 
z{Xgx$C+poGt_%m5|Nq(dXA5I9(}&+HnbUt2G3B#;`dZ3V&i3So50f#&vVXt+JY-V( zzwrkb}(Z)DP7zx?;v-w3t^ zj7R@YXG#61!gzvd<)4)d?i}-eWHW@acKokr`1j{Nr~9{6|75u>zR&zq{a=|`_S?GO zXE{Iop7Zr4(EmgRX@4L7No8o{T>sC9p@iL@IhjG@ zPcBO*bJ*_w3RuCYu$I(zv*nMOpksm{n^jm$ky`Z8s~ZDjsMD6m;Lqp7teBzx%|sdrb6b@ z{|!HvGyde5{P!E5wwCJ{)AA1+|93LZ|7-bmK3C@N)qjOJQ+{t@aAA%8tIxXW|M4$T9B=+< zez?H5g6YDi(_8_+_!$g-RkMjRIexs$&cMj}g@@?^SIlpxe}XK&>%M&<@Yw;kAGi$W9Bqtz4uj|c^69@hv2hD{-ex6FD`T0F~9sK@V!`c_h-L% z9a6`B1%0&Vb!YtW^)-VBTggw=zc+v6bIksu|IeCThjIPC#Y}F0=6z-1X#E@X!-XN7 zOXj=yKMuyD?DK!;vCm+Q{^Z7I%iQ(pCG&iNtWSTL7}%CEPi6bd!0|VNOXBa_uZNf> z{<-vf12Zq<#IIqzTmLA0OlR22v*6RDKPLpAep&QASxod3-_NT;!GHJv<@xi3GwR2p zKh?}P8PES@{cp@D!r<|D_Rl2NjSRd0zWyBD@zN@s_zF_j5!zl zoB#VcTlD|4ERTNu`TCmgH?#kn$sF%lwSH`7h+$mA^zHxCf0O@j=Xm`;_`C6+PeL}I z-+%tWclej$kBz*RKVyE*;6KCE_jUE(I&smRj0EX-w}JAa3<2{VPis}WetB>4UpcNovzcW+tz`R4r5{i?_LlB6R!fitmS+R`4Eq+4$#? z!jCs>zaqJp{q6a!{^#|-`8=h682`Rt@nc)Vvg(IDlNgU2Xt0f2_TTFt@(c&K6n<~~ z7tXw#h3EU$|HkaqKkQiT|NQ+c`7Tpf`kRHcoj5oA4f(l% zllSvZb`^$0e>wg*GFmaq{{H`G*WWyjc|U$JW&hD*y2hFhn48AXCX!<{y>Hn`SObghS{{77u^mi6R_CF2g4}Xp^CH|^oddkTC|ICkA z#t!Ci*65#|9NAn8zoq@3$H(xa`&SJg-}}|vEdMo`Ui}sPpUi&t&&r<%Sl+Oiyk%ls z#&h$79aARflaHslzOtYGsP$ikXa5iDKhaD#m`?t(|2yfQHPh{X6aHKNPyVmVxb)|F zX4${zS?vBa{5!~^_3!qNc`P3Pt=Se_mw$`#bo* z9CIV1<)6ho;lFDBXRuxQW5Afsw2L|8*Au3BoT7hju~{>7d{^c5{(a=DGMhH*`FB0M z2UyELKlmTOX8Jeks|QcX&;Ngg*uH-0|2vI?_uK4$>sf6WK7DiGJn*;ft0UL%Uz`5g zvDE)@XPo>0?r&W-7Dhv+!+(Bpc>b07WxzU}f&E|I-v|!JZx@*KIG=n;`#+WQ?!S4z zUj3J3PW`rn#fD+|HzEG6Ka#&E@SJ9{{nX30jBV;KWrnkC{tUZ+=CB?6KlPtH%i&); z|I{(dGtK^0_)~$s{m(kaIOfdXGg)UbuKU8q_K8vc`xY+kf7-uxu$D1N{j>l7i^1~$ zs(((br+=z525|iRzMk>+|Jc9&49fqeF+66L`zQ7 zcrkhZKm6B+Y0@7?rnG-*Oviqk|4w2T{deYf;Qw2U=?shi?qgW^>o)7GKi_|cGTZ|#;=yY`>cza7kv{$2b&i#_@u({F8t)&C9uEn{H#)5*H$ zZ`Z$btXG(7ShW9mvmF2Xk7>@oC;t{R9s6JS)0xednd5sOR}q)kKlNW?oCW_D{Un&Q~Uom#@!61e+E`!Zzx?qwEa3i{Q}u$kfF-&KD${@=iq@h9hB9m}!5@xQs5 zHCPN7*0XT@G5i(9`iYU}ZzLlR+o8XUew1@=Vczg3hxrzhKEoPDw*O~;$1!hXYWc^; zEW7UIE z1xze|mi#;Qf9gM5#y|fFzugcu;Ta2lkaU0XgU+ruY 
z{>w7%`yay8%W(SlJ;ryepZ{?%oM7Sq-~We~=|1DvKQ9?)|2xCFj^X6*Ie*@;IQ?<@ zJAwHn^ZK8xtiS*7|KG=A`8$Ov_3vR8v0sc#DaH&Sr-p%xdF3DVe>)j{ zf4~3R#_)zw^S=aR>c4}GuK$=ArZ6@CE@D(*S@m0*fsMt3LFx|^(;UXY-;0=f{@?#w z_CJBC<)8S!XDkW~4ZosTBpAN`N@05Yzw93q!%haNf3l1LjNJ^q{}UKa{{P9ejp-nB zKSu?NA$ti!0HZTQ(NBGbuD|#HYyao`z3@-spDq8n|L*-`#W;b%?{~{TfxoT)g8mEs zoAO8DeC! z!~cd17XK0$IsgA+WMWWch-8pui2nbQ;UJ^wUkyh0|KSWh{~H)m{`N9mVOYhO#<=yr z6QeKlng68>JD3vw^8EkCT)@EbmxDRoIWsv?~$B@pz`CsgRF5{1X&;M`u@5pfeuQr1K)0zL<87?#G{1s%n z@SmAs^?!9HF~;Q#q5m5gVh-Ds#@CZ46dSh5t_e+sk_|5nDXzYG2!{HMT>_@C*& z9-|TCpa0JP&M_AKd&uC(T=MTC(@#d-f0_RmFj_NAWw2nF${5DL$gtt><$oGXvze~_ zulf_kxbSx|qtJgLW+%pTe@*{~{#nTA`iJK~E5i#Wi$9%=@yxD&-~3y`_~qaEf2)~F z82|s#WvphF|MQeFhGFmj4SyXNI{p_izW>+$-|OE^#_5cC42q2H{}~uB{_ABd`7`zZ z)_+XQ;{UEQzWg`g|7r#uCT2$2|N4x7{$?_D{aefY_wW1viHxxfKL3yYef&S?|M!2K zjG_N5|EV!e{=fQv6JyuE=l}0AcKsD$*v-hnF!A3MhVKl4|FaloGo1T>pTYK@Ib+xV z<^OInsxy4}-^pO~A9Rz~{Qs-|-(?76{v85S}$GJO9xlVK-==)dm&nheVS zelUDzC}Vi_-;jaf->!cljMEvGFf{z1#(3*rBvaPERiJb38Q%Y^XKZD7$sqe*`2TdK zrGI@`X8m(#?ERn1#Q(pa;p%^zf6JMc{Lf?Z`hWa?>pwAudyMn{%l}VeIR9UeG5G&) z2C;w64A1_(`L~9VgW=-;lK&SNSpSAHf==|i_D`7MC&O9>9!AIi9sidxbuirjd;S0V zzkmOK{OiMX=AS>qmH(0dj2YzqdocX`&-I`G-&UsnKmClq|Gi>3@!yA`gn^Smp26_n zb|&us2298PHZeB+t6?hpKNEZ>_gRJo|4uQy{~N-{_3s?xvVR}`&-pFSwDvy-gEQmt zzg-O9{+|D@`p=lL= z2xH=Z6-L$nHy9@W(_#GcpO0|?gCC;^~8IJ$=VodvgnQ_PeE=Kl$dW^gN^)amepTLm9z{+q4d@rZ}|J@81|CKQ6 z{Qvd8_g^LWbnuSe;87%*|GM4^Z!Vtos``?7&|G!3tVumh;#s5VA zzhdb8pTrQ#_~>67LkGj!e@2Y{{|y-q{x@S%{(tnp4by~w>Wq8;l`=;Emt}DMZ_8-F z*!%xC!>9k>8F>B+FkblAz~JzI=fCL;_6#cj-~I!g+O_oGM8*^Uiohr5i!-YK&1NY6 zx0qr7|7ZWx{w-lh{U67m$?)L6EaUrsKNyt$*D<#ID`HS)`18N}|2f8~|M>s!W8Ct$ zfI)#_3!^>5s{bYbeHmLABLCm}e~BTEDVZUNaSB7%|8R!j|BwEMGyMKHi{at_nGBEr zUHf0fIQ9RmfBFm;{@wm3#Q6G;3*)5!KmLdO2i*l$&#;+c^M45jjsI5|tp2MqO8wiw zsQKTO(URf#zoY-77_T#={ug5E{`ZaH>%RcTEez}o&i|({sxg%OzxY3$A&|lIe*|OB zzc2>B|1ONKpmov=z6?tl-2PAaU;CevY1aQ03@`p~`R~S{^}mB*%70IWkN+JQ-u&P2 zpOfLpKX#@c|BM(V{>%K|%w+P{ol%0}HiPQ_5QYo?3jgsjF8?q7zluSJ;ro9chWr05 
z7&!lj{Xg_CfKm2e6l3=P0)~D6`52D;zxz*+!S(-jMsbD>491L;7=HYVU|jGQw6Bea zao@kg{|y;T|2<<2{a4I*<4*#kKI51FtPETJ=`&pVzw+PC|F;E6FshI}T2zn2&n|5Ibm|F?@l=KpLahkp(KxfpN#-vhpBcm3aWOu`Htj2aAW z3?={b7`FZwW7zs%n(@hh&?=w!e=8ZR8D}%>{Fn7VhDq#y+W)(Zk^hVs!~YjBZ2BMk zf91c2j7$G5Wi0>y>t6ta48z3#<_uFA_x{<;wEyo82E~6#O#KXX|8_F$|NoJ(@qaSo ztbeSG9shk9v>3boZ}{)cz|6Sf|8$0v|5F)HGsG~q|DVNB0Y1S>;~yil4d{Njzf6qz z3=jUFVr2Wb@&8?h(*IZg<^7w;nD&4Fzc~!&7_R)4{vXVE{_hrs!helS3;*f<|HSb0 z->&~h|1bT|$8dqc;(r)J*Z+tAj{X0@aP(g_gWUgV|Nk-gG8p}dR#r`HSzWq1L?|0fuI z7&QLBWcdF-`~PG{z5nkRO8#*(USNn|P-U3+KZ7BPVf(+-|LTk!|8*G}8D}s!{N2P9 z@P95t)L$n?OD3VeZy6K+eqy-w|J*+|#)SX!47LByFvv3g`+xS|^Zyqaau|O7zwmz{ zgXw=2hGvE{3||?J{g-1%XJYwxhH=jSr~i8xI2n#JZ2do(@%{ga4D5_2|8M-8z|6%E z!C?6RFXQdM>;E_X=lU;CipU&J86wEbTrgVcZh|Ex@)oBnn) zOlJ~jX!!5XFz5e{e{Yyt8H5?$GMF+<`#`sW3@rIv98L(yF#Y{)!Nkmv&XCF=`R~I2bcVmG_WX0h3PxoICBO`<1e-5Tp2GxH;|FxMv{QvUr z;9oYz-T%88GX8h{i)EBxxW(Y~&;MWK|MQGD{#*T<_kRsj(Z9R@IhmF-a{QhAzmG}% zuPc)xW5s`ChFJ`f|HS|M|5IZ4!=T6H_n-g&8iw3|3mE1xHZ$$`Bg_=W)b@8RgFeIQ zf7MJbjC=lr%EE&GlK(9JRDK^{oC5PlU0RVZ;A_|33Y9V2EJS`giJI=09bI(*L@Q zlUS;M@A>zS;ncsa|N9t=8Mpp>$N2m2-~U|9j~IOZxcr~Mr2gNMA>zLQqwfCz#=gIT z|F<*t|Mz6*{@4FE@_+IFg8!n7wv0yq75?93xbWu!vjyX?KUaSj{Xg{Y`TuVWR{!;w zo-iEvAI9*A@%;Zj2ASXWf4BU-`+pH*8KV@V3WNUtYyT(x3;oscUx!hSVI#w-e@Xw% z81FN&G2Q$d!`Q;W$ME9c8b)pAM+`!Y8yQ*|t~2~#xcYC^e*q?2Mz8-n|2_Jf^KZ|; zuMCg>^Dub-U-OTffeW-A=6@W+UnVgobEehIJS_6eZHyXBzyI4YocS;J-|esc|ECPS z|5^U;{!{n2MqS3sj3!K$Okzw&n1q>An53D68L#|j{D1JT z%pb8oxBfW%mHd0*&)&b@|Fr&T|GoIz>v!qzO}{_ZuZ6hI)0>l&V29i-skg%pT!JrY!zJ2eAxoB0=syVx%${Lnaci6_-*pB{W=581rpEtg|GfTnoz2w)Vzn+Y&tb!a9IMX<*IC|I? 
zGe7;G`e)Hk@9z~~nZK<6eC$i;w+-L-{`miM_wU-j_WxS`)&9Hx@74b-h8%{g42Kv^ znWi(PGl?**VtmKY_W$(X2Y(v>c>kI3I|Q^A?r-ORHbz#adS-Ff7Pdd^TRFdR2l9U4 zoyWU^$C*2dGl|WcN$0=x-^stTe_j9i;AhnD<$vY>OEH{c=wiIWXwDeIFyY_7Kl6Tn z|Ecw(;@hgPkH4gTQTk%@W$72kuPeX)`r7fW`kTVHPhW3-E&M9-we<_@m$RRrf9C$u z@Hz2Q=Euz+&b~K(|K#1ScP{VkK6-zi|Mlay`9J>s`u+bWTM6%Ku>_@0dh?C{8y4u* zX-BC)l}g~$`8DtJ{g2b$<-b;a_uHV(nxVr5e2dw~4_JyULY4!i#fAoL-{+9bC<8$hl&~NNNbN|#ZRIxl_H{trswVum>^DNtS=6wvt z|6>2R|Ni%@`uD{@`~KB4WH7F0Ol924kov#s@9W=;zk7aX{N2J(%{G;Hy0EIayhOVA z8xd{+8IJk?;=XHsp7cTMefN8=&vSlmVNha|<~+l-pZgQn4)*Izv;Ug@wD_j-rS8-B zj~_nOe^LCl>ATL)4L==!%Km8ncJ9lAPpdw>dN=jmf%jq`cYk{KWzn~&AK^a(e;R%d z`Xc->?(O=QIxm`EhP;0AmhFSd7yDl~nVWgJB>Ghk7=AUEGr6IAOk=6?KCzdKOFsI( zyZLFxr#o-;K1MKZ7ZH&ymGY9vlyR2dB9_LU|7Z6v(cfM_)_-06)1Ubd*K?i|JPY|+ z`J=el7%IMreZ2cN{-w!Nwr90(&A;>fk7eoLZ0ENUNfMdMKZ7HMiS=Lb&(6<1?{ePO ze3<*i^n1-um%q;$7qFfA9ZeVX$Uez;cU~mDQg)pYg^2 zAAhBPulx?(*J;exBuP$U&pYIVJc%U zQw!5V#v%r%|3Cf){bBvR{@1nNTmK0$KV}c$2@(hqJ|J>f=o@bndpx84pO@dFKF$7+ z^vUa+#_!MnGg*>3lDMyM_i*lK&0>`L`|Zb+ul}Dee$x9A_HEq{qu+IZcmBKk@BQCd zzrX!x|2E;X!^b`E=e_6p$np8)SJ@vPzt{fF`zP~v->)g(-+xa0kpAZJ3+rc%&-Gu; zc>CzX(r?-S&AGcIr>OqceWJHq`@TB6@^#5I975mEys!N{`MbuKl#klK_wbj>+g2vNqiEbE+PQ}hPAm0gTJq)ZXO~{A z{cz*g1g2+fJGr*;s|v3W)a2g6?D&uI_xpI5*4c<=Q2^bf;73;&q^m0|E>R%0&t zzx4O+U**5Ee`o&6|F!>*4?`YvDC-ioe{9WcKUlsqMKc)v_5PLhL;A<|ACG?e{MP(i z{r@cEJmxf(EzJE)FBv~G`Y~?&Kj&ZL|A~xym^zq5nBtjsGo4|a_&@OP)!**Ft$*MA zec?~wKV8OnRvoSzypH^`0$cfY_!zi4n6>^X|4RR1^_}ax$q)PA7yd6|KEtZb_KMY& z^&&GPno>PRaQ<{m+M=;opV7{`ti8(cnYdhi4y|z8HVA z|Iz=e{7=!}e}AffpZUT3jp4K7`&DmFzwCS|^YZ%(_t(B}%|3E|jr={4C6|AT^b%Em zEme(Rl{5t->0kV^43EBk`nKn9`oDia@BDer{Zjh4Y?81kPc;7xp#@xlzYl%3__d2g zlB1UGFvm$jX~{nEnf$ld>zI80eE59m&EA(MUmCpL_U`D{-Txw4mvFq`D&bwr$H*hd z=D;BT_uTKMUkyK=eJlIk`b+BHjQ>ghCjOcF=hR8JM3Ek9*{8T@+vtMO0YzdQdyCkVxYdg+WO7~Ppd zm?kkTWoBayVC7^9W2$6iW1RQD^zWJ9H-CTsqx0|oKWm0% z;(Wwh@bAEH=Re$kHUG@|o%Ht)gAVg1rbLE^|GxfH`oHV%{@-c8L;v3UFT{9=L5#tg zfrWwZU&pVb-)?@^{j~PO{`dLsT|PK`3iuNLZQ1u#KcaqW|8)JaA&y) 
zD*m_sm-$!wd*RR4pUS_Sf4Tl@{4M)0k1?60fGwOoj$MoW5L*Ci9n*_{Q~wnF`Tb|# zpUOYG|MD@~vU+ne@?7NE%HzP}%A?QSz`m5J_y2r`CrmL+whWv9H!~e%wP1b6u=UTz z-^c%?{7L`$?wi+-BYzqhZZo9)m-#=LVK>9nzimGXzr6i4=hMfJF(3TiTYk{_l>KGx zSB|e5U%EcGe{TLX@59@-$KNo&o%QzM+mhGGuTQ*L`F{OJ-p@f_bAEO(Y4I!|vxkV(3XYc=4KPNME@frw4@=oVz5o=I5EEUF)@O$Imbu4mBtv}U%7PCc& zib-x0cI2PUZ_LNbHu=}QPwF3}KX84t|NQ^k*PmX$FaEjv@9+Pce`9_%{(SnI`M(Rp zJ_Z}6lPtH{&a-*2h%z+%Iq_@JufIQ6eBb<)|GUfYtA98B3H(*{>-nF!fA)X-f4lri z{y&jPn8lC9mL-#=m*oZXEygGRvj3dNBs}}AO61k_9pd}&x@)T9WUx$u6^C}j`!2@FU4PZeiZy! z!`#bbE&5(wSv%dpMl(y+U#wc}yYNyrtKX;oUl)8K6Ck>R(}*WiVwvPYj`;6yzh|>X z@fvd4u-@bL7yl&5EyTd#&GhCU%dd>D_dXYYRsBBa$Ejb^f4Toyd=LJV{qg(fRp0u5 zWc-e2aAteN#l+LYxt>LlVf$b2KdwIuzNUWB`C<5X&%fP&mi_wm>-_I&zpi{={LT5t zrr&=56PUVL_Oq^F%VF1LmtwVK(qwq~Kbk>_q2j;y|JMJ3|C|4v`YZWYtB|ytQ;J=oCzFT*f+AjWbb94#Hz<4$K1nY$P~dC%9ze*%XozG z17irI7en!Xk^f8o@%>xsRN`2j6?Y^?$Yda`RK{N12a$AA3J; z{uuMI|Kp;M>YtK6ZunsGp5xt(*Fi5Ep09qE`h3fa*>6t1pZmr7+paH1zg_wr%6?2h zN&KiRuR5pxU3FziXTB=_SkX`XYuUDN7)h|HI?BD{?P8ba)#DFf`SxSQ?~R-jh5Lmr z@jd0gE;xhtG>gl>qksPWnfGVS-`7`u0-C-#Fx90EWKiWUFzaIGP@pb3-8$XNwWd488;K%Ul zpWQ$9e|!JR{5$*KmMM{ilZ}mi8{0*eY$h4TRtE3?g@2v?3jBY;aGc@#e_n=g#tn>~ zj4PO=+2(S!@f7eZ;Qq*!&$Wa@o?V^Imu(4K99tjjL1rbUYKEErp8UD-TjS67Kb!ul z{qy;k_;2A~nLqBos(&{9-1_sw&vQQ~|2+L;-S;J5xj#F7ocUqJhuI&bKcv3bdcWe` z?{}{6gx)TBE%a*5%VRH}y{dj){MP^7zK_mdD!*iYDf#O4CxrdK0JDUjT)t|))*-n^ z{98FC1wKl&3Y&7j=XaLdrraiK%=GhrGKV(LMb?Lmhq;=>;w35tKXdhPzF|MfbdX^a zGe2iC*ClohR$k_&|C@h4`mFk)?SuH2bw3vWn)7qwkMdu~{}let|9;{d;~yQSEzGkR z6&VegTo_mUIsfCt5ARv>i2>lHQ%Ou`}5uWXV0&te>VIRVX$E2WjfB7 z&T!{{A!8}?Oy&(tXPCHImM{k}D*o5`U;h8*zo&oq|C3-?%#g`6m-PtOMeYi&OB@$D zxp+_Ty76Ri&E_)We#jZeX3Ctw_=X{kq4WRAe~|v zr59UXZ+m<9{l`y_n?|3?4pxgv#%_?WnadFBb87AnvZss3a6 zTk$LN$FuLQKN`Oqf6e^z^2g$Tix`^!x&8I~-@P8Qc<*ZR+hxZ-uV&ycRAIb>p7OAj?)P+0%&x54*!fr%{;y%^WN}~{pWY+-|WBjzvum| z_`T=<6UG9@jf^}jubCVEhyU*S)65Xh==o3ZPr#omzk`0rd@cC8;=BG2mG2Y3NB?~F z^T>}&KP-Q<|8x1j{(n5fV}@FWoeaAeH!>M8oo3w4*vT}Dc^7Lo#~k(-EI$}eFq?2Z z&;M?_YjNd+f>E;!+S{1L1&}#Yr!8}n}rHRxAC!a 
z%JIJtFBa+LC}O(LlFBB`V#Yj)>n|S{=lj3*zyAKOX0u@1#N5q%gV~AE|If1@tA6PJ zn(^n~zsvsw|6KSX@?+x9tlxHjzx`jv$i^V?_u-$H|3#Vf7_TsNGf!e&$0YRk(68@* z`2Q#VtNHu!@7%wOe{J|a;YZx>!aqsB-F`d%(f(uoYyHoMzrX#9WSGkEn4yv3_&=$? zXaB3RHgfIee8wWmc$_hXZ2_k_*GG;g?E36~SS4A`F!?iYU_HS8n|%T63Z@co7W$dxljDz{3G_if@vbtp8wMSe*S&_=h3hCzl#5U`&aO{`uDZpd;d86S^N9z zUn|BirYr{k|26-={QvZ?>;FXNDIBZ0Zm{iOde30XV$P|^Q^viDBatne^(@O_=9A2C zSgx~8U|+`C!(_!^%Miy@$+ClmnMviJ;qS^{J-;M>9sHs5{o>cppTjeaq@-lwX;=*Qpy8K;ytenxTQCt`J zZF!uT(*NxHm&>HZxb)wvzw-Y~{#^W#`d#)%+xMXFQ9lL#Wd7OpdyX_{S8|& zYb5I;wp#WLY{yu*Sbnl_vYlX!X8Ff7h3PfZHzq@-KnC`I=6_6n@A&oeC;Lye9}~Vu ze>eGl_?zOlO2`9Irymi#>DQ|+e{pLTv~ z`LyGc)+gJ~*FPWq;{I*TcY|Lce}(@y|IcGw$@GHd1KU$}C+?6r97~%*)B6%B93n#x{#pmX(7=n0W=$F~%|m z?*IS)zW#geZ}ng8zomb=ey{l@{)^?8+%J}2dcR_SP55Q{%j9R+k6+*4e}D4*%=abV zw|tNL{^=X@_vY^|Kfe69^;7XT`yabM6@UK!arnFc57(a*@OZ8B-`RiN|93I0Wn9Xb z$h46;pT&n&hs}h21A9Ar4!Z!yat?LQQ=G|M&$&)>m2&ZNE#S1_Jjc<@VaE~8eu(uT z^IXOw|Be6g{(bqo>=(n&t=}8JiGIEJdGe?EA9+9idH?jC+uL2Qt6rtPX(ei;7>`Q7(VfFY3CfGvSz8uw~mJ^ly$#)6GP z_e7qGaY+hGn@U?sACfvHWh3PyIY**TqCrAOTu+oq_?tj8Um%YL_YE#5uD={T?Cxya zSROL*Gj{*K_RsKN^502+N`9;TlK83hSb5mQ*wfh;vQ@CDus>&S=P2bk$-%_Aouip!3x_nP5vLpHY|ci` zOpfPlS6H=J_c5-f*%pXk4~f6@Qt7-AS)8Dtn< z{$Kxp`u`Ay)r^fyq0Bd#mojf+-o-qHrHD0()t_|@>nYX>)(%!pHc|E~Y%*-(Y|3mK zS&y+~vt+Yau+%e4GS@S4F*f{P|4-)Mp+DPyz4&qY``K?5U)Ovw{ZjmS;-_67-99pW zh<)GjF5z9syX3bPZ-w8yd$sRX_bav6#&6EQ`T6$d`|6LvUy{C`{qg0O&|lvF1xz6< z(k)VpBuyl(B(%hC3Lh8z#=nvG z5VscBLJoCyUDlP%aZIX=CJf5|6aR_*%l_;4=ije0Kly%E{rLF(%XhIK`@TzjulP3a z+pljAzIA_V{PyBo%=fhK`rqxocYXi+z2?XLAMQVQ{fj?e7`hqK7;Z2qGks_3U^Za=$J)Z?#rA^r1)CWALADyU6>L-4gxS`y9%2<_ zyT$6mc7&~;?F6e9D?jUWmIUTHCRe6%#?AkC{8jw3=+~nkd%wT@#{KQdmua6>K39F3 z^|9lF)rTwZf4_@=SN`_Zn}|1Aud83Dzkc~@`KxCyjb7=z)_-gBzT+e7m)qYC{p9-X z`Pb+_15+q#Kl@Uyt2_aGp8QV){t7J?=@OeJ9wS*H^-JoIbc6JK=>t-pQcaTG66eIF z#Uw;x1UvX+d1rE;;`HE{!fwR2iscN`6vh+=f&Vf8KK{-8%lD`1m&ng^-#>p#`d0d_ z`kVVVzHfiN-uasF_05;FU(&yFe{=ix@9T=MJHCGSYWHo)x32Fee;oVi{Ojhgy}!5q 
zDgOKU@8y3#|DR%zWwd4NWqiQ+l2MbXg(->If@M03A8RyQJG(!}3XURtC<6-=w_x`o{X5{fF(J z*uRSZ*!461x8>iT|CE>wz9@cO{#ELmKZbl%{mAw4+J~kOp&yby6nyafVEsYogY$=+4^bcH zf0+5gN$###mk(Gf} zg+-FtooOQD1cu`OzW?O@-uhkfEAr>UACJGweb@iy@iphm!p{MpO+Qs$d~nBE`Is`uG)5?%SBbAg{IsH}>UG5(@?pwrH4dn!$?p*p5ZxqsM=n$@TVf%< zE5{Y)txTCro(yyUX#d*#>;3O%KSjS(yc2nQ?|sl0#~)9BUHmQZtLwY+cdcJF|DH2M zGwx$DV+-Y6z#+}D=>PG5N{rnsQEU@f^O*eq$^X9kGv;UckGbDve)#_K`E&92{~yo3 zHh!7&weyGbpRE5|7?>C?{cHbM_TQEvpTUH28S_21%N&jzeQa&4Tx|X944iv7k8m#K zY~vK>1xvt0#M&ynm|q^5C0< z4?Uk0J~e)E{67D`B)hl(r&zJ{O=$-4OTv=k3zRl##b|MBup{8RX!0D~sO;=d1m zfBRF$D9zT)UdjH9{Wqr}*CDnej9331{Qrbeg4uvcltJTf&Cl)MD!v+gWBb1H`;s3F zzxaPo`DOXj=*Q+C2Y-G2yNPi*%N*7$mZeNlOzzB+ST3{Vv#wyX=D5V+#F54RhEdLbc@N1amv3Re^&ps`hSk$D?=88_J56k z_y0)!*7)iAJ^$POuM5B4_>%TH_ru1wJa72ln7^@nGx4>`Yu{HXFPUF#eirex{_*w4 z`=4HZ{p9`W&r?6~ewzA4^4B}2A}$vZ4e29_j`H&*>_sD_($(yA#I$r&3{;Os4aU+y5v3JN%dL-=BX< z4Br@jGw3jc{`>#?@X!6<*M58Q^~_i6ubyA5KkaR`7^F;l{i-b9N9x*%qJO69*kGEeFKmC3G?mg$HL!UZ6u)SUMs^zu( zyDcBjeHH%I_t*O0iofB1U;aPMe1T1i;|RwEE>>P`z7U=>9E(^zSrS>r+3&IcWwm4~ z`}gPflV4RoUwvo!-uC_HkD{OVe^`J2{Wa|CldlWETmE|gXZAmf|3d${|2O{kXJ}`< z%jCfFpLHSoc1}Sq7tR-KCs-G<&E?49`pB8ek*S8k0bzWY5F8yNdi{Mv^Z=S!q@uBY1n@>l-H2;wJugPH`FjG`s#9g3|@1by- zEUU_Ihz5zPOV5(tEOvu8hOL+B5R*HT@_(jZGT&Z(Rrucj zZR@A2Z{uEvzNvd}{%OPKx1SGxHvGc&_1w4pKOX!N`rG^eEn^__Bj(-AznF7bX0vQ! z`Oi|qc7mOmBa-b7GXql@qXg3?ru9tg7!w&b{O|ui@&D|9-~Vj*ee$==-=KdP|BU~B z{O$HT{`aZhW`CyrasO-ZZ_)qL45o|^7%Ull7~C0uFimEuWSz=7g|&oLmGuBiF3TF0 zeAZ*E0j%0A3z>o$tr(>LYy3<9`~1(zKiz-!{a*CT`)BkI%^$Hp=Ka|6!|2EI@44T3 zzOVUa_)Y5D!*A!m$Nu>CUH1F@Z*AYozbE~;^Mmte&(FS}vOgJre*59^lkwM{Ux$C| z{xe}*!BW9?h?R%Mn%Rzpk9{R)FP8z=CXQrwZT2JVH5}2L^_=rL?{Y5S4B))Ok<4Mo z;mGlaeG0of`zN+a>yO=^z(4kXF8=oUZSlMC_wV1ieI+Dk&T^g9;+y;C(C)}Nz4Jv51E9S3>Xa=^!{7? 
zoAr17pSs@;zaISH{o(L^!8fySx4ssAJ^V%POW)_@&yk-~Kd<;a<@3DHyFTywoc>wo zbK&O+U!=bIf8YG$*3a7Cd;ezqf6s7_@dRT8(@th<)1=+Nv@5Y2RLrCYqGbqO<~PuIml$r_~(D` zzqG$Pf1G}=`o;RIa zh}w&N7TYY|DjqLhCT1>bCDJZ5Pv9uuJl-ih@42UO)o|=#+s(3{Ig0r)(+sA2j9iQ{ z3_t!G{kQ-3@6VdwEWg8lefgRAGwes?_xf+T-{ij8ePjLh`777A@NW#?<-dRZw*K4A zZ*AYxe~A37{ki4m{hyYLiTzy8l*n8>Kf#Lle9%*cF< zxsRoSRhKP+&6aHsTPQm>hb*TSrwV5>XBX!q&OMxroL(IJ+3&L1v9+=qu&^+DGVc7J z^Y7jtzuzB!ru_Kx?eo{jFMOZRe(d_7_F?XO$M-$&cE5e}=KJfU*Uw)~e&zj2^3{cx zQ7@OjV18NqYTDb^?@xae{<7w4*pJNLbN@*~OVM&JE#B??g+hhG8%2x7!zJUTGh}AV zD9FgkoRgN4IV#;MHCwV%+(oQOL{CUS;2y6(&ugwEt~DJ0*=$&Knf;i;7}qgKGW`3u z_^;|8!QTwOQhwV0F#W#qo58pGZynzRzFB|O|FY=wq0iZ0=6<>OW#^XzUlPBr{VM)V z>|4+`>F;ene*aYeedzat-@bpC{__3{|8K~!k%5UZhw(Y1HPbD|62=ThF{Uo&`^+0y zWLdLWma}|dImUXFjh)?&eG_{ahbl)5hcX8%hXaQQ$9j$j95x(v>@V27*k-a`XF1LM zk!e1oFvH(}HGj|iKJt_Q$I5RLzRvoh@Y(az(+~UKE5C1f_xA0fH??oNU%z>!^6Jft z-WMIu1D^Xo-}wCV^R5@>uj}7V`}ptk)GkU}i67 zea`%f(VB59gBe5W|Al{Bf3yBl{Auvx?)QZ6Q@_ZN<@ z{9m=ce*Cigi_6#RU)#Rve4q6__(##t<-aEWj`>sj=k}l0zsdi){@XAZGw?86_ zKZ79y=zM)shUESI> z_oeSoz2ki6_EzeRz-x_HPhPxxasEZz3z3(;FTGw`y)1lL|0?XQ=Z7tySA4tieezGn z-(`Q#Gi+hL$EL`+olBEPp6`NyhVXIGS7Og4HcI`F4wh|~jg>8tv6S(ZiIv$WEhXhE z;VkAZvP)=>fE>RG?-}m7T=zKVb9AunVCiMDV)S9i{2%vk^`EuBq<{YX-uV6Nx5?km ze~tbs^i}z*+t<}!o_tpO%=0<_v(A?#Uv7Lk^5x1GwXXu-{(Rg2-QvgpA5y<;e~bR9 z`qTR-@6W0~)_-+#0J^Zo}j2ry+ai!pCz-pyRgyqzVR zZ7%yJj(Uz{j!B$ST(>zFasB36%N@xR!83v9J9jwOU5;rS+8jICr?Dxr9%nwlIQzfP zzpa1H|9bS}+IQ9OZ@dVcquD@CPUip*lwWI|-7O3#zj5x*f4BJ@nak6)f|7SCy}4$d|XL-zM9 zI?SGonGB8pPyJ*0H}B8VU$Q?Ff293L{PFqw^zZ85IlgcHHtE}zubf}gzu13S_eJ1q z!q>vDUSCi2+m@%EXFWGB z_eHLD&Oi=U4m}Q5j*V=qS>%|fFwXfO|8K%y-#;6F-TATb`^;}gzg+$l{89Gf>kq3w zn1676U;I}5&5c)%uZ>@Gyz+Qy_9Etm(+ij9na|HYzx@39iyyCQ-YR@}|7H63=rzw-N$7XMZ0tIIdnZw=pHd{6zA_B;4@%&&}Jr+(G`-ub)w zPv(Cw#t)2b|26*J{4M=2fWeRH8j~KAD&yV%asT-l)-wt)%djqEW8;Wo%VyDGW@Vkp zQN?wWE12s&XA9?dj_vH6?3dZEar$xZ<8o&&Vp+p1%hJjs!NS85%zTh>_P^S-w&DN`CjlF^L}EU&Bi2fOSWEFPCipoR$M^li_9+B z1+vyk&8okY6Qte57{%?SGo?-oujTv27cE>a(9gl}zw_^Y243cRM)QCAf75<{{C?yU 
z)2H5VYksr*`Sbn8=fj`(eXacF@y+{N-Vdi=4}LxU>F|^N=jI>he%$yu?~nNZ^8ej` z8GbkXRQi?vr~m&w#_No0{#*Wi`zQOKIzuf(1;bm0ZpKdxa{sUXHT>7`|IPoc{}2AZ z$6&#D{=eJbzCYjp<}kQ0dNbZ-RAb)2e3tPKg8`Em+Y*kq9NXEo*`Km&b2PAXu%BWt z;cVn8=8R=O#X5!6hIJ#$Z)PqQO_nyMrvHonO#VIbcffC*v^qjzC9#Q@*!9_gctUCWD{k!)+1WgT z-b$Lm>dg5G+* z&3U8nM)pm~>lZJBUR-|O@#4}e^Vh51c)W>v=H>fi71I3qPqkR@LuIT&u_*1 zj6Yt|K|L6W>95V z{Qvd8^ZywCyZq1izwEyR!zqR)#+8g3j4}-F|6TtlF&t%F!!(b{pJ^`BUgm7pcD6Wn zHg+HOr|fe%w76ur3%Pl@&vJg}Si$j{!;s@W`%X4n);p}%m`^jCGVN!u{2%pq;vfFs z7k{`l+>t*`#R z*!ZIK-z~pA{dVl9=kFQ6w0?5`l>cS-%k$@nAIpCJ z_$BsR>DS|*FMrkjiT$JV+wa%gUygrR{-*u?@t663&i{S?4*$LM_rkyH|KI;#`hV4b zZ-xsD^B6=J4*nNn=w~>=aEw6_ycho(Qy%jXmN>Sz?2a5Z90xeWIfc0Tx!!VhaA$H$ zaQko-a@KL2V&A}4%zA-Em*pb!OC~19!2hTJmjBWHef8(8A2YuDe4G8n=Ci}6h>!gr zPP{w#R^#o5H`CuDBT#d2fEd+xFr7 zC;6|FzI**%%h1T~$}PkHfbR|eGohy46%#XAmqCbm%DgL_tQ zze5?XGWIj6u^eDkVL!uulHH5rEQbx(2d+47Y3|uvv0Q3g2RWlTH*u_I&t>UUva;Rf4TRC#7mQx_g)&j`T9EJo$!b5PwHRgei;82WXNO-=TzYf;M*W@ zUARgtOyZDal$4gtFS+S*Z{+tVBq;4voT0E;`kCZ52_Erg(QZLmfegO&yqsJW?98ka znL8M7|5f|V{A=5GaF~*MlDpKlc1k`zi2q&G!jEuKqmn`}%LA-wMB9 z{jU9!^}FZS&!78#-T77Xi|5zJU+RBW{gL?7_uJ{uzCSnry#Kxbx9K0dzkPoT|33e_ z@L$<~)&GnC`TpDR@4&y>e^38D_}lVN`~TMe|NpOL_{LztSk9=z#LTjnHH|%s;}pjW zPHFC4-0Qj9xS6@*xVLaMbMbJwa7A(M=3r%KXS>ZB#(agjn&H>~*ncg5?);YedH9Fi z_vv3JeG&Mi@bT<>i+4ZXn!GvqX44z4Hxh5|yb5{!{?+4G-(TH%q4cundC2o$&rdzS z_agU|_Z!Lgvp!z^Z2WEI&ndq%8DiP;I1lpO=l>uqEw)nPz2sx5oia0Jr^uJf>Btu- zWGLn;K313{%Pkuu^-X-b$Yg=VC@pa{JZ%i}lyapR;}i{C@E}^Y_i)OaCPP<@|f=xBu^1 zzlHu>{_XTz>i3`DMt|D>)cn!;tMf1apWHw9zZHK$>oA}HJ@;4o-@1P?|Ly+Q{=ffU zoZ;sG{QtrKKmV^|;9{J|xQS^ZOD&rb$70T3Tmd`_dHi{LxP!P0xIc0QaaC}ta*A+n z<>2LLWIxQdl=TZU6O$6dx_@kc&;45UW9ql^uL)l?K9_t{`e626@?GTH2d{l!%e~(7 zD(cmVm+`M+UirM%eqHkF(yMbX`(Cbo@$|*Y7w=zQd7bpG?BlM_N#A0BGW_Xc_|3Y7 zQ;GjE{|(`fq9GDCQoE(ZW%Fgd6u!x2$)8g&R{X9Qt~g!pqU;%I14$*ZO@d+k54qQK z?qQwI9KiI6;mQ9;e;j@%|9bs%;g4J2mVWK~dhDy**UevMeNFj(`$y}~q@U-0>HOLC z$MH|y?~lI||8)Fm{_XPn@b4vmIR7sEbMDXFzh?jB|E2x?_NV9XroVZAr~Ya9WA(T8 
zZ^2)gzl;B_`{(-K?7z(a!2fgqm;ZnC&*`7Vzv6$3{$2UU`G48}8U`oE6ecB>C#>ci za-4QtPq|ie3-HY6zQ=9H-Ol}mOP#BqGl26y$25-P>PGA&H1wT)B6wd?_a#tdRy}5$ZP)B%&&jGn)+()tBbEVU#Gpccy0T7{wveh?5{*# zZGS2Ca`LM?uU5XD`M%}TrZ20$-}&|M?_*{sc2(YY{P{u)#1=?oNC(IS$i9;;l3y-= zSk6@5RpG0AprVhwyqu`qO(}25St2!pKY3Sh-DFc`-p4TEAK%|!ztn!({Mh-!_lM+< zz2ByMoA6EQoAcMsFLhs?zLkEj`dA(4ZCH^V?U;bb8zstY5f7kxy`s?t= z=y&FCg+FzFp8r|(XWHM?e?R|y_&4)k;lK2Mm;ZAAZT(a5XZN3-e?I(i`RnyB;{UDx z+zf0ClNcs2G%)M}-(p?E@Q<;NDUsEfy_{2)JB}xWmy<7zZ$9rg9(JBpT=|?wIpo>n z*hATb*a}!BSr4;FvY0cgFz)y-{`d7S@gIp_gFY#K=zII~^{ZF1uS8z`c_5W9wUtN3Y{7U|%&dcR5v|pTi;ra5xYwmZ=A2~j6|K|3i|99IzP3E&~ zCOrT7UkhFkKPAC0TO+$z{*io!Qk2psg;vEE3fYQ6iWUk-WP9Y!NnMb|Mq`O|33UZ_jmQ*&3`%mUHd2hf7$;n|8FrIXJljMV7bN;z&3$hobv;x zJ9ilOPVPP2$GI{q^a) z_E((EzsUsk^Kd)f8!=*u^+p1nHu`tWPxH$`tAy*c|@{!Q&`*4I35 z`d*!X?fGi;n})Z3A0K=^^riUcN6AJcy<{aNr+2%#DA6l5&u>H&->r_-|qj}e~bQ| z`1kW)-G5z%PNq8Mb8Hgqx?CT)Rd{*$>-oI-Bl*gCJGfQ3=5eI5?_q6X{m#tC`kPsu zWe*EK%Walc=6_6Sj357d{GI%({QKQ6Pd^@i|K`ox*DqdWy()V(`BmiWnl~|TAHQAw zZrZ!p_dDLLduROa#M?!0t=}@g4SZ|xmgU{gw^r}F-mBK~B z>jjDg0(teha@anzI5F(_`}~K^_l_^SKJ|aN`M&JqnUDWJE&govE#*houi3w{{`~kO z@lWsX#_5dP7_Ty3WlUg9XXyIx{LlYS?XNRG_I$JXI^lEZr^g?Ee>C~D z@sr=S^-p0R4}IwWaQpr0j|)D|{`~Aq+Sjw+|9rpwqvGej zpT~dh{Hgmh?}yiqk{@@!tN*<6qwnW~Ulo7&{(fT6Vf@N6i9LkNj7OAj8NZ9bRgnPU z>!Lbh8sd}1?ukzn$rN=F=@#w~o+G?Vc!F@hu)N@2K_Px#zAWxXoXqS8*y5P8nW`Ba z|IhuK`)AE>r{B+iFZ;dw&+|W*{;vKT^-u2K(|@Y}9sXzii~Ohe@6cc6e_#G||0(|? 
z`e)*wm%knVocS&L$MR3&pZCAH|9Jn7|6TKY!te9HgMMfIj{aTy=h`2=f7Acz{xA5y zkHLVkj473A6VooHl}r(`~Uj?@Bdf-fBRqRzxaRkfA9a!{#W^L73h|021Q0iWF?{`cYh!I{o*(OAGJSr zfBOIY`J?=I{@-nXul}9=_s!pRf1m%I^f&A8xxcIa3jX`@*YscYzeE2-{~P}|`)~ID z?Z1eBTmGi~75!WP=fm&5-$lRY{r39J|6A#I_3z`q8UBd!c&O@Dd+UH%*OkN5wle{=sQ{XhC&l!1>ykm1z-OaHU}+x>s?@9)3)|7HJA{lE7A zh5wrvEE!@LA{gWtK=<2=GR$L;XFSg!$@GLVhN+2pDN`NuWab-8O3W9S*E1Qi++ogS z{=?G2QpS3m)s8KVt(Dz`VR zmtPltZTVIFYt^qSziNM5|2gw#;@{+dPyRLk_xu0ppX~qr|7ZVK_&?|Wy8pop0SpWb z*Zwd0Z}wl~|Ng%LfA{~{`#a@ZW(U zlVJ{n7{d|aoOPNkHJz=(Cj$+=(EXd5r%)unebb)ae<1|JV##+WFjNOdB zOubA-%-k%N%r}{nnCCNpW?Ig?n%RWKpQWDJktL1Afwi3FGK(q8W|pfg*{nZUyjah% zEM)0p?PD=wUBJT2vWD4#31ce5?f)MCb^q`AFZF-t-`c+i{vQ8V{qMn_puZgdD*iS6WNUzfiQf9L%@^>^{#n131nF8t`MJjVHq(o8|jzD(gvt&H+a zkD1JwBAJAkl$n~D7BQ`2I?6PG`5x0Xrf8) zH4KrAe;HpeUT5lHDQ5Ly@n#8U7GSAjnag~UDT!$(^HSyy%wkNuOhrt0m?kiLFy%5% zVpziPgOP#BoAEHiVut%n@+@bV&oCWgs$i~T+062aNr^LeHcCS#_4hPeO1|3w-2 z87BW{`!DqW(SK`(N&lz+oAuZCFZ*AiKhu8S{Ppse+3%gdCI7_ydHQGe-{OC%{|f&x z{j2+H^mpCgnSbm47X7vVJN<9f-`KyG{;d0x^=HZ-#=nw(wf-*td*E;Azw&=P{}}(t z{JZ+^$-l;b1%LJb2K;UL+y3|S-vfVl{yq8c*S~lF&;0-P|Iq(A|6l*_`KR!&{O`uU z!vCWGvHW}f_xQi;|Lgvr{b%wo>Yw`mKmRu}OkfCSJji&K33R(=4f9-PUzT{5YUZ^} zIgDY9wG1f?whSB$-x)tJGB77F9b~j*-2I=G@fU+QV=Kd^|K|U>|6Tga|L4c=DZiKe zYWdCa`_dohzkB`~{CoL#-T%}7H5ror2mOEW--cn=f5*QIe}Da{{!{wrnjbPh)P9!z zGWc`(ukgRjf6f0G{)aR4Fns;L@PESpA_gzU!;DN!)=WPc^_kgNzA#T=&SCn*DX4#P5rB*xneDU7oj<}-*g zR5R!^H2k0O|NOrb|G)k}@}Gy{)Bk3Mv;P6{xZ{a_kfAjw4{j2%^>;H=X!v7ooulRrK|BnBb|I7YA`2USz7vo;WS&SzcXEJ?b z;$>dVl*bgzq`|y{c`|b>OCd`&YbD!jwi>pHtS4EMSXEfNSr}LZSe`I{WA7ROZJ_(M8J$GB7e+`=9?moH2#b z^WUw1k_?L&elcET%wS&6q{XVs_MiDbQwc*1L)pLHzdQbv{(A8H>EEOO`4|EimM}Up z1~HytaQNT;&*cAx|6%{9{A>L$&Je@!h~dwFABGMFL&kFqeE;A6-SU_DFW(=5-!{K; ze*OQg@mKQi@xMX;O8$rczxMC?-{pVO{#^aN=l989i+?5kw*0gGkJewSzZ?I2`6Kyv z!JnkxqQ7;1t^T>==b<0TKMH=7{doIB?B}ANM}AKI@%{VC?-@UYzpwbp__g+H#rK-; zFTdUT_WAq4pPzp+{yhGD#`oPnw*7eiJ>*-`H?to%ep>vR^;7T{>z~Je|NJ}jSL|>2 
zzit2g{@?%C^pEj>2*WeRZ;b1i8kr?oKC`Z5JH)z{-cDrVgfsEcaL#S)MUzGWjs~G96>8`Y-lR`k&{29>$>mul}w0*Uiwy zB*5_EztR8h|H~L;|L^$!_^-(S%l|U}efz)sU*A8Ge;N!m3sD45 zwneOrY**RJ*qzvavMpybWn*FYWD8@y!BoaHkRAAo0Y{Pt%*@-2O`2!O_ zlN5vN|Hc1;|IYaH{P(8c*1!3G3;s^{rSVJk*S(*1Khu9E{CxOh_Ky=kzWq4!93`~+<#gBD*d(L*V5mgesB8|^e5!6-#?cB?*Cc;EB}B0@AAJ7f4%=6_;dO9 zr{CItX8g(gyZdk1Kj#0>|9AY?{crt$%fADEIsQKRllf=ypYlI#e>VR~{g?W0-`~0a zZvOLR2x0JLNdN!hpAm!4|4)DY|F-<){kQk8%D?r0RsV(j&-w4gpu~{Dz`;0=;VHv& zhG+k^{>%P9^}qeU{(r9j5B~3ESj^zV=*eiqjwGPW|D{{Q&DIfLK-zyAvU?fBRKKkt9}|Lgy3{%`yL=l_%c8UJJc zuln!EV8&4RKkKTrOg`osTM^KZc4 z*1r{h-~ave_x9hve{KIQ{~P#M`ET6cpuZ-6mH#sTW&daW@8G}h|33fo{cri-_J7g8 zKYusaNmi@l=XW5^H|Can)`|t4ooeVCFpBT9rXE6N! z@5``)f$QJ0|J@AQOft-6%wa65tS?#XSXo$VSqfRLm|rtjvRq|VVEfE^hOLFokRzI1 zjZKbCgUy^Rh}D2Cmh}+JZI*W|_nB3gotX8Q@|Y5tB$?A03mH=wr!vShGBfD^=lK`) z--zMg|AYUh{IC9h@c*&@VgIWCs{Sqi8~ZosZ}^|O-(`Qk{&D;p{Aa?SMSo`hG5Nds z&)eVTzgPdB^ZWYmW52_GGyHk;$KmhCKRJIq{zU!>|FaFWQ~s~Tzj^i_5e&;Nh^|Lp&#|1bZ4{Qu(rhyRcN`!IAd)iOP1{>!YvqQDAjM~5=) zW;A7DVcg6Z$7s*Q&%Bg5o5hCZ4NES|OqNH?*39Rb{xC8!?PT1-c#zS9=^#@P^JQio zmZdB!SQ=QGS?)8>W!}o9#`Ksmg0Yd2o3Wm8Hlr+4Cet*gGfb11HZnyqi7^#3vN5h= zh-P49DElAwpX2|pf6xED`1j)9^MAkoUHEtZU(i3(e;58X{MGoo=1=_}+dmn9*#B7n z(f^bGXX>AfKfC|j`t$6M?q9yYVt?)bYW~&v`{~b_Ki~g+`E%e8|KIvQe1G%)TK{eN zoA|f<@7=$de^>ks`fK^O;;;Q*wZE(XWd6Y*3$MDbAKQVu<|6cgp=C{Oe*Ixp^*8N=j^UY7~U#EX1{_g(c{a5zi z(|EFR1Um?kqVWU6AC z$atG^0%HkdAme(5EeyLDtQhhbgcu|l?)-Q7&+&imKdXO@fA9R!`?L0U`tQlVN`Jll z`Q_)Gp9_8#|K$AX^7Gq|_diblSn=cakAFY@{8;rf>gT(k+kPJU$@S~tPw`)qf42NI z`?>Uo>yNJQ6Td(HZutHEca0w{KOX+@`|0q@>i5#$-hW>HR{Qhuch;ZQznXvF{eQp^ z#_Yr@&$fX>iFd02r%0=4w%BcndE$9u+QPR4p77i82k@3~{$ZcX;mSFOJ%V)!n+8V& zyCQ2H^AQ#ime~yZ{$~Bv_;=|~_^-D=cmLk<=f&?Wzia>W|CRmA{AbGVPrp=tz5LPj zW8sg7KNNnp{7nAU`1|-D#eZ`DyZ)Opykp>GbYMvNul)bzKfZs`e;@ww{9Evs@$ZvA zNB^Gwcle*~KlOjd{yF_W^3UR*-M?M`UjDoMkK_N;|B4JH4Br^)81FD1W$a{(1-Hv| z8Iu0X{=f5Y*+1KVZ2$iJP5L+a-?o3({@MMX{eQ*(CI7eoH(>~1XkmHG^qBP-do9~B zP7RKe9QVJRzX8CvFpZ~v@|40AtW;9`BW{_bpU-+O> 
zlHZ?w|Mes1=kXs(KZ}1p|GDVrtRFf*Z+`dxmHl)2ufM+nf1CdM`n&&s>_2vyDhdts#d~Ef?0Hy5W6s&U?#sb*J<`@j%s#$wq~Y{%+px;SY4U@ z8Fg7SSgILs{7w1G!_e|?-tX9-2Y>tibN{>M_l(~Y|2h4i_Sfal{y($-vi@QF_4udK z@7Uk}e&+ua{+;%>^q<$ii~sr<1eipbau_WcI~k`i?qj&~zy1IE|84)b{B!zu;9v9q z=Kt&dP5HO-|2BpS2Fw2v|3w+LGe|L{{pV-UW~^cS!(hi4!gQR8lc|956JsXRZpM5D zA%=8@AcoTa$NusBkN>aw|L#BL|GocL|L^$!1SzV`N<^CB*XZgVFttN|11Ch`Jc?-%izjzh9RAC4WknyD@F(|An;)h>=6?V6E#iCn_cPzizfb#a_+#pi**}tg zi2u0rUE;^?@A==;zUP0B`M&zQ+4t$+k9^nvUixFf4GGsCMGL$oH`=9df_+Q(9 zo&SRV>i_BeBld6ZzsA2Jf5ZPB{onC_>Oao^!3?t)uKwTs|1g6eV>83S|JDqGjAt3x z8E*d1Vc5iA#<1o8-2a{b{rv{kQ4g(SM5nul+yvUxT5CfsOe9O9#tm zRwg!Qc4>}Ow!f_MZ2wusSZ$eknbH}(8UO!Z@&Dfci~lnim>3Q+tY;KsQe$Fedd29@ zw3{)LA?JV7zx{u2{_*+k{AHzPW!_`riA!<;VJ8CV$rc zo%8=G(`UAeJh}oaMHfh1krI);A`>NhTS`5{IDZy=&;P#QN5W6FpY}gXe_j54;ZOEo$$v-wrT&*-P-l4a|LlJThG+l3|NH*; z*IrTi=A2gCOlUz@&s`P}#U`=>LX z8a~y2^7#DV^O-L(Ujx29`zHP)-w*0zwQ25{AK(n`p^1b=>KmF z{EXfVa*R8e)-Y{ic=^AGk&kJ?f7!o#{x16e>!1H0?mwmfj{M*DPw^igLo`z<(?!Om zOg1ddELF_Em;{-xGc9GD&+z=e&wq*kum2tRm-DavpZ5Q<|BC-V{_Fg|?LYJX3I87a zi~aBY&;4)OU!8xJ|C;{h|IPoW{omyO_kSt>FaH1hzxTiP|JVN({&V{`_wV2TlmDAC zKV|vCBFd4;xt2SZ>jQTK&u-p6UMH?|oD!U;*cY)zFmp0DG96+#^ndMtFNPEUW&d0J zd-Ct{zl^^tfAjsy_!<9w^*7Zo{GXdXg?~KzVek9W_fy|neR%L8`qP6?ZeO;2vHBYI zZPPcNZ<*h;zuy1)_sh1=k3T1Wbo?y!ap%X`pL{;v{kr$d$?qF~uKUIDf6-qlraeq% zY=)d2JnIG1L_MUUr9R30mbH{Gmer9~l{qPMOG;k!n4r0Ep3p!chsMjKeB%}{r&kj=bz^Pum2qWb^km0-;}}dzwZCb43ilx8T|g&F|1`& zX4w0e<=@W#rT@SFVgB>=uf_lOe-HfO{in(h$uQ%8Aj5CQDNOenvl$OEK47$D^kDG( z-}BGmpWDA(f6xEf`e(vlnSX2l7X5AdH~D|d|CIka4E~IYjL8h`42_JfjQbeO8D2BQ zFmf~cGk7x;F;p?U{=fbIn*WFXH~;_o@5;ZE|7`y+|7Xvz_rEL4Q5GxqM9$yrsk|S# z3VCvQDtJ2ha(IHdmvRVjw6Mi6yEEPRfBL_|KjuG6|2+8Z_^0akwqG}Yb^NaVwd3dU zAKE|ed=L62`1Rc9_Rq&Y>3&l9`254C4dLdb%YVKn88C+T1i`W-1CNdjxRI#o3zx=N` zvl;8$|3N=1eg`p3`1k%t!S~GHPJd_oO#Xi9$C6*ae=Ps@@%#0kqQ5@=$ou*8*NxwH zf7t(S`&;;T)!$S9>i^sQ6aVY-Z~y<-|GfXY{YzwEVX*qw_RolM6XVl=2mkK=AIDhr zf8(F@zs>*U|Fiu)@Tccr%l~cvvi{xquf*uh_?dx~F_Cd8<4VTo47dLW|KIX|%m3hi 
z!GDkZo%>Jypa0)oe;@qQ`hVb`(Ekz!PR3S-W`<s0s{~r6R|L^SIoqr$x&H8Wke+45OLpDnz`x&-1-2b@jd8hG) z@#XUB@a^aO%kzupCD%`m2W%@@4l!jhX#acq*ZR+=Ut+({{qp(s=U3D(^r2&_%x{I?lE3f% z_Uzk&uae(>e|Gvj=X1u#-Jd6X-0?}|i{F<^-+q7l_w)Im{r`fPdRYv3Lix%??}~Rx zUX}eKV=J#N?JZp{{Ypk&ibKR-*iO`0w1Kak)0@|TKb-q1%LP_f9v$uimVSn%YzsLL zF@OA%_3tY4Oh(7wx<9%9-ut)l*QW1$Kiz-F{k;86>c_>O-+!L@5%4qg*W6!>zt8^` z{v+}G`>)zR=l;C^HS4G8Z;ro3zaRgc@jLwAzQ1|D{r;r>-}rySpF_Wm{*{4Z>X*?U zm47~elYa00z4ouuzkh$H{8MB|VEoFsl}VL3iTNtC2FnxXb|!A7DNNo>hZ&Oo|NlRY zVf+6@{{;VgGn6s>`=8A4j`1*4CsPhn4D)+tALe)_P9_7U4UDrGPW&(Uuke4uzkPp8 z|EB-V{OkJH?r-qlGk<0NG5&k_x8R@j|F-{1|0Vx7{kLN*%qxy%59~wVg{7~`H_9NG4?k|78-1s8( zE$kcf_nqImzi#`s|I4kf^FB@aJns|NC%I2QKc4);^ljmH$)72IGX6<2F|br|?c#ka zQY+ppX(YQ=RzSf}#z&@2rdxKC)I$+RVFA$u5nsNKoM(7u^Bv=|V)bMT66rB>%Plp8o6NpC^Aieuw_*|1td$_DmT}_ZYnyPcvjQ zurMh8Z}}(xkK^Cuzeaz5{3-bR;P1=7m;M(03;TcV|NH;a42K!~8Iu_`850@zGU_v3 zW@KSH%BajTlj$DYF}BBSo;-#e)3`f1&v5?Ze89oQ8O@f%K941iS&M1O|K9&{fB*dc z_dDPh=kJ}rmj6=!{o{AsZ?8YI|E&A1`G?`R=I`WR**}GTPWyiQyWcn2uSQ=uK1Y5w z`ZWJj`lrL61HNqg!u-wX+k@}n-_?Iq{5bhT_GjRC|nn8e?RkLj(S!ZhD?SW)*2Ru|8{?O{7+<1`4jMy>(}m| z-@fho!tt%>8{^kAUv7Wn`r-9G`A6~Zdw<$~@Be+|9}B~oznOn#{BvLs`X}>e`rqaM zz5a##nfLeV|IYv0{@VR(VR+53?|;;PCx-P5um1b~kNO|~|JT3A|Bn8v{#Wyl@qgt1 z@BjY)6JcOtyv-2Fz|LsI6v8x}QJC>Nqb*Yb<5Y$ihJ_3q4BY=G{^R=}`~UmDUH{Vl zn=>>rWH8)dn92B+k(KE$qZpGh(-lTF#?=h+43q!2{%iVs_Rp$6UVk3^p8Nav@5Dbl z{~Y+^{a5tg+kYPa+y7fIh%zo=^kB?o+|TIBw4E`F@jF8g<6?%>pxZZ?WEeV`=dk86 zNpK~yhHo4{hjeg)c4pQ6TW->X!xf5o$>qjZ_mE5e&_gp&4HOpF)2>|DX5o{h#aqiWyB9>i^3BOJ(T(fAi0&Kg<8!`FH5g z%ioEAb^cZUW&FF~Z|A><|3v>+{@?YVm!Y3wF+(gvBEuU73&u4Jx(vMxc8u2Ew0 z?_~&P+{aME@SWi%qaIT`V;19m#vhEDjD-yG3|$QJ40ZqQ|5yLt^1t){{{K=8CmD1Y zr5J-5r!q1!eP!IiIGwSC@i7AnL&EF$?Jn|#)=g}XJe@^}J>W9J4>pw((+W&O= znei+0_mki0e^2~lV>rvSi}ftqBfczw6k!gLm0}T6suEMgLnOY*h)A`HXo_4G4;T03 zU%_>b{~Z4Z4qK))*8QBfSa$#A`G1A=JoA*l$$tcyB$;FXz4*h%P|UdR-|s(*|FQnl z`MvDt=3i@n#(&HHruL)x`>HSRzFzz};g`#gA3wVPy8h?*8}ujYzdGZ+f9`*m{=4~K z^`FF_J%2v`HThfpTkiMj-+z8L{&xDk?|14Sx4$d@9{n5iPv-ys|AGt(3^y5WFs3nG 
zVcf{5#dL8ogr8DI*{bCGdyu)yT z;VgqH!_@y7|EvC|{eSgu?mw3Qw*PPZTlw$6KbHT@|L6Ym{nzks=D(PKum4W_d++bh zzv+KJ{t@_F@Ymw+y+2R>Z2If{&-UMvf3yB4GhAcPVBF0(iD@;{45kH4cbHmuuWy($^D=GE_W@b42LaO5SJ^* zdbU$+S6COZS}`*-%w{V4fB$dx-vhr6{^9uJ{xjs4%kRfOU4O^^{QP6;@2x*){=EA0 z`;XnfPW=4;BjZQj4~6g6-&(&~e<}T{`{nEB1z+ZUDg65JYtT2H?`l6He#HOy{A25n zbw7K4y8PVqQ|u?-uN^-={Fw5i{zt^m*WaCg9sjBDN99lP-zWb#7@}AVm~FX=cvS_2 zgw6@6N;HWd6I&!6Exku3M@(BZRpOZBM_~uvPQlkg4ZJ^Cf3T%uGN&`Mv#e!4&h(D)7UOG1 z(3!Dk7^E398ABNb808s@7`qsS7|$^LU zVG+Y$h6F|qrU<6vOqZEVm}fJ8X4YeQ$$W!(Df4b-CgwFvs!ZP*V;S!-7%&(!B>b=b zfA(Ma|NH-p|DXK#^Z#px8iqo)K;|oKm)S$vDmaYT9N8?`3)!Vv=des?4P=pHX=Gf^ zAjkar-`Rf){=WWm>aWC~yk8Z6GXG5dwe+|3FaO^Ue=Gkw^UL`6uU`*;EB%i8&Hek$ zFUemsev14I{c-C@^>^Rzjo<6OZ~NZ&eeU<^KR*44|FP=Fx*s!tO8sp3>GAXX&&*#J zf0q67{iXdo>sQ*Z;$Mq@efYKF*QH-`en_4;rO8@Tsb?~?BpVFV=KSY21`Kk4N^Vf~vb$)*N z*7a58yZleDA1lAj`(E|)(od0}bAEdLUiN3>pKE`N|6Tr{!XV60!Z3}|fawUMCZj2% z52GIA1_oCKNd`^^euhi`Q~z)HzmVZLLpQ@7h69W(OnOYlOh1{XFz;gSVxG!;fcX(K zC(8=vV&-P%Y0MhTu1q{knoM&UTNrNsfAoL%|6Bj`|2g~%|99oD(BHFvUjLc*=j-nY zzc>Fr@;l?V_-~irUB5ei>->)Wee$=@pCy0({;B@^_wU4i7yhOGuljGmu#Djt!+i!b z#}7g8{?CfA9YN`nT)Ptlvlf)cr31rSr@1=a-)ge)j)R{h9c4{ZE5mDZk8q?ftdl zSNSio-)_Hnf7}0F^6UAp+rJ!ti~Qd7tK*mAZ=c^bzfyl?{;K*__G`mW&0i_M?0(7r zTK9|Xci`{KzgPVZ_*3xr-Jjim!~fm>oA42a|6BEU^`E*wQ~!qjyZ*P}Z`r@c|Ct$z|DXN; zgu$P2B7-}_H3lKZZwy}s^`?uiVw||=do&KBr&;9@Mzb^;*%rnO8yOma*=7!NbfWNc(KWjw>s z!(ha~$MEIV<^2o(SMks5pU%JE zf3N>@U^HR8#L&(7k>NZ;EyJe&D;c~QpZ&l0{{w>n<4nd+45t}PnAS7eFtRXCW+-IL zU}R(nV>ryP`+xZV(0_0Kw*G7VyYmn8U#q_h{^tID@u%|dyT2R%UjDoF@3gx8KSh5I{8{?P>yOGG%Rjk)%>Ib{S^OvbZ}MO1zYTwH z{k`z_@?WXH4S$dRwfUR($L`O%KOTQx{_gl4^Lyd%(BG=R!+#h2{`C9*@ArS!{gq|# zVF6Xg4_OS^1K8)Y%wvh=u;5z3wuhyFvx4_B*G9GjT)qO&dEGhpa-HM9%=eP>GABP@ zF<&F+S@sOBlU!=-t6283?qhw({DVo9*`BGH;okqH|1s&n||H;IrYc-ALT#Ue;WR<|55#u^;i7QsGp6$T7IYemi~R{ z_t`(ve>wjq|7HA_{%_{Lm;Z$STm7H;|G@tV|GWR6`+wtq(SMWwo&Oj9_xS(w-_L(q z{~P`<`oI4_KSLLT1>;=CyNtq2lbJR$tzwoEgvHvgsHUE?Rcj0g0 zU;V$Fe;NO3{?+_@<DD<|Ly$S 
z`ghr1-hUJSt^K#|-`9UZ{}=o}|Nrg(K!({2Jq+Cpvlt#R++(=K@SK64(VS6~(TLHG zF@!OSv5s*K<9fzDj66(XOpQ!Sn1q<)See<(Sfkjcv6iyPva~TRW6ELHXX<72XPU9 z?z|NScd<@HPb*TJ7jKb3#J`LXjy;19MRra#{PVEWncgW;#gFM;2W zf4Tq8{G;$!^)J`o2Y(O#+x5@?U+2HJ|4067{}=hs%&?7NF2f`S4aQ%Le;At>Co%qI z+{5^lp@*T5!HuE&zwUqC|Em94{$Ks~@t@ZJ;{WykEB_z)&(C1RpvDl-uz}$s!y$$} z4EGpLF)U{&WN>8QVR-ZZ%l|w7U;cmn|H}VM|5+Hk8Qj4*i=l`ii=l=gpTULU>i?Yo z%KxAL+xc(Hzg_=M{d@TD-oHElWd3{nkNThVKjMGM|Cs-_{{#O!{1^TI?BCgc&;GIf z7yhsQKjZ)U|9}3=GI%p=V0aDcT{DI;W-^8`x-i-^S~J=+YB35jaxlh3PuuB1SjHG{zr{dl_dl{$f;Rn$DEL zl)_ZRoXvcnNtM}&*@yWk(`Clfj29ViF$gg1`Jeiq<-gLuPk&AR-TJ%a@BY7U{~r2# z`%l^*jXzg@Px&qW``fPzzb5_i`4#=E>Q~S&?_YtxZv5K)E9=+BUxmN&e}UG68UM=o z<@D>}&mTYKe%bz#{^j}0^4Hd1TYjMMGykmrv*eHa z-!Ff&{_+3c$nc5fB&#ONQC1^%UN$zC1*{i2R&YFLli*y))4XjQ()^ zQ~I0pE8th_AGtp#f6D!m`qTJ(@(A(K|^!S$DE%Ne@*#y@mJRGE58l@oc|;HSLko$-#>qO|7rc(@Q?Zbxqm$WyZ`6@=lP%c zpNZkf|Jwh|49ghO88R94850?AFt9LgW$b4B%@D)*pRt3{obf4R7}I^mmyC;o zasPY&7yhsNzx)4}|DXSJGZ-^CF+?%UU^v9EfnhhpLx#N!hZ%SnZ5U-3x^SsASvk1~8_OlPiPn#z2Mc`}nEb2!s+#sy4`OlO&9GW#;`V7|lL&Ya8g zn%RZ9h?iBbcR#p(e*3Zg$AKTGew6)C|Kan)`p2j5DnF`z?ECTIN6XKa zpC-Td{<{B5;kWPag}*z0$NfI`d;jmc-#LG#{Ym+A<>aU}} zTmL5gz52)EU*G?||4RSKFf=o)|1ZnX!`RJui(waI1=CDMWyY_Ja!dh?28=ryCo*y} zrZaA2WM#a~P{`=RxSfHOF_ZB=Lj!{~V;SQK1~x`+rmc)y82G_`v*`>tGNdv5`JctGn1P?+-G65WUWN((H~;_qKjZ)JfBye#{)5u4;Qx338vpJ3 zC;I=%zkUCH{ImWq@n7nH?*D}Uw*RO9fAfFm{|WzJ{kLNH^#Abxum2DHZ~8y)|Dpfu z{vZ2q%wW#&`H`Z6*wsxwwI_A&luy2qr;9M1BK=^c|M zvpCZlrYhz{=4h7NEFCP8tUFl_v)o|`W^QD@#e9S5I@5e6JthOD7RELPB?k5Xp8qWW z+5Fw`$LG(r->-hp`mOZ)=CAf&yMIpj$@g=~k4HaN{+RhA@`uU~$sd2d|N8#wgOW^%)#lj97Ote_*-I_K;1J^&y)FR~AE@o5N57d*{3kB5U)a*}j*2i~R2JW8wFU-%~-m zkA6J=De}kVPr>g~zuW#?{$ud(1!y(OKb`;VjBJdu3`-bd7+*7}GDG96>AVUl4E zVA{yo%M`(UhbfRLlqrSj4bvs&Rm`qTmlzK-RWJ)PPhm=AddgVGIE%51k)KhDF@W(5 z!|(rE3^y2NGc5bh@c-Dq`~TGc^ZXC~xB2hyzp?)W|1J2t>+j9K7ythGEA;RBUygrS z|I+_){tNxb`+xTTj{iyjC;ew*Sj6yy;WWb`hCIffj0Q|%OfpQ|OzKR388w-1GfFev zWL(84%BajJ$r#6YgrSDPj3Jt#nn8x)&i}OkTnst?|Facxrtod!yv88SbeeGvM*~j; 
zrzE=y`vsP%Y{pDx|9t*m`@fR;B&##i%Kzbi+`gawIrn$|KaKzPzukW7{yhDI^~d~g zE56u#pYzk~xA8CcujZfUe{%VJ?5n`{kROM?KlvK)b=H@LFJ50hem4B7^dt0V(ofTG zQ$HX7T=G@w$Ht#wzw&-s|8)L&`Dei|tKTAjuKn@;_u%i-f6M<@GI6sRva$19i`z=M z37WF6=k=1jBU>!BOrk_0U!qCmu8@_Gw}8IDNrC5r>jYT1f3o~x2>!o{DV$A^Z4HCS z?}Xp>e_#C0{Q36hjz7%*_Wk+#JO6jokM3_--wl61{G0XH`nUBDtM6BT%>5<%tLf+c zUy*-3{#O1L|NZyZu3tNU+y8&aIEz8{U&mk0f7||bFnnPYW1RkfBg0chaR$AA+y3qR zZ}xxje?LYS2ByDOe{BE7|F8I8%^<@F+KK&}A(L^-|7ZU>m@=5U8K*FrGH+n;{?EZ^ z#^lQQo?#MWBEy+~TmJ82nDw9czYD`zhU5RM{xdKb{}22Z_P?8fpTYir?SJe4cK`DK zIf8Dd|NG}}{=XxCKmC#ZYy9`sADh3z|3v=z{`LL)@o(V2=l?GK4`T3VSk6$!_>A#5 zqXN@?rc~x8=1a__%(IzPnU*p=V9H`*XFAL%#Tdch!tnop#ecT{1^=7>8~uO!Py4^( z|G5l1|7rev^e>80kf|Hg0%!QdmdBpXwU?uw)s$@=%N;gpu5|VnY`JV<%o!{?jMo3t z7#98aWW2-_@z3vX*RSTEYyZ6cbL}txU*BIFfBFAX{dxF@{*T-rvcFdUtp8c`W6f9h zZ}Yz<|6u?5;QRjXLf<*RH+=u|b;h^D-*$eV`}5yV&7aeMcz)yip7&$!&y?QOD$_T>l~JFmIJI&EE4~f|J?dJ|F7weGmCL=hvU#OaI9JY4|ze$LgOAKjprweV6&I{P+9sieJgUntopUzWL{= zzv}Oa)A8Oe>g9GwCrt z_#evfj-islk6}B54}-w}1OI~l&-%CG?~A|F|4sgv_b>O~wZAw1?D-S^SO4#uKgxeY z|DOHR`{(B$=D(-@sQ!Ka*Xm#Rzk~n&{yX^3`oA~BUItahO^lnEl$foUH#2`_p2@tF zS&=1!#f#+~^L}Pi=2=XhOae?zjMMU7=8^FNjrmJ6&)Sl61W%|j;!BoI#&h(oh zhvDe|o_|;VPW#LG7t~w6{QK#z3qKeCQ2%l8yX<#|A5Xsj`d;{b^7r!Z$G@e1pYz?| zNA{1pAF)4~f6n<~|8v#P`9E8JG5zZJ<@0Orudv^;zYTvU{g(Q}@~7_4=RXtwWc}Ip zXU8AKe?@=S|1hyd_-x zJTCkn`Hu74;JLyV!842V9>;CYOYB?N>ezR(e`9fE{=_taG3S5r|11Xa|1FWCn4DBmestxEX60)-X(EENAp(n9s0^QIRo$L7nkEqY>k322Dm+Mt25Th7}Ag z42BE`816AJG0gq{<^Q4o*Z;BqKmNZIeERQ}|M&ix{hR-9-@o{O`~G$Oul&#TU-Ezc zf06%k|M&j?@c;P#U;ld;rZBiM_%n1eI5I3}&|^$x3}wt@T+W!ssLg20n9dl;n9X>G zaT}vD;~|Eb3~mg(4Dt+O3<3;X4Ezk+|2O^52e+y>{J;JG&Hsx3r~iNXKaoM6$(?Z} z(>JCgjK7%Nn6@yLup}^7u=KLbV>V(@XMWDKo7s)Il-Z7D0&_C+CZ+<$5XMam3JiS= zFaA6KpZ2fv@5Vpoe`|{afa@)2|CZQ+~4i)c$$<$LAlve;oa>|Hp~%6TV;k zHt*ZYZ@<62`*!u)>2Eu~E&bN_P2_v)_s!qmf0z65`TN)J7rtx#nDe9k=enOeentE~ z1G+W!|5c`&jN(i?n18WsX3=H4$#stB7S|?jdA?(OKY3dDZU}MQblSpPD=WxB!~&8*LG=imPSd;aDAS@q}d-^qWH 
zer5gs@O#nECEufe-1)KZyV>`?9~*wSf8Y7-+qc4R7rshNG{-4snC;v_Ocle(u!zYGV1_g$>3^N!)8GbPoF*Y-P zWn9b@#&nF4jY*s77UNIG=S)1z(M&B&lFSR3ZZOI*&1ceNn$5_{WXsgTD8p#OD8bmx z5XRuoz{bG9;L8xgV9cP%aQVO4|KI;=|Ihoc@jv?ip8uu)|Ni^>PxAlnf9?OY{-5}N z`#&=S2gB9>9sjrgmt$~b$YJ=(AkX-O;WEP>hEj$qhUW~XjFyc57+x_fW(Z^`VrXXI zV|e`k)qfWT5e8WX1%`kBTmJJiI5RLZ1~IK+3}pVqw1Y93$(BiunTJJ=g_pIN#hfLC zMTq4Sb1F+UOCZZ*7JHWG%;C)Im;{*I7#A>DFzozq|9{E9%YQfhdGovfm-w&uKQn&n z{#@{5%8&cszkb{NE$ExUH|KB4-_pK4{<`Ap<*#mECx3nRwfXC@ueZJ$e@p*1@7wfm zcfY;==Jra+d{EU#JSaOCjl z^OSRQ@Xq4X;oHcoC@@LLT-ZxkOO#n`k;oCD>q7j(_XRHqd=YRGSjlI@`;fbuGl0E= zt%Y?a%RH9JEN)Cj47Ln<47dNz|1)+U zhku9u&ir@hzX}68LmLA-<8p>*hC+t93@;fn7#A}hVEoP~$#kA^38OGm7Lyy(R;K;T z-)@>hyM&g zspY>0gE@mILodTFh8qkjjFTC6Fuq~@%6N;hiLstBnlXqmlyMc~ImT0r7a5;1+Omi< zuVlW&qRjMz=^xV~W?ik&iza@T4_?Gys`rDarao_B| zJ^yO>HR&tEH@$Su!vkJp&bf;WY)OF&U5Tj;9rPEl9U zUg0vK452xK2?E*zrUE(qPJBwdBHZ;H2JBL7A6eX4j>{go4+&u`1_;c=k}kxzt8{i`78DJ`QPq;GXI|ZJ^k;M^D<&SMm0 zie!ps@?uJ7%3-QyvS5;95@M2L%3<2Ww2CE^MUG`6OCeJ)lNPfvb117XYXR#P)>xJX z76z8(Ov%h9Ebo~=v8-o#%51{Cf-!;dCc~cp^Zxt%H~7!?|MlOjKl^_#{B`T6{V%y+ z-+ng#)cC3PGyg~XcgODszb*Lo=^M}Y1K$pREB?mt?a|jSUq654|JL*E_qTc9>%P~2 z&;0KDJ^%Z~@4vq{|9Jc3#*dVr%|Atd9{QR3OW?QNpHF`_{%iOz#-zpq3aR7F^I4=> zE!a7@&3WW_KJgUt&EcEEyN^#zz(w%3V2|);ktC4=LMsL53GNg~;+N-t!KcTk#2dkL zm@AOuG}}_v7M3l{UziP{AjTxK&XaC*sr|XZ^@2p>(zkPmx{Q2a^#UGA8{(t-Q z^~YD1Zy&yTecSl0{@cEQU}m^_#ynGP||VDx6>U_8p8!!YxI3?VcQ~zJU@P?^?c{j5+%S={HwwY|EoCZ7+JRLk&d4KR7 z;w$0%%zsh9O>nKCg78`4$3hDPCkqM)G74uN~j& zzD@p?@-5}tvTuLBrGKCJJ^e@R&*ERnzwZ5d{X6+j&L5$_cK^2gTl%l>zZgRxgAGF$ z!%K$e4Bm{bj6WESnR=M^GA(D?!E~8vHq&$_VdivZBW5S&B4%%9W#&UnkC=Wi?PNN` zB+LAtX)V)CrfMcDCU&MXj1w3W7}XgW8D}t+ z8O}5KFkWW}X4=Zc%jn1)%%sia&s@N?hDn%tHj@Q&Ci7h8ZssM-bD4XYo0tojKQY%b zZ)Bd%JcGHAxq?}b*_inm(=nzKO!JvGF-0-OGO;k7W~^q^V%)~CoS~f|g5mgo`~L_3 zh5ftwH~g>K-z|Tp|GE6z_xH13n!jHDtoj-I^ZSprKUVw*{$cau$@g2|E5D!mUhqBh zd(U^*?_0jNe^30r^1J`{y6?^3cYF`}q4neW5B8r?Kf8Wb{OtYt_2-VC?|z2=YW>yu zTkp@TKMenh{s}VhG2CQ!XFtq7iKCxAm8*u!l{{0A9*u~g&STmSkGu>yp!Ep26*S~N7UjKXZZ}s2M 
zzaf7*f1mx8{@eT4q~GFyp8tCKNAM5RZ?|8!f1dw2>F0tUmOpEMZu_3`L*&<)pWeT2 z|M>Oe=&$-;4Zq@lnf^BW?e*vUpXNV;fA{}6@`vf4#sB#K!T+8AvoIJjMlmEYbTj5K z2r|B8tYWHSI?DKfF^lN|QwY-}rfjA=Olz2zFqbhGGOuQu#x#fN1Jh9^3#NIDoJ{ph zd5jT^cNwNIq%#CEa57x@&-?$`znlNM{>l9d{d?ih%Rl0OIsZQYQ~D?159c3~KVE-A z{^yX%=}F zCzd#t=`71xtXZBhFJu14%*?uxrIA&NwSaXpt2yf@)`hIjY|mK3**~$pV7tKP!}^|8 zoAoZs0+yxBh0G6`jx)+JZvOB7um11vKbn6Y{4V|V@Mqx9#vdv_*uJm$7WmEJoBp?1 z-=x3Y`P%q(`B$T_MPH*qC!K%W_;usAyp5zP^07W*u*O@dc)ucVTcmIx=G25&cSJkKvqO%7=eUAB!Z!YqORAN}6< zYx{4NKYo7|{^kGW`Mu;%)4x-{KmQ2-W${<%-=Dt={$Kt7@Xy)b2mbK=z4vRv&m+H- z|GfA;_4oB(cYa*{uJhyYk3By={gC^0>(}bv?0lKkzT}U&6nte@g!~7;Z7lVz6R}VwlCi<kk z%Xph{KhtXFH_ScE%*=^QpBTR}{$$+D=*cL|_=KU1;m3d7|K|Vh{N?|r_D|*?@4wrB zSN*;6_ub!_f0h1f{%!et>aWMYntw+BB>(CE3;O5$Px4>jzm$KU{(1jz{4e<5|G(k? zw*N=}yMb3LZe?_0N@Pl6n#)wd#KNr1JdIhG`44j)vlELWvnxw5(-vlT##0gUH(VP54Io1KX`s*{Sg0__3hRdk+0LftA1DivE=*eZ)V>+zVG>F@~!oo@(<&m z6MyCZTJiJtFTS6%fByUN@~6h{>fhh~-1)QW?~UJ`f1UrH{rC2t+J9xn28LTqg^Vv4 zLm4kHo?zl(In8p1x_G5+~~{9pXPmOoGbJo=OQxBTCWze)ez|Knun`Tw6`CgUTR3m|4;ni^q%K?^a%oWT*EG(?mOwSlkG2QyV>Hl{|AI6>ktQo%l zQTn^<|Fl0ze--{t`hENF`@bna1%5yK=kfRLAMyXo{;d8t<4@nOQ~!M#q<&`o?`HV% zlacBDALri-7;pVk_?`MUoN>;N9lt#o7yRS=XYuL-9KNJXa6G^k1;O$x0|_~v6t}z3<^2I~MN0=`2tF zEdDo)f68!~{rU0dDMQNN>3>1D(tlxH&am;jk-*2Vdw;fZ zTmE?QE1Nz2pX1*Urc$;U|M&d6&35JQ?BC2Rmsn4K3g={EIQ6@j^TV$M20x}cw)Ee6 zoQ;f+eyw8@`Krpm@o&YSn}2`(-}Gnduhk5RKkNT-|Ka-ek;U@+)<1o$Yd@H?eEYxP zj}emzvp&;@-}_i3{;gnO{cp}1!7BIf4(CjUnSZr7bAF{TZ)Cp9+|N9L;X2>bALT5| z*bRSIuCmt|x4Ud{fFso_@*V<0=v&)@&oa4G-vVwYnr|Chkp&bWZVor#Ze0cZ1% zxy;#|@jrzA8t}{iRQSJ%`{Wm9)^LtpUsiKGVp{UKnakqm(!ZZMru@$M^PaEi)6ZXO zY#)9Y{yoF`iply%6zk6)Ilp?D=KbFLyOQ<)_ZEhe|5Ja(|JVGtgK`|1{>F zKdkKg{xSZv=3)OQ`lp{+jq&I2WlVewVm}WuFJ^f8X}^%gkC$I=@;&&)_4_>2!~fAg zO_|sI;OE%>yWp1{!Go3Fc=9>mP{HHFuGl4F?2f8uik+dMXhU#l46IhOyJ&FK3_ zlZlsc{-1W{$A7aJ&6#EYzWFW8d+G}_ODT)Q@2`JS7zG(m|JG!E{L_`ygki~7Hx4_- z4_{C482|eFO^m1Q=bE3q+<(6<`16({`R9&*HB5*9Xa8nrGynJRzwJLsHkF?{e{bM* z_?*G9=zrY5Ue?E77P4Jo3IBDIv6y4guOGjo`AR>pXP(Gb`df_Y&A)T3d%ycJ|KTwC 
zxP*Bx`{|#)zb|qh{I&CsJnOzs-?>}fz2|lP_UUgSi_6cu3{`)ExYoS?$Yu0D{Yy7b z=eNY4SGhlb_{`qKiTFptoSSY(^l}qH^;AL z(lzgLS#x;!{!U_c{9DR=km=bUWkxSn(O;?m|MS&+P+`5t@$^^jKOXkqj9-5(`n^=} z+Sha6UkR~&e#X4)S0Y#L-`ZannXYgsFm(Qy%y;$kab^n+voFj{C%7`dzWVi)JNJLY zmwC*g95=r7{<+G&>VM|P6>QtsroUgsx#CCIA2XH}KZRK5eA~s!#!&d9;NQW2p8t~n zuV8rdcL$5}&wR#crtp7K|62Z^`_Ia-fZ_a)&;Nh1`~SZ3H;?V@Z-IYdET7n2Km6cl z`uFzpDSrMx`~S`Se~>-&XW{=?=HmZi|L(Gx{CV(Sii_)O0-Nvu%74l1=YL)Oe~xp* z@7cdMvTymX{Cmp(xvV#t_xwoy@l4S9lgp1U{O`Xu{oTmJ`L*ot4x#O@|L{&{%>CNS zuKb7duNCW_|K`8WvxR?;WoP&i&pz`{(We;#*T2mAxrkrp+s=QQ-2GqF*y~t6{rJJS zfN}Bf3rt4;omj4XWoEbH5qKjd;P!9Ems-vz{|^1MWB$RZ`<|8CjoJUxcJ3Sh{eDIN zpT?c}-Slq~m)*Auf9JBD`enpq%MtY@kiDH}<11C613!d*-{yDwOKcC9$;<%8o(gHmc$bDVHIaP zr{>$2Z0UcOGBf?K{J-nlB$i~xh|lRf(>~AU68$yp$3E_N|J1)|a47PKzx3q%&)D$x zGw&P5f)56q0UZ6`r~O3`%{Vm`!{+<83S={Qm7!#-HzqfyWx{6i*diYjPlJDQE*Wx1ee`kM+ z6kPLjF3X$0@Bbb7DbJL|aQFKh_Vqt6{L}fflKuBD-rwul_5VHm%gD^nD*f#@J13Lg z*L?OO)>WSmGHdbYKl9+5#p?C0PhkH~roXEhnEspnKgV?7-}kS__@;l?{1d`{`TJqs zq#qi;PVim&^ZxrkrgqL}Kezpwz^VE>{0~28;y;mJ++2r#zxf%)A;rS_&4=ML$LwDj z3?2VG*bn|%&v=V%i4>$hp=6~|xGh;f3VAeq6>I_2bjG`vO0|-~8q&?)~}3?>pRWzn}cE z=HmZm!u|7G&z~@^soy0Rj1m7^e@b%5acutZoc9Oo ziBJ1^PyD?4y-+yio100rg-*QZx?WXX4HPkA-Vb$D~q#Wz#}cT zT7g~9wlYrVYx<(gpzz0;{ng)?_w^#xzm|Sw;*a`~^V6E=?Jv)td|XD~Wf^U`GQZFM z^GoRE=hXk^T!r8GSiUh^{#wSE$9eO+&;K4C%U?f!S#kw3)qmZ{6vw9efAR0Mzo)o( ze_!~mz^wa!*-uTj_h0HbH!^(v%)t7TW7$vkPdBB1zdFyD%B%P8K35xi&xcs{Zf2)1 zRqSt>-9B=0RIzUS^p^YPkA8;rjMhJx`9FR#_*5u8?`y}Ww?bq^I-`SYzS=RkyVLZxy?z`RZ%fb`h9AKB^YyA|#ev75`mjL4&rh;!QoDaCZy=WBZ zVz2q!&D_cE!u(w(Yk*%lhB8Y%_n${PSdKVQ64r`xV2x`mfXf z9shQ)T>Lrpe-Ed__Yc3$^T+*+`glOR;m3o&>CCx*G+DNOTgANQFUR-goXU(jU#eMG zaIkzg__L4q&?ik6ZP02#_F10~^W6RY>)T{@J9g<$#w-swjlY!tUC5rsQu;;x-wB~< zAGiK=5dQr!x+`|tO%Y@(dUzPO2IupSN%z5nZxA&jf3edPuiO^ zJjFjWSi@QTKTGr9`1SMm9FFV11AorrabR5Z-k?x+4uZ@`%{@~>Hno)O<65C zHh=bDd&QdkGnCPt-S_t-<`@=*pUey#Y^S~_GEHJj`(nz@$h!FF_ka1E6Mn}03gL|U zt^0coSHtfQKM$~f_`USUWp0)q7XLqUF#Qtxe}!4`-@D({j7bb!|5$#Dupjxkf$7D+ 
z)xX6Uy;+UE{bu~bmipa+xr$-cf6LzqY?^=lnU?+YW#D6G`}3J$22;d8ZN_}Yz(3cS z6aKINC&%!cVb1Tp%m)8j7?(0l_^rg;`+qxw8$-r_)BjJIHGU~Gn=!xswS{Tjf0KV{ zOkE7gf2x?p8Lb$WGrald$8h)WpZ{l=fBa_qe}<{_uQ|ha#uxu87}orcXISx9h3WJE z8-G%m{{Q#>x8sjKa|=V^e}#Vv45yh`{>@=JR*>E!?H|9csW|2r_eXPEM@nQ;TdDh7*xE0|IlE&n<&=P^8dsCJpWhy*Z=?G|CxVT|6l#H|G)LW{r{i; zdj9|Y|Ks1(|F8Zx{h#t*iedZzz5oCHKh3cA-))AI3~v8?8RZz({a?i3#IW+e0fQ?; z!T&0T9EK_XH#5XA=rGta7&2Hhbp4;qFz5g8|2hm7|5F(p8MghGV7SOo`ahIGgprpa zgn^0Ci(&77Ifjq_FaO{2e>;Q5|MmZ~87luj{m;#i#SrsfjNvT9wtuS_LKs~C?O@pd z|L?zD|6lzV_}}#3li}yTAOAo6xBK7ppO-=If7|~6hTs2I{pVp&`)|SUN=u8QA{sW8h>s|9>&#^M5Lg zkN>;>Kge+Xf5ZRn|8Fu_{(t%3kfHPcy#Fo?OaF8KU&XNKzt(>V1`US1|0Vyw|3CTP z?*GyM=l(1Hk7Y3UFUFwEko5l@0}p725ra3wiT`;F)(j{9doidpurLHNBs1)0U}xxN zSjE84kj?P*e+|Qx{~`>o3@iR?FkJs{!C=E6&rrtj?f;|ybqvq{KmK3LaQFWdhIjuh z8G;x-{x4%V_J0Dy)BlhEr!%bhpUm*+e=@_Z|Evt0408Xw8Pxvw{EuXi`QQ0Jk-^}9 z$p3nVNB^?^M==EdJMjPB{}cb>8J_(2{qM`*$Kd|oh+!WC3xhmEGXo1lJi~Mb9tJsv zs|-#IcNu&cm>9Sj&;E~QFk}4pUy0!(gY5r_4D}2v{wpwaFns+#?|&jgIKzYgx(p(W zbN`1hgfL_?`2Xi%+`!=Ye+$F5|KI*k|6j=P_y3Fkehf$cb2ErB=rNQs%==%>P|Ohc zzk}h~|1bYf|G&?`%kcPL24m`fpZ{AJw*Qa(|Lp&{|Jnan{crp~>wn?@zyDePANk+- zpXt9RgXaIk{|y)z{y+Qg%^>q%f+2!o(SJDxTZZ-jK_gf)3@Qw#|9dj*`ESd>%<%rd z0)rgG`~T7mlNh8J4*Yjx;9!{kzk;Fh|E2$K3_=Wx{yQ*;F|_};W#D7z{cp+O%<%ai z6XVtY@&8j8uKz#$pPAv`f1&@b44MB^|2s1H{ZC+c_1_HS$NwD+yZ$FKO#Uy(pv_?P ze<8#4|407|F|;%E{{QrU4udE7)|5^Ezx}`PUy|YA|K>XpPQkNA^ZOhhU5QT8NUDbX4wA!!hbJ@i~m&_m>BdJwEtT&eEa|R9|r>q zgTsGKh7bRD{nuic{hyzqjp6KnBZdPEhyTxDNMqRXznsC2q2a$jLn*_ae+rDb|IhtX zVtn#%@BdBzkNki8|Iq(P25tsEhQI$8{jdLT&M^D`y#EFaxBs8||NQ@^{~8Qp4D0>} zGe|PnFueO8&M=?h(f?uwcZL{-UWU8>moqH+&&y!O(C~jR!<_#P4FCS${eSTP-Tw>> zDgQMXivC~t|M>rc|9k%1F)aUo>VGCf$$u4w&;O78Kk(n3VaNXo|1}w6|C=ym{4e~U z$e{Iq`F{b1BmX!4H(-!vDF5HgkpF-4e|?61|HT*t8A2J3{8wVQ@?Vu9jiLF!BLgRc z<9|~IS%$Fxkqkfn7yZ{_*!X|S|8xHx8BYI?`Tvw*_y2YOr!xHc|Kh&^LnMPD!)^v8 zhB$^m215ozhC&8&hIEF0hMWJ>84?-({1;^~VBlroW#DJ z{GauI+yC$X`TyVhfBXOB|Dp_+|M&kFX3%1Y{I9|w$FTMPwEu^hRQ`GWvu0xbZ}dNy zL7d^`zi2{|hpd{NMHe 
z+y5p17yaM-pM$~c|HuE^{#X9r{(r`QHiqv1xBf5tzxRLEe?x}5|Goc*G1UHF@?VkR z+5aW~;~7r=XJ?qqaO(e*|AGwC3>*HtGN>?|`G4;J+yANzDhx~i$1=qIkNJQ6zr_FE z|91bU|L^@j>3`q<>Hh=%7ysvEDEq(V|DFFk{;&G4$N);qk_-j^*%%)Dulvu&z``KQ zz{eoW(9H1qKWGnUF+&}L6@xZ|D?>Je6$1lf7(*4qOa>K(Eey5{lNmNLXfSj#q%-XP zFUzonVd;N0hC&8029f_i8KyJjGraiU&v1jm=YJHV0z(}`D1$wNGJ`Q_wbg%PhRqC@ z|7S96V2Ecp^#4DDJ;TBO`x)#Q>KQElzi06J-^397KZzlbVe@}O1}=v6|JVHwVW|7} zhT;9cwf|TC`|$tuzs3LOF|7YT_5aTQKN)ua6JiMcU&*lee;PyE|44>e3=9l28BhIx z_y6txBnCN##Q*jTTmPSBNd3R~Up5ole@TWkhPD4M{by(R#&CvV@&D^ghHQ-t%?#{Z zJ6X&ARsO5}Ys|6apXi?}46F-2t&yKoPXzjZ2DXBTjX~>>kCHDA3y#cWsdl}{!bpumw(GyIoUlJAN{|= zaE-bC|DFGq|0@_4{(bZ(=MT%j|G#Q}U;4fNx9R^DW+jGq41o;F4CcQi8HG5`Fid9H z#8k~{@k@ijl!5q;nwif0W&J46 zb(UA2(Ur}V+n@C<+ckcF&S>6H!A|}lURHrAJRV$W?5rFM*PTP`>ycg-Pbu^mw!L{-Qv6K&!%4@f42UK|EI$AnOTN;J+mL{B)0Qx zQ&~^2?q=;^ZDTcJ<6`^C{D?81@fgG2|26+Y{<{20`y>2s&;MnNbxd5$)+{?%UDzwx zg*l#bxN_d%a^}h9iRV7WWzUtz$;)BEX2WuT@gBqd|GfXN{0;s6?MKLuitqYgEx)|{ zG~wg(k6NGBeRTS`>7&BO`VY+?V?L<9zwl1s{et&XJ{WzR{L%Zv)%ThoHoZ%IKlT0g z_q87cJ}vmX`b)%D=kKjQ41X#8S@vh$-}?V0j2>)HdDum!$~GwVs$Nn(q9&sis^Ot@ zOiooHQ*n<%t89UEous_T8=fAvjSQ22N`5qZ^Wu%#JL|V*uWMd^d>i>*;r-)xJ3g%V zyzyJZZ)2uI9M5=c`Ckg^i9Qtd5M~k(;S1-r=K08}!V$&R$<+Dp_pgfYZlB}dCcJ#~ zLhR+1m&;zwe&hIF@RP(>lONK*P5#>aU(Dpg&cl70S5x4ZV2W^+aIH{*z(<}SP7by! 
z%-W0x|8DxF`NQ>_^p}Spx4-}VcFLRSZ7!?2J|C0T|{!QbH|K~lQYroa}Z2P_WZytjOODFqP zPCIT^o_#z!`L^))^S|W_<=x9|#3jM;ie(z3>A&3HF+X{J-2e9b%lpr}KeK-^{!;Z> z{qvkp_df0TT=(VkmselDeEs-s%J=9WsXx|!U;l0Q*XA#;KhOCb{proe%8$$+pT6&Y z*Zem04bz(`Z~foj{xI!x*0-+TW=xgb_9FLWlQiQEd<>d&R_R3O9#prI>kvOB`bbnj z$c^U}hX?y5#-~5*KUuxI_tO5c*h96aFJCEtxc?>l*9C^jY~ML^xnp_u^IqkfFECfQ zPGptH8xanXH3DqhLag)u{r=|o{^Tp~m$5HiJ$w8t_|=YgO&@YTx_ZLb{F7(OW1Gfd#rc!tKYIvA7{^~WKGr|XeazO(Az896bKkMOV=3{FLLxmbWQih5i<^ zY~nT-d?9pGh+nv0=o)_&_a%_+U-tU@fT%-{a&{QmLn#%KKxA77iiY@2}-QX8%a} zcJfQlmk(cke?0m1^q&`F4r>a>S*|!99^Q>S+}y#O861*aB|N)%esewH^5jwAZRWnh z#?H9u@4`PBzu*37{r=#`g5UT5O=kSUbeB1Ug@L7j=_JFd|GNL)|Capq`Df{`tY4ph z@O+Q_n*F8Wv)RY^cg$~P-kx~N_CE2$?vMLFxqn{qdGBYY&(@!2emMGW_S*?>=e#@r z{`H5YpIE;B{TB73;Mci-C9F62E=sIYZ7}+4@z(sGNvo-+(Pp)YqE_sa{{Q@y{H^*^ z!biK$h2N}xHvg*lIrW?L7tt^MUnYM2@^d?*4(C!nS5X()R;46`I}%$27jR!;KJfF- z2d~$LFW0}Ycy;P6+sB=sZhb!fRqQ+0ch>JGf3*L8@$V(GI=7XOikP$bS@C@Fxxzj? z2btghcKj;;f$eR@oBX$q?^8dm`xg8w;g8GTeSgjV+5EHpe}wTAO9T5Nt~$P*f}ezD z2nz8ja!N7p__O-kg%2rjZoRzl^7m`q_ZFX9zI*?k{BJSiDrQF3OxAC#LhP9w@m#_@ z3A__{d3bklCv(na-N!KF_n~hwpYMHG@czJrjsneY%c8Q*^N2) zId-!zVn4zDk9`k2H~VbX5ay!{xBhbcR{JsI>$K0hUvB>jXWY-K$FY&;j?i?mh2k>8 z_jzjB6MxvR8|A_K8_}iJ`Q=V|2!A@T=|#s@8z%Ni{$oY z>-jJB!~J9G8{?O6o>@Pud$I2A*UwJBSQx&toa5l*HsW#PDd)bcZT?u0A9deecxCZw;_H%kCqGI4aQL^I(4K>KYD+k{rke;!ji>d&66U0 zO*&g{g7gYeB_UtFQyfiQtd+^o8R|0Pf-phRo{Nnu0;D^&s!#|Ul8oB?8Dk<*Jnr(Q-__hhBdARWpjc`dh zZgs}bKl(qGyq^A|@TJbXZ=War^!pR~m*u}ZV>i=I#-)t@Ean{hxw8e*#3W=a<e{}yBsN)>SB zbKu#-d7I6dMTMyybVlSKj^B@e6n_)>qVlQf!^HRMA1-`s{37vV=AT=P;_N}()_kf0 z+XasE&*jbJDq)jjlK3a~`_vEJ?{?o@zn%X!>-+W}=YF2}RrOotkKo^=f5r^5OhGIQ z*q(E|;PT+v%yXXm3Fl3=R_1U2jsLv=;rH#tmyKWAzlwk7|2g}&+P^Rcex^WXF&1wY z2i6QWQw|j_NuIU53wXVF4S2k`+}KT-PyK8B<@bI5*UqoP-xhsO{Pp(lZpIkaqwL2x zm^dRjxY+-(crvB^|MBPJub!WAKhnRqe!KFO`|F`EN53e1Iq^B`^VZMXza0Dea?3-}n4{^f#RCiojYK zXN~!WTg~~*gDke15uB*RZT( z{?C-nI*+4_JDcA`{)MJVNq9TGkT)m7DKc2l~c$Rpd?Ow=(V~@8yuXua=>(qZ0 ztT#9>@^JHU@Wt__@!a8R<1k?7Wq-!@h_!-+i}?pb&R_PQf?sEST>7r-&E?n1Z@WH} 
ze2x8`&$xokp4*yVO=z9ae*u5KCtQZ?O3eQM|Nn0Kx%d06Z)x8yf35kd`BnR?(AOJZ zB)__RGyXC0*UG>7j2l@GahmY%;D03WR$wJR8?Pp3Im^}m%YN(pDERvH^ZC!azEpi% z^+Wcz=U?vsl?=s<8ca)=G?{r>Oj*O&4zeq9e&^)m3g=wGF2j0<(e&TKU+UkjzQljp z^ik?l!{;AgIe!NJY5hN$=^e`jwlq#>ZVjGJZZ2*MZd2|QE`N@VtdE&88CU<$_&fDi z#SiZv-ak+OlKeC2Z_ob%CNowR_ORF{%h(Fs`BD6%?Z@^XtA9ND@$g5_52+uq-!FaR`MTq?*r#hBzP%TC&+&fg zyWscl-iv)Q|6=^@F2T%nt;bWrd+^Oql=J}SHk zeR2P($FsuM*`Id&y!prdA2UN2(<|mM=I2bzEce*faBk(z7Md-lCAnWxS)yO~J&!S~ z(_f)47hefIo_4?OfzRU?&mvww{FwgpCBtU6Fs?b=OSxBar*ZG#`o{5${SSK;M-9hO z_G4`7tW`{9|Bn6C|0?lO`rXtw?r)Ue%75_vTKzkjQJl?&YX|QGehYz{e0n?wIHFji zm`?qV{agCm<=3L0tUo1xxO_kOP3oKG*EL`Meu?GHkff5+d& zSHsiFxrcQIWAHm5D*W$k^gD9f}(`}~t%ndB{tj%n7 z?2;S-9G}>Kutl;yV7l^u?Vt5O?|r-QrT=sAXNxa9-;V!y{#*RNJkti232f)s?Kl>4 zeCKfFl;v#V5a)>Hn8uC;YtrbLubk-x0sx{PO>G_-E?R=Rab8w0vj% ze(~GyZ)V@!zB_$C@$JFa8DCsJr+)JMxaGr*_wU~AdO!Vx*yq__LcjHYXa8yYn*nr( z^}j0Sqg+cwZz)dIRyQ&+Vl+Bwcw2XwN`t5wTi4HDAHKiJe9`^f{>AAxS3Xz#`O6r| z(#tx9EsU*{rILxCDVKRJt32mn-V~t=q9x*Q#hitic|4i7{j&K~`f}dGh`V#{);vsm zp83w`+n2w;Sp>N!@ZS~)=fA=;mFq1BC;K8+J=Rv%zpUcEU#EZe{e1Vc{nxW! 
z-+xW})%#QV$C+>X-{QVa|9<1A$KQU&-K^(1(|ONbrXWrkKf0O^s`4{!?-QNp;6aKya&&*W7a+B>QhZNTuE@!UA94A>l znAZK1`mOxq-&gLhd|%&wo%Q|w&*gvK{X4_(nbD0&m+3s?M8+USV@58fm&}Q5{v4+{ z-*AO+@8xFUHsfStQ(*e{@AjWJzrB8Y{NDT9`S1S!woCymqO4z8LRjjVQ<>f}82&%} zSM~3SKac)A{d4CJ-`^X5lK!;*p80F{Pwk&af6V!D??=YZ>|c?;-~Zey z-zR*1`}y>zn;%0z{`;W(an?tVPsyJpzI1+h`z7_O@;9^Zb>Dq|T>P>6r^WBsKl=a6 zS*&>SMK3GVXiYcdG~TMeU)NUil46bMEw-FryFQ+Nlkr;rt?RqMkN3as`EA8Gkx7+F zfbs4BzJJUA#{AP_WMFOJjODi$$r7I>@j(2&NH^bn*785aU((+8J%9Un$zzLWuV4NC zQ2E{E?|Mca)^+S5oCi3#+0U@nv2w66u}|Un&3Tw>3)gebXY4#I5C6sdl=))tKH+ue zONCd<-z0vB{>uM*3&T>@Oim`A%RJw>ZMb4N%Gmr^TA0ch#TgkHA2Wn8u>QaNNAXv~ z_s?H?J}vt2;Qhf5hMy09i~ilukjXla^B2!wzIy%|zDS-jP9?SzOq>jf|H}Ug{$>2j z|JUrV#@{`E-u|BbOX26Q@9V$w|A_lJ|98y4{fyeIt{gkKqlzvX_P`Qzu$wBO7A9Qix@AMgLb|Be4Y{6EETgHellHH#9PE&FzkYObl= z7rAS>*f{Ok_p+w4ykTl!T>Ag^zp8&y|CjxL{NI>fA`<$Kl^{p|5^WI#rKf!mER42ocj^;^ZrlMU%9^`f1UdI?8n0I z(cfIZF8Xr$bIE6c&)+{y`F!AW!I#xvTECqBQvBui=b$eZU(CM7eKYtj`ZN5O;-Bq* zXES_aRp$*AKc_TXC&BQp{wbv+evP^Ev0eul;gy@I<%?8N4aiHfNRJMmm&`uOv~C*QaIFMFOJd4BWd z?zi(kGyIbHpTpG0!pauS*3P<_#g!$7l*28R%*^4;BILz6DSl0hf{59|El8<5U z7~a^wE_k#2edm|3U*G>Xu)Jo^;Cj!k!z0e^!5P6`&w7)2Ig=pMA;x6JoeVkuv;IE* z74}2xo8Fh+PnSMU`6TuA%a56VyO|8wg}LwZPT}Vk_`z4k!^RoQx`%Pozm7i=zcqht z|GEF?-Jcx4lz#pFIrXRQ&;LKPe)9gB@_XOk7zQ!sovaERe>wkg{pb3?`HWqX^)h4C zzu?~&f4u+3@a@^xtZyCPd4K-;#r${cKUszpMjIv$=3UI4EMBa)S&P{i*@M^}+0U{a zU~OT!!}OOy`hU#7ga20jH)K?2DrerqvV&ECU5?$9Z7xe0(-ww_|EK-?`1kYQH-8uY zi}}Cfzcj=4|NsBR{A>Gr<4@=x@juJ|6#Vu7m-^q0;XOkfqYYy-L+1Z;e?9(4{}%e? 
z@H6|z&+lixfBb&u`~L4AzcGE|`}*~>+~U+|U zGe69K>ipvPb>)}Q@2|f-|0Oc-;!+g7tDvp5+Tgl=xh|XbN!3_6E}>;CazFon@Oj(+ zF63kP=at`he%Ake&9H=VJ>%a0Gyfj_z4SNtzsF4N9BcV@h+Ghll=PFlBhDytnY)59 z>3jHl{#T99&7RMF;rv?d{iQGGf3^R2XZpyrfaxKlKjVDHMND6rXR;n-m*jfKy_rXz zX9cGqtLFbZKVmB5| zTETvUrI=|ag9O9p|C9cI`1k(rxj&`9*?txOc=oOL>*FsuUv-(>tzt#RG{oC+=FGC~aW5zd( zSC|4>0@(I(2y#8)66b!x^^)rY=P!0k)>Din|MdR!{<`|})z2qCKmW@60~){n%E-su z$}GZc#x#NPDnl8AIz!C=W&h6q)%nZ%ckUmiKit1x{ABxi@WPS@PFs{_QRV=Z@J#%$F+Vr6y>gV7yrWu{OVYjN%S)e(rOBH-DP^TKiS+yStwRzN`Ma|7Qk+ z1k)y_cBU4FMSn|wH~+r;Z#j!HcfQbnF;0n4@n%tZ;jO$kS$coH{LuXJ{8P?nH7{Pi zR(-Gbx#dT~UlT@t<|t-Qrs)hv{--ibWGZCc%n`li@GJeMWKSYL>~Y z>};HDZLHH+Jeb!oZu_tL@8jRXf8GBdGH@^@FsHB_V7bh4j3u4r3o~f1f295?|Nr%$i{a6Km;cTGI{v=<5es%m+@^2rb8p|tI3-*`nZR}-iNvuy< zl38D~ec;gKdd_gy}`#+-p+Av13WU-xO&){I^ zILUUG&_A|+4FAskZT>6p_xT@@ zzq|fk{b$DTfH9GI4oeT~7uL0`Pg%|}*D(EHsQEAbui~%&->ARq|H}Mp{I~tzgMa7# z&H5Mf&*5JL_*N`;24}`ZCKG0HmY*zhSnso@vc6&AW_ipsjZudo>c7!{r~gy`r~POA zf9~J1e>eUy|Nrps(Z5&!%>GL={9`!62s-iZCet*gb&R_hcK(0$FX7*vzaD?@{kig| z`ESoZhX0xW-~4O+XY+5$U(3I?f3kkx{w4M+^Jn&tJKt7+ZU3VDIrP(s57Hm%KTP|u z>Z9bR`JXI4JA9e{_2_q(pWA*4{yzW9`p>Puml?}BJ_|0Di`G1&@1z@}xk=@}oV26} z|67(zKj(dLd0YAZ^yg>acm3Y>m-GL}KPP^P|9; z&0~4Yn#sn^_M7E2^FyXvOy$hhELT~cvly@(X8OSJ{$Jmp$3KIHAND=@ zqv%JcPoKX={I2*{{a@p^-Ph>Pdp@7~miCL~uQbCPW?#;ag5M<=W%{JK#hV2duz&ye z^XIc~-e0A^bbN~X82oYF=cnHf|LXje^yB>3)nAytJ^#ti;K7#39m2m@aHUYGP>rA; z|8%Yf=F`9XzUF<(`ZV#2-S<;Jlz%?_aqow~PrjdOKed0#{;c}>;aAwdwaj}s5AlTv zz7f#l%i)^Lc82M~zgxfjzpwbx^_l03?Kgv;M}FJ>wfQUlXUQ+aUtzzi|E~OhnDHNT z9NSxtL~cD^7v4bbb?hIR^#3{k`u1J%yW{un@3+3M`0?;(`ftv^%K!ZSt@z6bI+x<# z3WoK}$Jv}Y*K_^iYUe88+{Et6s>PJ^Z__W)AA;YbzZd;@`%~{X$DdVy^#3mWEC27@ zzit0lGR$T?&(y>+fvtjL9_LZcV;nEo7O-eBo%(;^uht)p-|K&s{ObM1`uqIv)<4hw z{Qq=ii^n zf9?Jq`6uw-;eXcu<^P!(ni&ijIhg)4@vxj{k!AhI;>{As?7$?$`0xLNf3N>q{5||< z;-5u-&j0!Q$MNsHzrz3O{{8;9?SDOkDdRClP9{AjeWs_3O^i<%oEX;sH~s(hU&#Mb zhR2MrnH-senSU}pW4g|Cjp-OuE|US1DU&=iFN+$h6YE@-Nz9iSr!r{&*ZX(mZ^Pf& zf0z7i`TO8+*1wJacK!q1a{ueM&F^`?{r*_~_4p_Gf9d}p|M?k?{O|bR^6$W3w?9h1 
z(|*1DnfR;f*MeWJzjA*X{Mh~N<(HG6FMfLW@yy4BkC7jLeti1r&zJabJH9Xa8T2db zx9p!Ye?aHRFf-0(N$0*NE~Ro?S5Hq}^R1GO^d(_mo*Fh&Mz%j*Kem5o{bBU$z`r)8 zEldV~)_pnjVb%K?pDuhq_Ro;@3Fk?^#ll@;KgDdtw1xljbTT{qmiaFD?dSL8Uq^r6 z{w4S0>sP}s$)Emz*z-a5W9G+|pIX29{MBJS%=1FfTKKcTA)c=st5{z%wKA;y7x35W z&+6YIf2{w0{9E!@@ptC;_|MZmEd3DndBgY4KY5IISlu`ma0~Ko<9)@`z{SWe%)ILV zk-r=MKKys(|EB-7|7QOA^E3E+^Vcn3u7C0STK}#8N8WFX{|A`&vrBVzawT$3W6xq; z&AgK-0(@fmZbk`4GX~}Vy#HGN1pa#R{r%TpUt+&nevkPj@~@Inheeo8j=hb&lwFK{ zD%%p)c$RSHLriCwjF_vKO_-lCzW87LSLJv5&#oVhKmPqV_w(*Ak3YKq92j_+;+P*Y zFJ=~E?qPCfs$}}lRKlFfyoqTdV>E;Af8~EpfBpZY{=V?*{;!VTC;l}5Gh+y6JkPj- z(Th=l@h5{QV+P}XMiHhqroT)v%tg$u%!`<^7!Uvd|98%xL%%D3@A~cbhw<-&zjyxa z`X9@1gy9Lp9frjWi41-Wy$oWE=NMNp1u*9`e`AtkQeos}VE-@s&*E?YAJ;#%e>DHH z|GWLK{Qt@Sd;Zt_*Z$A{U-p0He?0~+#%GK(n6#LCnZGc+^ z6aP*Ax9%VBf35#P|D*r^`?v4k`hVH~p8x&!hwaaa-&}u=|Jn6d0d(u^Ked0`|Ni>J z_V?7EbAMd_to!}+SNSiUUt531|GfS^?OWrQ>z^)vH28St-}E+ zC+h#@{}X?0_{9D8)2rieE_~$s#mM5ueSqIb@VdY!eoKBf{;xbX9Ffc$7#bOtGe$AL zVX0;5XVCv;`Puue*DH?KN$-?CMSkV@IqPo_6Cax-=TEMCTuV7ZSobkqXB1<;$ePVz z!?~4xHH!qJ_FvDRHs6}RO#ZyR`O)c)V`&%+-RzODRf`0d7b`(KCu*!&Cmx8ZNZ-zk4v{z)YuT9DyV#sqw=+*-I>^|`xQp=!qZ6YM1MmM^f4Ba*@Z0hC?O$7eefhQh zx9VTs|GJFQ%#kd&Sx&JevYcU#WVT~&VP@WBKo&VVwSQ#7`)-&)jMlfz-{LARfw3KNt(^jT9rh|-o7)t*;{nP(D z`A@( zGl?@}Yura1F?q?KavSA8l(q!7kc$8u4|Du0){?7V)Nk&W>iLm1;*Mm?rdrYB6n%%;q}Omi9cFtjinRphtGvun`0VV7rORnko!1SD~AN@T}C^G zDu!~#Fs5xxXBdzDXZ+jr>%fmm-^IQgexLlE<%i(Ugx@p%&0xI5{DtKq%PHm_CJv@$ zOsXvZSp(R6*c;jUSt^;r7%KmL`m_Fb+OLS8pMFgLDf9c|pS}OS{r6(n{$J_8#Q$~w ze=$5``o$|D z&fo5TcK`AEyZ3MQzx@C67#1;_Fa7&91V{pbAO{?G0o$G>-f#s0bci~U#jZ_hu@|Mvfj{-618&9Hz$i}5^T z9uqUO0W&M}J|<75M~p3uVvGwJQW@+SBp8nVPyc`VpZCAjf5ZNA{x$lW^pA(Zm1zbu z2kS}J|EzVasw^hVc}&twwoF}249peGkD326t1@RX?PJ`|kn;cB-&KFK{@DMy{wMjb z@W1STQU7lIRsDP95AWZ3f6e{{{_Fks`k%som;bN+z5RFMU*W%Lf7ktC`g8X8+}~Zl z5Bxg()91&kZx_D^fA;@$@#FT7D?j#qbp2@m@y^GH&jnu$zeRli_2cQ!OTP+!xBu~G z*vF&xm^o`$FcW|MmWc{o3?(>xb&M^>2Q^ zdG#US=WeDPj_I5g9JXvC%yo>*nWZ@9@W==#3QZIY7(>|!$JNZ?lFvEj+%QQhX`=7uN&bXe*fhCufiOqv;I-3Z4 
z8#@!eXM}AEC`RZ59pB;Z4{xdPI zWHMzr!^+Oy%KnvY4XYW;V2KV>S^pm~h%>!q@?o}PzQdHt^ptTW<3`5yjMa?hjBgnV z87}{K_S_FiJ6+GKMgQF{(2@V5nsH{@?6>=Rd`Nzy2EiTk|jE|Ly;F3{4CP z3`z_f4DbK{{cq2(lEIpB1LFh6AB@aQ&lsmLnlnCQn8FawAjxpx|D^xv{}cb0|4;r8 zI;-={KhV7t!vE*~2d$Gn#PFSggOQo>EyHbwJq+y(h77_Czy515C@}Cctp2b2fAhb9 zf3N;t|I7Y=E5jkCer9WyK9)R|6U=j%RGEaB@|nIf=`aT{?_hq%e24i7(<4SXMjZy5 z|AznW{>}UM>Ywd@&i}9fZTV;PZ|z_1zd!$U{!RGD|KI9=%m1zayZ*oW_v3H+-)(<> z{?_@u`d8eq@?U9 z_5Jw|jbF!piT*v!aGjl3s7v;_>OD1M<=L_!;*kO=JSKd>!pve5MP~`!;;my(X3YOP z|7Xrukx!F9XulVJSM?$E`^UdxjIE4}jNe3AUjKddcZ=^6znOlm{S@?m`CIRI-#(~*Xw7_z{9O9;#4p`H-G4X#>-sOlu!13zF@wpDc^&gj=6lTN znHMpyWB$g>%rcYt7*hx1ga5|=*#9>D>H72MPv_sGfBXMR{N47a`_HjIIe!cP<^ES@ z*v^p5D9p5nDVTXa^BiVN=H*QH8EqMVGYBzyG6piA!=2U;JtL)A48TAM?N0 z{ucau@h|=Vz5fafx(r4PnheSeW(>0!k{Jz{o-rjchcQ28@?=V8OlDaAU;qF2e=Pqc z{{Q^9;$P%H$$ww|uK!!{H~eqt-=@E>|8Dx%_CKCMkgCr#W2P^R%Nc_hKQja~Z2qtF|Ixp_|BnB=_mBI(%73Zu$K1@U$^4U1 zmT?tB6hqp7;eSzobN|lxTl81wulQe=zfONS{x0})>CfrEm;M?2fAH_YKau|`|L6S+ z|JV0#-@oR6TmD}8BllpI@84BN|&xvgjFyy--5GQ6TIbC9psJ7q& zuD{Hw|BwGp{?YT*?2Gj$%@30s>tFYx#C&(80^zsh~t{PEO#tM`&0rhf|i_V(wE-^G6x z{F(Jw<(Y+=`A^I+{|DP~^DIQRdnznlJS{+;vd;t#X$ ztG|kUdH+fNv+`G`A0EHg{`LE>!w|_3#_)xqoM|tM0{a_|Q=BU}uW?*vpT@?+x{>!}EWAf2aJ>_~Yc3TgWBvyJP5wLYFW zq8U#AkN7Y0|KGpc{|@~t_-Fa=?%#ud%m2>*d*QFezx03C{|WwI^Z1y9}eGGj64>SB@3FFM?KF7I+<;efPzvulb{#o$z?XN3;`2Vi`-TI63cix{L ze|G-K{@wfQ$IpUaF~7b4?qoQ{?8!2X(e2;Q-;;hV`T6r_^Y4hid;Y5a`S>H?>#9%E zA2mKSzCZDP+lQEsJRi4wi2AtUlfaj+U)F!k{dW1g@vmq9%GuWlU6XmOpddd_>b0n< z;1k|HeqFJTGNj-@h+^Xa9cm%jn0cFZ^HfzaIXo{iW}d_$QN( zsUJi>Ie!1~H=ZSia|U-h*JAdWtlwGeSMEjuf|{aJ}>{I`FZu1S>Lt(q%*d&wy>{bzs2^DwVm}hYaaV_&KcYj zc^-0~=hR`J&XT~?#IWU`^52)g*Zgw+Df46X_w(P+e0Tr8=9~Msf^P=jAOC3jo%&Cj z(S+HQg`H&u^LA!e79my%wkURUj{ofU*)mv}S*|f1WBkvc!0`9q^}oV@Bmb=W9sXPT z_uF4Zzh3@4^wad0?C-We*8kr9H)ed!=*g7LRLgXlshzo;C6!f6|4;oN=RY@p`~JTCYtyekzb5={{oasQTw^F= z)MC2E)Wyuh;=`iO@{oBZvp4e(rVgf;jM|KO4DEDjO<$oRi-u{#ONBxiT zpWr`x|Cs&V@;B(;tAF|bZ~aeZ&}U>~+QekV+{ZkhIgt4`Qzw%K(>um3j3tb!j8_?w 
z7%u*o`=9#n>fiLg9DgtT+41MbAMU@Ee?R@z`j_%=_P?9|1pcS}zw+OPVIzYM<1)s# zjABeSOo2=`Osq`T88QE&uz# zyY9aIXJdHx|I+`>|EvG2{lENg=fBhcnEt>0clF=le_Q`8`nT%egnym?GX53+%lvob zU-VinH0=}Xr~Z$9A4NYoey;m6^K0t2 z$=|R2X!@o8r|*9wo25X3#7-$ai4P(o!tVrP_@xBDi7k|Al?#@cB3>hSklT`jk8L5d zCSx;$EyKLO3_kH z#}=+YUQNCTo+8e3Y>!x$Fsn1)VfxFsm7(JQoxiHT1%7aSGx<9I%bw32pDVsdd^`GG z{ioM2wcj^>*Zi^noBgkfK?QVLA^SfLGfolC4IGEqkFuq)6|+UMZDOru`Og&0xcR^M zzlc9Of3^L5^JD%Giytq(tNvK?^JM*WB;ld_?gm~HCbk`2(Zp( zwP2ghHj}M|t%XgGErzv)WgYVarhkl|81DYR@bBN>ioZ;M+x|%Yx&C|O@5GY9iBCKINsj1w6n7ipgMC*e==pILvl{o(i<^0(n{%isBb8UD5XGyZ@3e+`2$<6=f8CLg9M zrfQ~Eru|G`nZ7ffV9H|>XPUzJj=`Q`<$uxt+y8a^v;BAb@8rK7e>?xq|GVh#$-kWc z%>M=coAB@ZzwrO({wpvPF`Q(OVr*u7$;i)S#FW9*##GEC#&nXgnvtJz9)lNyAj7Hu z{r~O%-~CtmkMZC1zu|vl|MvZT`Zx05hJWw>S^RJP|KPtWLj%KI20O+*jN(k0ObeNA zGx0IAGoNFc%9O;!#jnR~mmGKlq4Fea$#{bR#J^pk5Km4!gpVL3}fA;^f{;m1< z{vXqS_5a@g)Bdmh|NFl+LmWdj!+eIP3`&fCj0KEEjLD2jj7J&L8QdAl7+y0(Gfrik z%jm{9pMiy8?*AqK@BP>KRpZz!HxGTshUM3>SAJ5~*E5ZL(utlswIzjfWw4X$+ z@N>T7JZ`*`d2%^;Si+d&S*A1b{nPtp{{6}4X&)xMKl-8N)2c6p-(!C$d^_>^{g?V5 z^1o;P;rZXj^pWK&>m{}Y>}T1Zusd@oa6DymVQXjSC|{kZu(_*?W>o^KA{XMJz~9{wZtSHz#`f7kxI^8X&gUdH`Q>zFH8rm@PiXLBTR zgs?}kv9L{MZDI*z&S$#B_=4f^|F(aMf1mzV`CamB!q0m@_WrQ?x%TIZpFKYh|BU#> z{rkgjw!d8er2j`VEN5(CVq;#x+`uBhs>-U&>du;KpOZ~UM9U-tile?9-a|H=Gg`FH#8gTLbc z68}y6xANbye_a0~{vZC&$6(Ho%+SPel7X91pHYX=mC>KkfRT@ppV5F(jZuK{Dnkc@ zH-iKN69Y2?1H(t~?)kp|mj7A*|M>Ue-`{`c|D*rs{crm}@qgd{Y5#BjH)Kd)$Y7{t zn9Q)7;U9w(;}?dL49gfA844J(8N3<17~B}F7%UmI8N?VE7@quJ{lD;k`2YC-Q~rPd zug74)pu?cepv7R#5Wz5y;V#1;@NK!<7`hm`7+M(m7#bJ~7~C1G7?K%IFgP)GF!zGm{R}LB_+3KNu~UPB0}gH#5It+Q7JnA&()OA&}wT|JeUB z|DXM<`FH;B{XfBfbpCk$vHBzbN8pdrAD%zEe^2^7?e~}81%Dp=dHd(epXYzX|Jwa^ z{cHR;{_oPi2mZGH75lsRkItWyzcqiC|6>33_2>Vemwwj%{Qu+PkCY#)z90Ek_f6q@ z@Q;9>u|Fk1r=b75{LA&X^zZ4v5Bz0dzQpxLP+mk^K!RJJtDBc!aKF$3;SVC>VzwG=+ zrT^RVANjv7|JnUtm}wI;H|sW5XV&*DjBL&9f*cwgcR5aSN^-7e6Jp_JDq@KL|Ky+f zzc+tY{5Jl5^H<5wrtcx&RKNLuTk);+``#a|KUsb){H6Z8`M2VqrGJzEZ((@F=)>I0 
z@`7a!i#5w%=D*BNEPGf!vCL=r#_Y(P%%slv;Q!};<^Od4J^Fk9?yVOm~>3GKn)CV_d+vhVdTbYjAn?pP`>YhGF)9_x~dQ zfBt*>@65l+|1|zx`n&P(%)c3bjsHgfJ^k10U);a8e@*`;{k!*1?SJoo(D^1)7_KpV zWq8T}Ie?BBj{$q4-Cf{CNelNFf!cyzvh3_fB*lY|M&l^`e*l#|DV=B zvwvm(mj1i@kL|z7|Lp%${~!2&|Nq;Z##F`v#=DGDnIf3w zS;ARMS)VeqG0kT@%Cwhx4)ar{Rwf>1ZC5?Vn?RHvK96^ZK{n@Abc&erf!Q`jzoZ;1}pNTe+VKKbd|$_|f&_$d3g- z9ezptO8A-Z)8SXiZ~Z^se=`0Y{B!ir|3ArpKm7gvZx=%p%V`c)-g#kDzbN-(tU| zemn5>;uqF0Q$A;Z*842;`RM2TuZO>z{qp#;>un_xj%xemnpE^=tR9 zzF%FxzW%!SoBglxKcoNW|Nr{`?f?7#+6*lW9~o*Gm6-N0nKHXE-(}Ke%3yrLFo$6t z!+wUZ3@nVt82lOL|3Cik+uxAC0)ONF9t5wU68rb=@7BL*e+~XR|6Tq!`k%`GOaCnx zY8dJniWoc?d>EP-ZZSABE@b3pQe*nYxQNk;@hU?#gD*o6gC9c-Lnwn9!|DIC|9Abj z|Nrpcw14yez5EyPfA0Tf|EK()|9|KIjsK7QzxQ92VJ-tFqZ?xsV;N%sV=7}dV-906 zV;G|qV*(@Su5KSjea8O`PZ)kOC^D)svNPUiIKr@tp`AgVVbA}D|6cz!{&WBT_wW6` zr~e-RyZmqUznXu||7!j%`*-D^?tioY(f=L)_x<0&w4T+GO_cQ$b2Cd0>t@ystktXs zSmW5fv$e7BVUuB<#JqxO7gHEhEn^Eq!T&%1R{cBt_wVofKiB`T_>ueL?T@8DtbhFa zuKHuikC#8#f8P9|^;7eg@o%+1ul|_+E% zzw)p1pWMIwf1UsK|B3q3^XI~!BY({QocMk9ch?`@zixkh{-*w&_?Pov?!UwTF8-VN z@BP1h|L-wGGO{pkVA#U&jlqLaiSZUgIYTVNWrphvYZ)RKS(%?PuVQv%4reZ7&SH^e z>tUP9c7`p3y_XNy>mSj-%76d;dHZMapE-Z-|8f6&@UPXslz*cCg#OL{=lDPI zzv6%P|J?r-|A+mr{~z~%^FP&py8qJt75{Vm$M(7GfA*h~;rIXD|5yH(VDMrnWLV2^nBg45HHJ?N zZy6Rafs|JVO#V|e-h!G8t@4F(T}7KV0)ECxC7$tfNTo(uvE_6(aC9x}{hh+^Pk znEik8|3Ckk7WHvgOb&*a~?zbgM;|8@Lp@pt3j*1yVsH~c;Dm+9}) zzh3`!|6lpHg&~V^;{W9TevD5Ul^La&MOhw$S0F?&Pi10fu4S%gTFdCjB+N94k%6(3 zL6Kq8e;I}&|Ihu)`ET_<;NQ%DKmMNjtMV`OU(3I5e@*@!|EKkD&OdpERsS~q+5114 z@%dl1zext#6oFZHh~L+t;5f9Ek={xAOb z^S?%>S^qEm_4qfBvFm^9pTA5EOnrYY{&Qpw{jc&@jp-j#&cDJxD;V6ECjMXbZwtc= zrnCQ_{hPp)!X*Eflkpnk^8Y>z=NZIU7XI7y{{wR{^ND{O{zWpcVG#eH^8XBjEn_#M z-=9u~KqjMq`~F4$kNH3Oe>8*8f5m^V|K4Xh`1jTSA_heU0S0e|xlEh>RWM33888O^ zoyc&HrTqVyKX;hrSttJ!`qv0LPvcMPpHBAAOjUn^8O)f^|7rL;k!|nasekq{-C$My z`}NN+=BNLef5-gqVE*w>`u|O)t4vM*%o*1)&SUuX@6-P`jQjr!GTvog^sn^SQKn+% zeLs@_x3ic0llfD^B+Yu~Z`$vxjG_#mf0;3>GXME``o9tDtUp(N*)W`ERASKk|Bq=7 
zvmrylpNmY_7(V{p$Y9O-kkOGLkb#eBFYB?Nh5v$>c^Uf{CjLM5e;IT2fBD~wnT(m& z{9N%jf-&;n%D?~rAOC;t_ocrI|M&h6{yXbmEu#0zhG`<*!Fk#-zUsE|IYvW#dQ1ctiMlK@Bh*NrONn= z)%4%Qzi}+#|H>F`8BQ_i{W;2@#_;f04$~8+Iln#sO=k^bu>8H9F@yEypN0QlGH+p6 z{ps#aq`FBU-I0}|CIjTVU1-9`YF!D!=mx) zHDd;=>u>en+^i!1CH}YmHUH?lEH>4iJ|$A9*g{c&~9i0 zrn5}Bzv}+4WZTD(_4g*@R>t-JcK_#R7W>Qc%a`>vm*e~1znna?e!u>)h9R9J`sdf* zN0_63xBk1zV)KXTH$R8l|Jv`SzkG#Dy z=?tR(C4Lz(`mk5$Z#|I_{-{d=0B=ASN;`2T``b&RY3*#Gxqtoya^-vuV;zd~#tOqSo)ux@0( z{L`G#h#`)t=x-r&FXQb$O$@7mqxgS~f2E9W|5OPoD!^Gd!%yn!>e!uzU$N7a}$ESD9@7NZ7vHn}e!p3yyyAqQTyX*IhzXMs% z{Xg;5p0R}O*xwc3uCcl^D1G0?diIy)zwfNG8C3okGHzjf^Dq8)8B+~Mz_)e(SveQ| z_x!!_-#S*6|HuATFlsP6F?cY>vR(bb${@-9`S+4vADOPR@ca^E5@Fu_cjNENjJ=Hc zzjm_xW{>&)flx_{>7+{kqD^U+^UeCs~0{1M2r;=j=MW|kiuf}eAL%kwV({r$%hHW}9M z->xz$vo(KbVr*gU`G5X5534ha!Vet=7B;1SAAa8byPV_OuP1-xnY9^Ber5h=VQc-m zkzp$P|DTWkOklt9*W=e~=BMoZzXN_Ua^GT|^(EnNF-r>byPs8my*XDi@ciUvT*@5$ z<1^zkM$z9gi~?+B3+!3P@gS@I@1y^n{_)$){+`d)!yNI` z>3=1g@xPaUm>6d=eEt83f$RUVzw6lF{`3E3#Ju{oT&G_TTm28El9Co&QnATFyQ1i_~8R zK4r%BADUT$1(-gof8WQug4z005YrBh?>{&FvE{7!`{t(!QvuV;pXGnvvVZ#H{c8iG z8*{*4!9RysBADL)T+O1y8u4qv-<@1pee6M5p&3@v0KVt~< zzI`HTs_Km2B5xB4&l_b)>W^CQOfzpVe;GSC0p{Ob&p6J!2w;eUP1C;xBzJ(ck< z)16;T|Ap93{LT4m%D9@@=f{DcDcpM)UVNU*{E*f5$J4(`%(?#s{=Q@0&$#ukDAN*F zroR(^c(68eEcx2}=OEAVU(0`&u!*t0_$~B5onz9!&_BAYSDCGTRs4C*ag?R@)7F1y z1x3C-|1_O%6~m2h`b-imQomdN6tI*rDgI<*2;`FdVg1XOi}BCgZ@XD{vLE?n#}LN$ z>~|`|dB(Z_YX8?T-C(r+{hvjcY1dC@rcaFJe;gU5nLq!U%P^Bw_%Fl1GS*qYZ~mRg zlK=DEzg4WWe}^z^Vn6*W{I4_1*8fj`Gcu|$Xa2v&Ai((UUl>E$e?x`^|2Y0UWu3tI z^mjKSFLUbOv;TfDv@*#5`|u}#edfQJzvG$n7##n${=30);7|2G4VIWc%l~a;nZ}^^ z@9^I*jKR!yf1>`GG6ym~_?5zb{9onol`NVJpiyc~w%|XH|Clq}VD|Yt|3@^J%iovZ zZCU>^D*U(mvx}fq$=ZO#P?wBZO%Z%d!8fen&8^XLS8j#x$2j>hIKln;4bZw)}eZ zb1$m~WAKmlj4JHQe*XWJ!Cb(!_1EEl{~0zj?E9DTHBV2okZ z_>;@?f;toCo)+w zhy18#vSYsfXUZQT1}oO6U+4a2GKVpl{PtxmW|n9C&QQj*^q<{dDb_=OWd9a1tz*h% zO#b_y(c|y`e?|-(zls?ZnIHW1`5VF*%bfa`?e901(*GC#tYTsMvi|6 z{@h{S@t5h}S|)GCb$^{0s##V3E@0^RH;;J+^PX?IEVGzJ|K0i1%&5!E@bAu_8~?U2 
z9ARYtEyeJXCGS@pV+_;w|CaxHSg!of{aed4`QH!54gWm;E3zs7Rr|DDMo##H`4n8A!`52MrX%Ph+ong0Ya zaWNnI`}JQmQ}@4re^)W}{9pV3<^RIJ*{lH!zkYKvu(HJdKmFqiqYm@^p9TMV*{lA} z{+Ywx%b4_&kx87T{@?aLNlb~168{wb8!#j>TK-l4e~NkUzmI?CG8i!>{FV53k;&w* z%D+`iwu~)*&oLS?Jp5b6;Lfz?Z{a7!UlN!myJm?cdqIbC@>%SNRvoAjWj;zx%&mOq$HU8IJ$)Wxn(O z@xQJA85sf@vj0tG-1A@bZyyr}lkxAr49$#E3}TE|{)YTdW4rQu&!2X-Pk#&lEM#5r zukgp0VM7-@k(lFaO#wSTH91o%;Xk|49sW3`c&KGTdX%{dejQ z9}6!-+5ba~vi~+Pn=+aF75i_`{E|WFukF7-jCYvTf8GColKI7-9}G_!3jg0?aAMSD zVEDI=;WRVXzh!^7GFvdE|DE^GiSZqyB16>QaK=}RlmBM_&1N!T$p3eNQTTuM-+zpv z3@d)G__vgCKSR%-pA5;2JPbh$yBI7PL>OWjPW`{Z5WrB(Ao2e<AwX`*BFET#W5~t_{?C!xawauGaqBxe;LMk41fN5{FP$#`J4Iw5977pj!f4W z#Qx+nXfkF0Kk_^0|6|60f7k!qVVuGEbs zwEw%DNu6=m|KR`e41tW=f3GvJF))S$ag5U$c^Pi}Kg~FYVfw#nhM5eS z47m(U4730L{Jn{CZ?nR7yK?_ zC}TMM_sYMiOq&0{{}Ew2!PNZg(|={=8GnTS7%^^S-2U6;FCVii1H+#-{1R8 zYyWHfGx)cjaT>$kzh3{pFm^NU{d@Jl2xA!o_x~r1FBupZ7#P`@ZvU76`-?e(f#q)z z^LnO^Kk@%$S++3L{B`}mg6S1Q>>pVMCq~e05zqhU|8M=f^go)x;NO&g$3c4y|E~S_ zmVt}m(Er%~E16Omy#Mzwz58#;_~@SoLlzUqzm5M@8E^d0|EIvXiJ|VFGUFr$&3}9T zKl{J&za_)-|5N_`_@DfL>%RoXFaNLouVGMUboi(6pOK;dpWpvQ44eL`GUPLH{g?l* z#!&SCCjSHfJ2P1^p7}F{!IJUJznTA6Gu&h-XSnx&BEzizR{tI{wEj2ze}tjz z--Unfj4A(Q{y8wT{15tn;Q#LbUjGj>m@#hoyY-(w)7$^4|86kIGhJuc@jsbSgYoA7 zsf^$LGyGr5WXDkUSB9~XQSASj|GkV485#fo`L~m4HUsltL54uav_ISbwK3=ayZ={+ z@dM-i|A~K3Fm*9<{4-`yVa{Px{HOi@6Vs{xQ~o?=Ji_$m-<$u&%##^-7|Iz8nR@=u z|96O?m0{}tw|{y5U;W?mZ}s2P4Ez6D{AXp3{?GPLmuV{F!@u4C9T;Q&7yfT!{Q3Xb z|LF`WOfml>|DIrKWKR97_CJZGnlbT@2}2>%*1sSAZe#fR|KGnC3=jXy{Cob7f$1}Y z@Lw^8OUyD12mc=Zzn8I;Vg8@b|0Xg2`Y-x-1LIz%H~(t?l{0Q<@c$>uaDyT1KRZJT zQ|tf!KmQn88Lj`m`FD`$p7E_ck2HutS0{t{*Gll!L0P3^ItY|DC3LYGXDjcr~a?|XTebP zKb|p|=^H~Z!zRWA7B{9(3^B~EECGy<{_kg8$;8Xl`0w1m!%T9FGyY}%+x7qJzk)wE z{=EKo<=_23sef1h|MySgU-Q3!zv};t87}+gQ1#nIb$5dw*UYCU;WSWpMgP>LGAyw|B4L%{$>CB_kRJywEso_Z5TosQvaX+ z|K-2V|LFhH41o;o|L^}-VlZZy@&C$yA%>a%pZtIMe>Vds<2#0P409Qx8KyEUVX$Rj z{=f47GKQA_H~ua8Z^NMQU-y6a|5N`T|KIZet6!_oi242=w-3tN+Ff@eFVNKmDJ@5W~RAkjucs7{u`We*l9E 
z!-M}z{;&8S{Xg)3{{N)^7XSJGum8XFzsCO~|J@l_85aDPVd!JfU^xEYnBg-+3d7U? zg$z3w+!^luH)2p`kY;dXnDM{%|I7c93}^r6{-68*?El06oBuEU|MvgB|H1zq|A+jS z`(N@u_`l%)mjB28um0cr|K5L}{|f&n{Gb27>%Z~;Y5yPm&-`!szwCeM|DgY+|NH*? z|M&f$_}}$^#Q&xL&;Eb<|LcEo22%zj1{MZihI|Il%(XYe1%~?!Eetamm>78&Uofm? z;A2!^1jWiShF*q^4DT2wGNdq^WH`={&fv$8%izht$?*CA^ZzUiJPeQiKmC8}|CRqg z|DXAP=Kq8Lh78&aYzztvj1002FaH1d&&R;YAj-hbpvR!e;K2Z@-vt=-7=Hg>{~vT_ z{jvX7|8M)h_y5`dUH_Z@pZL$jV9FrMpve%(kk63C;Lc#oV8{@}V8o!u;K$(1V8{^6 z;KLxwAi^NP@cBOz|C#?+{@?h2!T+lN-T&MFH~nw_@BP2}|GfY6{vZ9n`u~>y zcmMwepUz>+AjF`}V8u|+FqNT?p`Bqj!x4sS46_+J88$O4XV}fq&M<+Yf}xqAiXoF> zF2h^~PX<#4Uj{dZfB!%IfBFCFe|82|hL`^z{ug85VEFW(n}LVn*Z))hw}Q{oKl^{} z|EvG^{a^q8`2RWokN)5Bf93zZ|L^|a`G5NVJ^wfUU-bXX|1bZK{y+Nv|9?IPNd^-J zTLxo>V1@|{D;YW%CNtDC#4&_3=riy$@Gyuoh%xAbRyF>A`2W&>9tK+mF$O^fD+WG> z=l>ZP_WwWiAJjH^{QvKNW(Gb6Mh0F6Nd^gqPyc`Z=Vkcx|H=Qq|G)iz@}HN1f#Jsg z{r`{rzw!U{|KI!XU~Z$e_%i#K6w*{r{K$f()RP4_YH)%8(l?2{;&Lh;Q#Xf8~$JTfA|08|6Bk6`p?L4>HnVp`~M&Pf9n68|6BjB z{{Q9w%l}XRgHBwRWRPUAWpH2s-DLm@19Jv#25AP6Z|419@qgX_9siH~U-Ey}|C#?+ z{qOic>Hn1fGyd28Z~i~=|GfV#{}cYF|Bw8i`9Jgj%>NhuU;N+xKlA_m|1bW7O0w_& zzyH7U|IvRw1{Q|9|JVP&{r}s40R~A1A%^e&)foI36c~INY8e_BVi}?sCNjh_m@@=0 z7&GWHSTHy+m^0WiXfpix|Mx!&0~5p7{~!JfGrazP{r`vmul~RI|K~p_@BjP%9ff7&8PiGd42cYd3`q?33|0)` z4ABh63|0(o3=Ry=47LpV3<3;746pxp{Gajv`Tq<5m;c}P|JeTt|5yM2`Tz6(>;EtO z|MdU$|BwIq7#{z>^#A4moBu)O){p-J3~UU{4BiZO3_=Vd348jcZ z3?U4046zLH3|b6d{@?q5=l|RPU;f|tf8zg}|7_rVs>NW#V8S5G@c#d+|DXT!F#P`i z;Xen%kN=NCsbqdWLcaJBAR390q*`Z3bfoEe34{J_a2I zGX@<7MTQ^$fBb*_|JeVN|8M=j{eRQ{Y5!;ZpZb5^|M~y-{{Qg*&Hpq1SN`Aq|MLIU z{}=wB|9|rT=Kop$YyVgOFZe(A|I+`{|8EDU<{keJ{9pHf`TywgvoWd?DE|NlX0`R4zh|IHc98LSvY8Dts67(n&?-~Ye=%QDC@h%o&5&%nUR@b>@b z|0)c^4A1}n0tNbiMg~y^28M_KnHj_x_!+nvL>RvQ{|IhX-TZ&+|B?T*|L^|)@c$Wb z8sG7M*Z<@HkAUu6`48IXaN_^t|F{1i{(t@dt^bezKmY&y|M&l2!6n*ba7p>{KRW{- zgD!(QgF1sfLn1>gg9Ae(gFk~UgD*o0LllD>gBgPngFAyCgB61SxTT@Vpv|DjpvNH3 zz{$|au$>{8L7KsnA)djAA%vlT!I^=HftA6V!IMFn!IHt0fs;X=fq~)R|4aYB{D1QQ 
z%KvZwzkoxSkAaop?|*S{dFaC+#=y-W%D~IO!|?h4@Bd5;Z~veCzyJRRaBT-_K{7Ka zF>o@lFerlWb9(cilR<{TlOdMD7+kxkF>ruOi&LPRzW(q2zy1H!|5yHB{QvSlD4alH z1Zs0}FmN*5`G4;Jr~hC6|M;)MV8bBHz{l|Y|I`0B{-649!0`G1ssDHXPyfIC|MUOb z{>T5H^nd;TUT`h3>;J+3bN;XXKkt9v|DFH$|KIih?|)tfW(GwD9R?F{T_wiw_y58F zFaFCjFfqLR&&a^eAjqJ>z{c?U|L6ao|FbfPgYEkHUz#C;A%ww?!G|G+!Gpn)A(SDD zp^>4SA%r23A&nt|!I^=X;m`li|9|{{^Z!1$47>IJ@BhdDKmGsw|I2?vh7bQw{eS;o zlEDxhTh6IR4BiZ449X0q3{ zWEof(e*b^}|IdG61|rX|1bPM{eRE@_5aWP zKlA_S|Ly-b{Gb27?*Fv^i~eu?fBFCU|Lgv*{eS%bt^WuAFZ@6C|I+_^{-6E-=067m zFM}|H1h|w3)gML-P7F5S)*GnpCc>b>pw7U~aQpw$|NIO}3~CG>46zJZ3@r?m3_%PI z3=s@b3=Rxd42}$r3_1+D3;_(b4Dk&44A~4V3@r?W45+gzv};Inrcm7}azvch(|I7dH`7g|%!NAVI#K6SB$iT`V z#URAM$?*06tN-`^-~NB||B?S2{;&DJ`2W2B)BpGXZ~ou-f9e0j|BwDZ{r}GYd;c$h zTeKIzb;8~MkN*Go|Nj4j|M&j?{Ljkp?f--Sum5u~Ffn`rr}f|ezx{vnpPNCPL7qXF zK^5Gh&}YzRh-8Rjh+qg}2xACmaAoji2xf?3$Yf|^=wWDMXlE#5C}L=3sAZ^NsAh;~ zaAxpfP-S3cc>n+0|6Bi`gL^Qa|8p=%FmQr%E++#Q0}I2K|BwH_`2XSme`tH?|9?i>=Z_y52CfARn6|F8Z(`TywuyZ=}JANc?7zc_;l zgENB&0~doLgC>I^gBL?ILl{FOLpVb$LmopbLkmMLLl#2@Lp4J>gC&D8gDHb911|&U zrY8Z0xBuV&|M36ye;x)+23-an(CvW?QVi-~pTGJ4@&Eh(KmW@!$T8?Jh%?AB=rVwI z7z;7TGrax(^Z$?k=l`GkzvKVR{}cb${V)69^?&>SUH=dLKl}ggebf>Od0eUR5C|6l(9_Ma2nud-sWW3XX}WGG=M zV5nrM0{1SRz$?c=EeS6McLraE5QZ3rbcP@XI|h9QLk24bNrvD5FaAIK|Kk5+;F|u* z|6Bjx{RibTPKHna*%^cxxEWX(SQz*jI2i;PWEdD3I2be;q#3js)EJZ*Y#8hr?7=m3 z07D#uJ%cubK7%9!7XuFiXoczb|Ns8~`TrN(3V;3o;r|E!KmNb-|MdS$|L^~Q@&6Gx z-0%Ir`~M@j?RxtEiT@w|%P=@GBr;eqs4|!`$TNKZ|LFg7aR1@x|5g82{6Fx2&;Jeo z_k-cW|K0z){@47U_&V9DUfV8md~5Y14^kj~%&&i7gjmJF;6Z~rqgC^9fJZ2te_KggFy|8Mz!`#%%I zz5o0Ft1%=n=ra8LZ^QuVFTei(7JRc_^Z!}kUiiNMH^HM2pwt({kjh}kAj_b~;J}~@ z?xR{Tm@|OVtPle$0|$d1LnXrmhD-(z26F~~27d-`hA4(`26F}paNWei@bAAAgE+%; zupjP&%c{Tsxfnzk{{6rA|Kop7h8O=Y|G)JAF1UB}@BjP%KmKzu2r&Ho|L^}#a3Ac< z|AYUZ|Nr#=`u}I({v~M4;Kl#X;Pw_f!`J_O3|b6^48{!h;JDOekYP|`Fk(<;Fknz% z0IfmhX5e68W?*J`_5bI8kg1?lcIW@%|C|1w{C^eP20#4&=YPG`49pCy3okU^Tkgu#eGodI-vo+1N3xL*4E|K9)W|3M>ffBvg5a4~%TzwH0D 
z{~P{)_%FcF_<#HVCI657-~RvD|E1u(1geKm{XhHv>;GH-Pyc`SpP507;oE;z22BPh z1`h@Y1`7sHhGK>uhH?gP1|^0c|Ct$d7%af8)F1!v{J#qhpWFX0gKltT;9+25`2YX= z|1bZa|9|xV@&9N4FaN*t|K)!+1`dYL|G)lcW%&C4`Tx8BPyN67|H}XK|F8T9wH|)` z2bFkY3|tIv{=fPE{{OH4EDRzH;tU`EKl%UQ|0{6nWnvIy5CivfLA%?f!KJATc!UZx zK64k`etZA_;s3|~@BC+FkYV6p`1Swae@=$~|HT=U89?oDIR-w4_x~UN2bDy>|AWd+ zJqFOIpbW#$|Dard|NqPX3=E76+zdhtpx!AvgFJ&6gB*hvgDAuI|BMVw48Q(=1&_*d zFgyhJ=0K@kg+U#h;y_~`(hR>rH_iR$XOLq6-3ki|2~b}D{~vOTpf!UnLjXe*Lmk6H zhKUTN46O_^8KyB*gID+lf&0Qn3^oiw4Dk%!;5DMm3_t!q`2Xnt&;P%{Sdf96ff4L4 zR&XeT@+3EdEQ375=l_?$ZHJ}*C;V^z-~E5e|IPpJ{6F-6=Kn76NC>Dj*!F+v|7HJA z{D1KO&i^a_SN>o0f8YN{{}~v5{r~v?;s2}u&;Eb#|H=RB|3P;wN;3#BuroaQe;%B& zpZtIG|JVON|G61V7@`=w8N3-R8T`PxQ;|WIfrH`p|AYT`{NMTi@c*sw0pvnN+RRmgx&By@i zG3hdxFld8Y;6~sPQE>)V22ja;<^S3LH^8a!&3{lU4^*40GH8K!KY>Q9fBolS&|pwv zkYtc&P+^c}5MhvI&;*xC3Jfv~rr=)sum3;)zx@9U+yi_H&N)~9-~9jQzbu0egB-*6 z{{jrk49W~53}Ott4BQM74BQM~{@?lk==7{V|DXJS`2YKV7KU&CFa2NtfARmx z|9k(>{6GKytp7{FA$;@ym;Z16pZI?pJVMFD@aI1V0~>=fIIk)*7=rVUDAa_L5V?u!H_|XL50DA!H_|fL5D#UJT@)EAk4r3ZvXuM z|MCA%aLof6sd@MR^M4kGkKlgGhX0HI&-lOU|BnA>|G)hI<39@nE5pzK91J`RkP$l2 zC^RSqg2vE5ABRo}wg!IJhR41@|dIqo1HY`JexP zz~k=E{y+Wy7@Yc_{(la-;h8~>ft>-AFCl#Z4hGO@urz}t13!ZhgARiOgBEy{#F9ag zL59JC!H>b0!I(jVftP_DTq`OwXfnt#h%rbqXfT*DxH9-NL^EVC1TdI`$8V$@H|Ihw2 zFz_?H`oH7<(*Mi=@h`oEdx>q8O?f+87!b@)>d% zDj6aeoEV%K6c{)e?)|?7?p>=hh%vA+{QCdvzbJzOgEBZj$uLMT@G>Ycs4yrp$TLVY zs4*xqSTp1^B!OFi+2Ap@6oxQ{I0heZPGeyZWe{U{_5bYud;eelzy1H=|L6Z7{s)!d zcm5y#zxe-#|GWPm1&?k2`2PYdqs*YeV9dbFaOeMn|DfKwCWA1;-~XT*=GOlg|Ct!R z{J-^o$Nwe&cmF@}fA{}g|JVON`2W%WOaG7jU;lsQ|M~yh{`Y`yYF!6zdA#`#8og6u zux7AhFlW$b&;Uby1{Ve&hF}H{21f>W1{Vf>23-bFD>s56l);HX7TkIR-CBL@|B?U4 z{vZB-?*G&OU;f|!f8hVN|A+sd{=fVG?ElUGYyQ{$ul`^9zvzGA|C0Z;|0n)m{D1HN zNB^Z5j2L_wVi-~xY8Z+ciW%A%`Wb2&vKfLHKqJ1n3}p;a3{DKr43P}>41x?o45ADg z3^okP44^Vik3pY772LM?{a=`Yg8`IN-u!?3|1!AW`~Lsk{|EkW`akFY^#3dW@B9DY z|A+r~{@?n40^BDA&GCEzmnDz?U;qCG+(%|&0GYzUAk84hAkU!1puivt-si%`@b&-K z|G)n8GF<+@?*HZgPyS#0fB8SCWEWsiWzYk+L-ZJe7!nym8Qd5g8RQvcz&X&5!HhwG 
zL4e`M|0n-n|Nr)%i@}gViQ)TyPz!^HL5V?{L5M+;fsNtK|3Ckk8AQReXrP%0P|1At z|B?TP{vY^%>i^OIhyEY>zv2IKaBkW7fA9YT|DS<-@gP62Gbk{qGKesM#;3mhfBPR) zYH%@#G4L?_{SO(NVq^g2!9V|58JHPBEpAEhcmb$14gt@QWiga8G%&O?bTi~L=re%E z@l+W?8T`O~IuQmH1}z3T24V25&;9=o{$Kom^Z%LuhyE}5KmY%#|6BfF1kX3V`Tynr z*Z*n^{tQ735e(+w{j;FF_45Do|DXRq_z!BOf?EA={xdNA{r??2LiF{&7y}=}xBsvH zfBOIZ|Be3#|L^~Q0X%;R>a&2x%t0!j{yzuqWqtYo@&Ake*ZyDrfA9bM|3VC)a{D^C zFZujG3%I4g0v>S#weCQD=O6!B8Tc3k8Q8#MfbtBWz8h$495ix%@c(Y`$n=W;tNzdX zKllHd|HuDd2bWZ!@uo}Qk+|poFaN*)A2fRZ>Hn|)fBy?G$T8?M7=y?EOc^X0tQag9 zj2X-rv>Burv>2or1R2yA0vIY8$`~pcni)D6S{Zs7+QDU~1A`Ys6nOuX6SzbZWRPWG zV*u3{FaE#&4@$2O{y+GC=>PQp)Bi92zwCeG|H}W1{$Kh3`u~gnpq3+Ol!2FllR*|- zVrMbLF__W!y6=l?JLzwQ5p|EK<6`+xuc$N$U>k_@1h5vUEA!w}El%izP{&fowZ zcT!;xWe{gjVo+g_V&Gu-^Z&R*{a^Bb&;OnOulxtKZO;C`^#9cVwcwGq z7vS3T^M7Urb_Ny(Nd{%;NIa-6ie|`V$Ycm-umiVsKqEu?;4v}K*c3Oz@BgpBHN~g@ z*ZyDo{~BC7?+2TD{{N%@C;x8;*Uz9)=M(?W{=fPE=KnkYpZtFXF1OVgbQwT1ah43A zb!mYNISfq<*$kfG(%Xq4n!%Pql7X24)DnFC|JQ$Ea4BTX;KAU@;K5+ZAjrVWz{Ma8 z9zOxiB7jB;p8W@@6ky44ftzy32Zy#LS5@bv$u z|0n;mGTi$w$MEOB8N<8(E)3`Yi!vPgug!4kza~S-{}+FL|J(C--9NW~P5)N>Yx+0g z->!c_|Cju8|KI(;?*F&{v;Lp|Ka1h@|3?h|3_6Uz8Tc5(8QmEtFxoQqG3GJuVDw{D zXXIu)$8d$=0YewVQic?UYKC@(N`?l85C#thd4@CwTLxPOWd=TQZ3;>YppkPG1}_F1 z2499ShJ1z$2499~h5!a%h9HJ01`h^11~&#Ft{oonz!~ZY*|M>sU|4;v4 z{D1KO;s1O8-~9jm|Lgzj|9AgC`TzF+bN_dN=fpSsU-^Ii|JDEd{ulmt{ICAs{=eUU zkN=wgb^p8m5B~4>Km32{|CIl||NH+p{;&OC`@j8v+y8n0m;c}Y|2=p%B`!EWxdJ1qNvb4u;qN|NZA@0FCxCFn~(Xci>v>D0t)yG};Af-MTSkGK4XtGbDj~ z$QcaX3^N#JGW0NnGx##3gUg^M@Qhe0LlHwILlHwhc=R-oA&w!K!JNUKA%r24Aru^E zvJ4swY7AW9xt)LiAA?WG0gaJ?dVW{_@B4rE|Cj&A{-61O^8bndhyEW2_tHS4w0FU4 z3s(NW@c+{P6aO!ON0eUwzx;nUc#f2pL5x9`L6^al!H*%3L7zdBL4e`ke^9I0n8B7o zgF%Ranc?LBWB z{~P~b{J;7C#{Z-L@BCK)w}0FjJQ-XWk{Bu(G8ift3K=FatYYY5Xkh4J=wm2hC}fCW zaADA7&|)xQuwzgFj|s2;zu^DO|5N|Bf?MZ%{?GqE@Bi%oGyhNdKkxs-|I7YQ`oHx5 zlm9>evok#Yf9C%c@HjSTbng8Bli>9zSO4GofB*lx|LhD(3?dBQ|AS_^RT-?nr$@Lm zxH9N5*fCf!*f6*;L@`)_`|nZ=Vho?avpjeI-+<0+{rdm;KPQ6}!=L|`!1Lg5|Ns5} 
z|39eR@$dgT@QB!@|409C`@ib{y8nIutNs^*=l2W#xBt)iU-*9lczk`*|DOMg|DXSV z_y52DatyNIQG=iVxxj7r5C$Ix4F*XD4sf~j{r~O%PyXNgf9e0D|G)l=FbFgJ{C^!h z{{d)c(4g#HH3R;W9#PIw73veId4!9k4{=l}Enzk9nDuW4w4ucMZGlMCED1#hB z7(+Ef0Yf-LDnm9y9zz^M1cL{IJ3~4{I72u?5Q7tV2YNX}5<@6M3PTe^FT-STof6Ja z$WY7B0v5MnumsQiM1topoEZEWG#I!TOd0IJBc>m~>pDQa;P>D$JU;L&At;79b%TUIU$ngEY9)lW#Is*@b9s{T>XJYv9|KtDv|AiR-{Qv&{*#8s%xBox- z|HuE2;584RQ3cT0r7FYy|L4FXw4w|@{&R!t(jW#$h9rg}hA8kTi5z&u!v-9Opc#)J z{~!H7@_+CDwf`4`S3G?E4;pipVh~^e&8=}UfaY}=8MGNx7-Sgu8Dtog88{fe{by!) z_y5fQo&Q&WS09}JfA;^L|J%T`Q2YLG|G)D8wErvq9|8C67X4rTf7$=3{}=q9^}p|b z>;KgMdH)yuKLDOneEOe(ft%sme{Kdz1~mq423-bU2GDvYV+Lym3kD+w83q9c&`hcf zgBXJpg9d{OgEoU4121^A_2K{1|2O?V@c%M+X8jX*)L)B1k3oe&2|T+i3ZBD`WT;{& zV2EUJU~pkDXK-Q&U~p#$Wr$$#V~ApiW{75pVDMsaVF+W0W$<9IhmPxkW`~^_>=|4b z92g83^uV)3_rd;K`+xKQJ^wd@S6DXu@BP2_|Ka~P{%`xg`u~pqtN(BKf8zgJa7!K3 zr%_?>VQ^(|Vz6USXJBCfjpTw#)*JuN{onKd#Q&fFxfww75}(-}@i5lIaonqy^C2^h5B|4;ir z3p|E#{{N}}3;!?tzwJM0&iB~=Yv56!SO5S02epJ*7$9TH@4)NwIKX{waq!AN(9GwT z|NsBL1fNy&1>CX)m4>zqi45@!kqmYWS`4z_ey<#Z3vz2!jQwz z#!$_W$B@9_1YSMI#_;z)0|PU7%^WCI2r+#6|NQ@x|DXR0GAJ?dG4L}8Fo5P&jTjUd zgc$@FBp4JK6d6D(3P5=QG^6Cmpvds&|NH+}{%`!h`~SiJ=l{R_e;8a2-~0de|Ki^sSpMgtyP&*7XCcpFl!vAajpZfp& z|JVQT!828$kp&3`Wd=0{H3m)wb_P!d(5w__c21T-nL(F9hQWq`pW*%g6aVjkSAQ<} z-|>I){|W!w|M&f$_kYX({r?Yx*CU<$f8qc0{{jpe41x@xaThZNa|Y0S5+4H_gB*h$ zxQ}21UUTKmV9(&nV8&p=5X6wgkjaq95Xk`QnFcb1fk!YwYqAm;{26?~v-F^uuMhtp zgV)M|dUqfH|NZ~^|K0!Zz@tEn48jcT44@GL(6|dH18AHK)PIm;U}bpmA2jy^8c$(k z`1l`GgS`I#9y})vI^P7;Li_!ni$M=O&aBCx&Y;iW%Mizq$`Hz+3!cvh)pZ)+(LZGd zQ3lZX=cWIr|DXPU?EjVjFTm}-%l|?BhWGzLW7D9qzJ34qg4-gX^$>Ud|Nk${@csYw z|5w3dSa<)w{{I&|IuBaI0$LpeTFr6j|4Q&E=}vI#ZRP)-|5N`j|Nr1WXf4jW|Cj#n z{(tuW7x4TQJA)8|ICxf)pMiye8(hW;GsrT4+NvH5W(=C(cA_4GEQ2fqXf+8d18D9M zG-3l<9Rix^U}E_DAGBHllt%u7XXj4-zXTq$`Td`RK?Gd7n=p7YL@>lMM zLKzwun!u;PwKMcFbThOuv@pzPSj*7Q(8!R_5DZ?U=Ez{qpvs`gz{GI&{}S-Z-DUqb z{$C4T8!`KT>;K9Bm;ayhA2b@W|Nn9D>dE8(kAr(vJN_U3{~YYfhyQQ>zxV&i|65@3 zpkZ;M4-@(SlY<|N8#{v>WRG!~gI9fBAnGysqR8IEQ@s 
z|KR^~=m?Mq0~^D`|HuCy_`mi4I`FLC9`JlLXcabSCC2Cf@Ba%k=rVAEYetYxP!An6 z8$9#>^8d&FZ~H&@Kd3g@^uO$X&i_8}`ryO=pMp#68~?c(j2Sc;6d07ivpk**p!Jua zc62a98bdTgCPOX5B!;;RlNhEl%wm|tkjY@ppvEA@z|FwSzy%(O6=eXeLk5izSTHz& zO#-c*5Cr=jG|L1k`ve#i8Qd9s7+e?(7&I9`DbAR|gh8D_iNPA&y8x~3=3?Mt5M%(Y z#$aOj0WMoE{J#TUkMbS7zH;UN&HuOlU;F>a|5M;TGpLLNg*7_^XslI-L7%}2ylOd! z!I?o1ypj^M4)`H>{SK(#_4z-j@4Ws0;{VT45fenpZ>r1|KESmS|1q(AqF-E(3m!8 z&7T5;I0G}d<)_TR$?)d?6Y$E-KmR4bYgpA8G#FGFI2nHa=LD}u)nMRZ0F9r5dhp^5 zir|qpbp|5_Qw9|V4h9AW0r2=cX!Sp6d=9jV=F|Vj;Pu6zJPaBK1IkOr^#2ieHYz{c<&JoX1#xB3p8+E4#K@c+pF!~akI zKMP*HdI`KY<0^Pg4K#cC@c-@qSHQK_i~sNbgGOt=gI7#~`qH5FdZ3z?i-DDajR90H zfZCg&QB!8JLU|at8MqlhIyu2By*L=y!H^9+i^>XK%fkX*YYtj@0dWarT^%C>0)l$U c_%V2~Ee-;|&QaOX5Eu=C(GVC7fq@MH0FvK)G5`Po literal 0 HcmV?d00001 diff --git a/native_client/test/concurrent_streams.py b/native_client/test/concurrent_streams.py new file mode 100644 index 00000000..51b99774 --- /dev/null +++ b/native_client/test/concurrent_streams.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function + +import argparse +import numpy as np +import wave + +from deepspeech import Model + + +# These constants control the beam search decoder + +# Beam width used in the CTC decoder when building candidate transcriptions +BEAM_WIDTH = 500 + +# The alpha hyperparameter of the CTC decoder. Language Model weight +LM_ALPHA = 0.75 + +# The beta hyperparameter of the CTC decoder. Word insertion bonus. 
+LM_BETA = 1.85 + + +# These constants are tied to the shape of the graph used (changing them changes +# the geometry of the first layer), so make sure you use the same constants that +# were used during training + +# Number of MFCC features to use +N_FEATURES = 26 + +# Size of the context window used for producing timesteps in the input vector +N_CONTEXT = 9 + + +def main(): + parser = argparse.ArgumentParser(description='Running DeepSpeech inference.') + parser.add_argument('--model', required=True, + help='Path to the model (protocol buffer binary file)') + parser.add_argument('--alphabet', required=True, + help='Path to the configuration file specifying the alphabet used by the network') + parser.add_argument('--lm', nargs='?', + help='Path to the language model binary file') + parser.add_argument('--trie', nargs='?', + help='Path to the language model trie file created with native_client/generate_trie') + parser.add_argument('--audio1', required=True, + help='First audio file to use in interleaved streams') + parser.add_argument('--audio2', required=True, + help='Second audio file to use in interleaved streams') + args = parser.parse_args() + + ds = Model(args.model, N_FEATURES, N_CONTEXT, args.alphabet, BEAM_WIDTH) + + if args.lm and args.trie: + ds.enableDecoderWithLM(args.alphabet, args.lm, args.trie, LM_ALPHA, LM_BETA) + + with wave.open(args.audio1, 'rb') as fin: + fs1 = fin.getframerate() + audio1 = np.frombuffer(fin.readframes(fin.getnframes()), np.int16) + + with wave.open(args.audio2, 'rb') as fin: + fs2 = fin.getframerate() + audio2 = np.frombuffer(fin.readframes(fin.getnframes()), np.int16) + + stream1 = ds.setupStream(sample_rate=fs1) + stream2 = ds.setupStream(sample_rate=fs2) + + splits1 = np.array_split(audio1, 10) + splits2 = np.array_split(audio2, 10) + + for part1, part2 in zip(splits1, splits2): + ds.feedAudioContent(stream1, part1) + ds.feedAudioContent(stream2, part2) + + print(ds.finishStream(stream1)) + print(ds.finishStream(stream2)) + 
+if __name__ == '__main__': + main() diff --git a/taskcluster/tc-python-tests-prod.sh b/taskcluster/tc-python-tests-prod.sh index 6803082e..b735a30e 100644 --- a/taskcluster/tc-python-tests-prod.sh +++ b/taskcluster/tc-python-tests-prod.sh @@ -39,4 +39,6 @@ LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH pip install --verbose --only-bin run_prod_inference_tests +run_prod_concurrent_stream_tests + virtualenv_deactivate "${pyver}" "${PYENV_NAME}" diff --git a/taskcluster/tc-tests-utils.sh b/taskcluster/tc-tests-utils.sh index dc1e7f3c..5455d748 100755 --- a/taskcluster/tc-tests-utils.sh +++ b/taskcluster/tc-tests-utils.sh @@ -419,6 +419,26 @@ run_all_inference_tests() assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}" } +run_prod_concurrent_stream_tests() +{ + set +e + output=$(python ${TASKCLUSTER_TMP_DIR}/test_sources/concurrent_streams.py \ + --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} \ + --alphabet ${TASKCLUSTER_TMP_DIR}/alphabet.txt \ + --lm ${TASKCLUSTER_TMP_DIR}/lm.binary \ + --trie ${TASKCLUSTER_TMP_DIR}/trie \ + --audio1 ${TASKCLUSTER_TMP_DIR}/LDC93S1.wav \ + --audio2 ${TASKCLUSTER_TMP_DIR}/new-home-in-the-stars-16k.wav 2>/dev/null) + status=$? + set -e + + output1=$(echo ${output} | head -n 1) + output2=$(echo ${output} | tail -n 1) + + assert_correct_ldc93s1_prodmodel "${output1}" "${status}" + assert_correct_inference "${output2}" "i must find a new home in the stars" "${status}" +} + run_prod_inference_tests() { set +e @@ -540,6 +560,7 @@ download_data() cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/alphabet.txt ${TASKCLUSTER_TMP_DIR}/alphabet.txt cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/smoke_test/vocab.pruned.lm ${TASKCLUSTER_TMP_DIR}/lm.binary cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/smoke_test/vocab.trie ${TASKCLUSTER_TMP_DIR}/trie + cp -R ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/test ${TASKCLUSTER_TMP_DIR}/test_sources } download_material()