From 619451e24903c249d1b8bdd8be8d4cdb65417754 Mon Sep 17 00:00:00 2001 From: teaxu Date: Sat, 4 Apr 2020 15:04:24 +0800 Subject: [PATCH] delete build files --- .gitignore | 5 + build/lib/caire-covid/__init__.py | 2 - build/lib/caire-covid/biobert/__init__.py | 0 build/lib/caire-covid/biobert/modeling.py | 988 ----- build/lib/caire-covid/biobert/optimization.py | 174 - .../caire-covid/biobert/predictor_biobert.py | 1227 ------ build/lib/caire-covid/biobert/run_factoid.py | 1290 ------ build/lib/caire-covid/biobert/save_biobert.py | 912 ----- build/lib/caire-covid/biobert/tokenization.py | 399 -- build/lib/caire-covid/main.py | 33 - build/lib/caire-covid/mrqa/__init__.py | 0 build/lib/caire-covid/mrqa/data_utils.py | 915 ----- .../lib/caire-covid/mrqa/function_builder.py | 624 --- build/lib/caire-covid/mrqa/model_utils.py | 399 -- build/lib/caire-covid/mrqa/modeling.py | 783 ---- build/lib/caire-covid/mrqa/multiqa_utils.py | 111 - .../lib/caire-covid/mrqa/predictor_kaggle.py | 923 ----- build/lib/caire-covid/mrqa/prepro_utils.py | 138 - build/lib/caire-covid/mrqa/tpu_estimator.py | 3522 ----------------- build/lib/caire-covid/mrqa/xlnet.py | 292 -- build/lib/caire-covid/qa.py | 252 -- build/lib/caire-covid/retrieval.py | 83 - build/lib/caire-covid/test_api.py | 51 - build/lib/caireCovid/__init__.py | 2 - build/lib/caireCovid/biobert/__init__.py | 0 build/lib/caireCovid/biobert/modeling.py | 988 ----- build/lib/caireCovid/biobert/optimization.py | 174 - .../caireCovid/biobert/predictor_biobert.py | 1074 ----- build/lib/caireCovid/biobert/run_factoid.py | 1290 ------ build/lib/caireCovid/biobert/save_biobert.py | 912 ----- build/lib/caireCovid/biobert/tokenization.py | 399 -- build/lib/caireCovid/main.py | 33 - build/lib/caireCovid/mrqa/__init__.py | 0 build/lib/caireCovid/mrqa/data_utils.py | 915 ----- build/lib/caireCovid/mrqa/function_builder.py | 395 -- build/lib/caireCovid/mrqa/model_utils.py | 399 -- build/lib/caireCovid/mrqa/modeling.py | 783 ---- build/lib/caireCovid/mrqa/multiqa_utils.py | 111 - build/lib/caireCovid/mrqa/predictor_kaggle.py | 920 ----- build/lib/caireCovid/mrqa/prepro_utils.py | 138 - build/lib/caireCovid/mrqa/tpu_estimator.py | 3522 ----------------- build/lib/caireCovid/mrqa/xlnet.py | 292 -- build/lib/caireCovid/qa.py | 258 -- build/lib/caireCovid/retrieval.py | 83 - build/lib/caireCovid/test_api.py | 51 - caireCovid.egg-info/PKG-INFO | 17 - caireCovid.egg-info/SOURCES.txt | 27 - caireCovid.egg-info/dependency_links.txt | 1 - caireCovid.egg-info/top_level.txt | 1 - dist/caireCovid-0.1.0-py3-none-any.whl | Bin 253719 -> 0 bytes dist/caireCovid-0.1.0.tar.gz | Bin 115986 -> 0 bytes 51 files changed, 5 insertions(+), 25903 deletions(-) delete mode 100644 build/lib/caire-covid/__init__.py delete mode 100644 build/lib/caire-covid/biobert/__init__.py delete mode 100644 build/lib/caire-covid/biobert/modeling.py delete mode 100644 build/lib/caire-covid/biobert/optimization.py delete mode 100644 build/lib/caire-covid/biobert/predictor_biobert.py delete mode 100644 build/lib/caire-covid/biobert/run_factoid.py delete mode 100644 build/lib/caire-covid/biobert/save_biobert.py delete mode 100644 build/lib/caire-covid/biobert/tokenization.py delete mode 100644 build/lib/caire-covid/main.py delete mode 100644 build/lib/caire-covid/mrqa/__init__.py delete mode 100644 build/lib/caire-covid/mrqa/data_utils.py delete mode 100644 build/lib/caire-covid/mrqa/function_builder.py delete mode 100644 build/lib/caire-covid/mrqa/model_utils.py delete mode 100644 
build/lib/caire-covid/mrqa/modeling.py delete mode 100644 build/lib/caire-covid/mrqa/multiqa_utils.py delete mode 100644 build/lib/caire-covid/mrqa/predictor_kaggle.py delete mode 100644 build/lib/caire-covid/mrqa/prepro_utils.py delete mode 100644 build/lib/caire-covid/mrqa/tpu_estimator.py delete mode 100644 build/lib/caire-covid/mrqa/xlnet.py delete mode 100644 build/lib/caire-covid/qa.py delete mode 100644 build/lib/caire-covid/retrieval.py delete mode 100644 build/lib/caire-covid/test_api.py delete mode 100644 build/lib/caireCovid/__init__.py delete mode 100644 build/lib/caireCovid/biobert/__init__.py delete mode 100644 build/lib/caireCovid/biobert/modeling.py delete mode 100644 build/lib/caireCovid/biobert/optimization.py delete mode 100644 build/lib/caireCovid/biobert/predictor_biobert.py delete mode 100644 build/lib/caireCovid/biobert/run_factoid.py delete mode 100644 build/lib/caireCovid/biobert/save_biobert.py delete mode 100644 build/lib/caireCovid/biobert/tokenization.py delete mode 100644 build/lib/caireCovid/main.py delete mode 100644 build/lib/caireCovid/mrqa/__init__.py delete mode 100644 build/lib/caireCovid/mrqa/data_utils.py delete mode 100644 build/lib/caireCovid/mrqa/function_builder.py delete mode 100644 build/lib/caireCovid/mrqa/model_utils.py delete mode 100644 build/lib/caireCovid/mrqa/modeling.py delete mode 100644 build/lib/caireCovid/mrqa/multiqa_utils.py delete mode 100644 build/lib/caireCovid/mrqa/predictor_kaggle.py delete mode 100644 build/lib/caireCovid/mrqa/prepro_utils.py delete mode 100644 build/lib/caireCovid/mrqa/tpu_estimator.py delete mode 100644 build/lib/caireCovid/mrqa/xlnet.py delete mode 100644 build/lib/caireCovid/qa.py delete mode 100644 build/lib/caireCovid/retrieval.py delete mode 100644 build/lib/caireCovid/test_api.py delete mode 100644 caireCovid.egg-info/PKG-INFO delete mode 100644 caireCovid.egg-info/SOURCES.txt delete mode 100644 caireCovid.egg-info/dependency_links.txt delete mode 100644 caireCovid.egg-info/top_level.txt delete mode 100644 dist/caireCovid-0.1.0-py3-none-any.whl delete mode 100644 dist/caireCovid-0.1.0.tar.gz diff --git a/.gitignore b/.gitignore index c286d31..6b97b9e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,8 @@ __pycache__ *.pyc *.DS_Store + +# package files +dist +build +*egg-info diff --git a/build/lib/caire-covid/__init__.py b/build/lib/caire-covid/__init__.py deleted file mode 100644 index 142e75a..0000000 --- a/build/lib/caire-covid/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .retrieval import information_retrieval -from .qa import QaModule, print_answers_in_file diff --git a/build/lib/caire-covid/biobert/__init__.py b/build/lib/caire-covid/biobert/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/build/lib/caire-covid/biobert/modeling.py b/build/lib/caire-covid/biobert/modeling.py deleted file mode 100644 index 88443f4..0000000 --- a/build/lib/caire-covid/biobert/modeling.py +++ /dev/null @@ -1,988 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""The main BERT model and related functions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import copy -import json -import math -import re -import six -import tensorflow as tf - - -class BertConfig(object): - """Configuration for `BertModel`.""" - - def __init__(self, - vocab_size, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02): - """Constructs BertConfig. - - Args: - vocab_size: Vocabulary size of `inputs_ids` in `BertModel`. - hidden_size: Size of the encoder layers and the pooler layer. - num_hidden_layers: Number of hidden layers in the Transformer encoder. - num_attention_heads: Number of attention heads for each attention layer in - the Transformer encoder. - intermediate_size: The size of the "intermediate" (i.e., feed-forward) - layer in the Transformer encoder. - hidden_act: The non-linear activation function (function or string) in the - encoder and pooler. - hidden_dropout_prob: The dropout probability for all fully connected - layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob: The dropout ratio for the attention - probabilities. - max_position_embeddings: The maximum sequence length that this model might - ever be used with. Typically set this to something large just in case - (e.g., 512 or 1024 or 2048). - type_vocab_size: The vocabulary size of the `token_type_ids` passed into - `BertModel`. - initializer_range: The stdev of the truncated_normal_initializer for - initializing all weight matrices. - """ - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - - @classmethod - def from_dict(cls, json_object): - """Constructs a `BertConfig` from a Python dictionary of parameters.""" - config = BertConfig(vocab_size=None) - for (key, value) in six.iteritems(json_object): - config.__dict__[key] = value - return config - - @classmethod - def from_json_file(cls, json_file): - """Constructs a `BertConfig` from a json file of parameters.""" - with tf.gfile.GFile(json_file, "r") as reader: - text = reader.read() - return cls.from_dict(json.loads(text)) - - def to_dict(self): - """Serializes this instance to a Python dictionary.""" - output = copy.deepcopy(self.__dict__) - return output - - def to_json_string(self): - """Serializes this instance to a JSON string.""" - return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" - - -class BertModel(object): - """BERT model ("Bidirectional Encoder Representations from Transformers"). 
- - Example usage: - - ```python - # Already been converted into WordPiece token ids - input_ids = tf.constant([[31, 51, 99], [15, 5, 0]]) - input_mask = tf.constant([[1, 1, 1], [1, 1, 0]]) - token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]]) - - config = modeling.BertConfig(vocab_size=32000, hidden_size=512, - num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) - - model = modeling.BertModel(config=config, is_training=True, - input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids) - - label_embeddings = tf.get_variable(...) - pooled_output = model.get_pooled_output() - logits = tf.matmul(pooled_output, label_embeddings) - ... - ``` - """ - - def __init__(self, - config, - is_training, - input_ids, - input_mask=None, - token_type_ids=None, - use_one_hot_embeddings=True, - scope=None): - """Constructor for BertModel. - - Args: - config: `BertConfig` instance. - is_training: bool. true for training model, false for eval model. Controls - whether dropout will be applied. - input_ids: int32 Tensor of shape [batch_size, seq_length]. - input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. - token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. - use_one_hot_embeddings: (optional) bool. Whether to use one-hot word - embeddings or tf.embedding_lookup() for the word embeddings. On the TPU, - it is much faster if this is True, on the CPU or GPU, it is faster if - this is False. - scope: (optional) variable scope. Defaults to "bert". - - Raises: - ValueError: The config is invalid or one of the input tensor shapes - is invalid. - """ - config = copy.deepcopy(config) - if not is_training: - config.hidden_dropout_prob = 0.0 - config.attention_probs_dropout_prob = 0.0 - - input_shape = get_shape_list(input_ids, expected_rank=2) - batch_size = input_shape[0] - seq_length = input_shape[1] - - if input_mask is None: - input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32) - - if token_type_ids is None: - token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32) - - with tf.variable_scope(scope, default_name="bert"): - with tf.variable_scope("embeddings"): - # Perform embedding lookup on the word ids. - (self.embedding_output, self.embedding_table) = embedding_lookup( - input_ids=input_ids, - vocab_size=config.vocab_size, - embedding_size=config.hidden_size, - initializer_range=config.initializer_range, - word_embedding_name="word_embeddings", - use_one_hot_embeddings=use_one_hot_embeddings) - - # Add positional embeddings and token type embeddings, then layer - # normalize and perform dropout. - self.embedding_output = embedding_postprocessor( - input_tensor=self.embedding_output, - use_token_type=True, - token_type_ids=token_type_ids, - token_type_vocab_size=config.type_vocab_size, - token_type_embedding_name="token_type_embeddings", - use_position_embeddings=True, - position_embedding_name="position_embeddings", - initializer_range=config.initializer_range, - max_position_embeddings=config.max_position_embeddings, - dropout_prob=config.hidden_dropout_prob) - - with tf.variable_scope("encoder"): - # This converts a 2D mask of shape [batch_size, seq_length] to a 3D - # mask of shape [batch_size, seq_length, seq_length] which is used - # for the attention scores. - attention_mask = create_attention_mask_from_input_mask( - input_ids, input_mask) - - # Run the stacked transformer. - # `sequence_output` shape = [batch_size, seq_length, hidden_size]. 
- self.all_encoder_layers = transformer_model( - input_tensor=self.embedding_output, - attention_mask=attention_mask, - hidden_size=config.hidden_size, - num_hidden_layers=config.num_hidden_layers, - num_attention_heads=config.num_attention_heads, - intermediate_size=config.intermediate_size, - intermediate_act_fn=get_activation(config.hidden_act), - hidden_dropout_prob=config.hidden_dropout_prob, - attention_probs_dropout_prob=config.attention_probs_dropout_prob, - initializer_range=config.initializer_range, - do_return_all_layers=True) - - self.sequence_output = self.all_encoder_layers[-1] - # The "pooler" converts the encoded sequence tensor of shape - # [batch_size, seq_length, hidden_size] to a tensor of shape - # [batch_size, hidden_size]. This is necessary for segment-level - # (or segment-pair-level) classification tasks where we need a fixed - # dimensional representation of the segment. - with tf.variable_scope("pooler"): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. We assume that this has been pre-trained - first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1) - self.pooled_output = tf.layers.dense( - first_token_tensor, - config.hidden_size, - activation=tf.tanh, - kernel_initializer=create_initializer(config.initializer_range)) - - def get_pooled_output(self): - return self.pooled_output - - def get_sequence_output(self): - """Gets final hidden layer of encoder. - - Returns: - float Tensor of shape [batch_size, seq_length, hidden_size] corresponding - to the final hidden of the transformer encoder. - """ - return self.sequence_output - - def get_all_encoder_layers(self): - return self.all_encoder_layers - - def get_embedding_output(self): - """Gets output of the embedding lookup (i.e., input to the transformer). - - Returns: - float Tensor of shape [batch_size, seq_length, hidden_size] corresponding - to the output of the embedding layer, after summing the word - embeddings with the positional embeddings and the token type embeddings, - then performing layer normalization. This is the input to the transformer. - """ - return self.embedding_output - - def get_embedding_table(self): - return self.embedding_table - - -def gelu(input_tensor): - """Gaussian Error Linear Unit. - - This is a smoother version of the RELU. - Original paper: https://arxiv.org/abs/1606.08415 - - Args: - input_tensor: float Tensor to perform activation. - - Returns: - `input_tensor` with the GELU activation applied. - """ - cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0))) - return input_tensor * cdf - - -def get_activation(activation_string): - """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. - - Args: - activation_string: String name of the activation function. - - Returns: - A Python function corresponding to the activation function. If - `activation_string` is None, empty, or "linear", this will return None. - If `activation_string` is not a string, it will return `activation_string`. - - Raises: - ValueError: The `activation_string` does not correspond to a known - activation. - """ - - # We assume that anything that"s not a string is already an activation - # function, so we just return it. 
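For reference, the erf-based GELU defined in the deleted build copy of `modeling.py` reduces to the following pure-Python sketch (stdlib only; the original computes the same expression element-wise with TensorFlow ops, and the function name here is just illustrative):

```python
import math

def gelu(x: float) -> float:
    """Gaussian Error Linear Unit: x * Phi(x), where Phi is the standard normal CDF."""
    return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

print([round(gelu(v), 4) for v in (-1.0, 0.0, 1.0)])  # [-0.1587, 0.0, 0.8413]
```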
- if not isinstance(activation_string, six.string_types): - return activation_string - - if not activation_string: - return None - - act = activation_string.lower() - if act == "linear": - return None - elif act == "relu": - return tf.nn.relu - elif act == "gelu": - return gelu - elif act == "tanh": - return tf.tanh - else: - raise ValueError("Unsupported activation: %s" % act) - - -def get_assignment_map_from_checkpoint(tvars, init_checkpoint): - """Compute the union of the current variables and checkpoint variables.""" - assignment_map = {} - initialized_variable_names = {} - - name_to_variable = collections.OrderedDict() - for var in tvars: - name = var.name - m = re.match("^(.*):\\d+$", name) - if m is not None: - name = m.group(1) - name_to_variable[name] = var - - init_vars = tf.train.list_variables(init_checkpoint) - - assignment_map = collections.OrderedDict() - for x in init_vars: - (name, var) = (x[0], x[1]) - if name not in name_to_variable: - continue - assignment_map[name] = name - initialized_variable_names[name] = 1 - initialized_variable_names[name + ":0"] = 1 - - return (assignment_map, initialized_variable_names) - - -def dropout(input_tensor, dropout_prob): - """Perform dropout. - - Args: - input_tensor: float Tensor. - dropout_prob: Python float. The probability of dropping out a value (NOT of - *keeping* a dimension as in `tf.nn.dropout`). - - Returns: - A version of `input_tensor` with dropout applied. - """ - if dropout_prob is None or dropout_prob == 0.0: - return input_tensor - - output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob) - return output - - -def layer_norm(input_tensor, name=None): - """Run layer normalization on the last dimension of the tensor.""" - return tf.contrib.layers.layer_norm( - inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name) - - -def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): - """Runs layer normalization followed by dropout.""" - output_tensor = layer_norm(input_tensor, name) - output_tensor = dropout(output_tensor, dropout_prob) - return output_tensor - - -def create_initializer(initializer_range=0.02): - """Creates a `truncated_normal_initializer` with the given range.""" - return tf.truncated_normal_initializer(stddev=initializer_range) - - -def embedding_lookup(input_ids, - vocab_size, - embedding_size=128, - initializer_range=0.02, - word_embedding_name="word_embeddings", - use_one_hot_embeddings=False): - """Looks up words embeddings for id tensor. - - Args: - input_ids: int32 Tensor of shape [batch_size, seq_length] containing word - ids. - vocab_size: int. Size of the embedding vocabulary. - embedding_size: int. Width of the word embeddings. - initializer_range: float. Embedding initialization range. - word_embedding_name: string. Name of the embedding table. - use_one_hot_embeddings: bool. If True, use one-hot method for word - embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better - for TPUs. - - Returns: - float Tensor of shape [batch_size, seq_length, embedding_size]. - """ - # This function assumes that the input is of shape [batch_size, seq_length, - # num_inputs]. - # - # If the input is a 2D tensor of shape [batch_size, seq_length], we - # reshape to [batch_size, seq_length, 1]. 
- if input_ids.shape.ndims == 2: - input_ids = tf.expand_dims(input_ids, axis=[-1]) - - embedding_table = tf.get_variable( - name=word_embedding_name, - shape=[vocab_size, embedding_size], - initializer=create_initializer(initializer_range)) - - if use_one_hot_embeddings: - flat_input_ids = tf.reshape(input_ids, [-1]) - one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) - output = tf.matmul(one_hot_input_ids, embedding_table) - else: - output = tf.nn.embedding_lookup(embedding_table, input_ids) - - input_shape = get_shape_list(input_ids) - - output = tf.reshape(output, - input_shape[0:-1] + [input_shape[-1] * embedding_size]) - return (output, embedding_table) - - -def embedding_postprocessor(input_tensor, - use_token_type=False, - token_type_ids=None, - token_type_vocab_size=16, - token_type_embedding_name="token_type_embeddings", - use_position_embeddings=True, - position_embedding_name="position_embeddings", - initializer_range=0.02, - max_position_embeddings=512, - dropout_prob=0.1): - """Performs various post-processing on a word embedding tensor. - - Args: - input_tensor: float Tensor of shape [batch_size, seq_length, - embedding_size]. - use_token_type: bool. Whether to add embeddings for `token_type_ids`. - token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. - Must be specified if `use_token_type` is True. - token_type_vocab_size: int. The vocabulary size of `token_type_ids`. - token_type_embedding_name: string. The name of the embedding table variable - for token type ids. - use_position_embeddings: bool. Whether to add position embeddings for the - position of each token in the sequence. - position_embedding_name: string. The name of the embedding table variable - for positional embeddings. - initializer_range: float. Range of the weight initialization. - max_position_embeddings: int. Maximum sequence length that might ever be - used with this model. This can be longer than the sequence length of - input_tensor, but cannot be shorter. - dropout_prob: float. Dropout probability applied to the final output tensor. - - Returns: - float tensor with same shape as `input_tensor`. - - Raises: - ValueError: One of the tensor shapes or input values is invalid. - """ - input_shape = get_shape_list(input_tensor, expected_rank=3) - batch_size = input_shape[0] - seq_length = input_shape[1] - width = input_shape[2] - - output = input_tensor - - if use_token_type: - if token_type_ids is None: - raise ValueError("`token_type_ids` must be specified if" - "`use_token_type` is True.") - token_type_table = tf.get_variable( - name=token_type_embedding_name, - shape=[token_type_vocab_size, width], - initializer=create_initializer(initializer_range)) - # This vocab will be small so we always do one-hot here, since it is always - # faster for a small vocabulary. 
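As a side note on the two lookup paths in the deleted `embedding_lookup`, the one-hot matmul and the gather-style lookup produce the same embeddings; a minimal NumPy sketch of that equivalence (toy sizes, illustrative names only):

```python
import numpy as np

rng = np.random.default_rng(0)
vocab_size, embedding_size = 8, 4
embedding_table = rng.normal(size=(vocab_size, embedding_size))
input_ids = np.array([[3, 1, 0], [5, 2, 2]])          # [batch_size, seq_length]

# Gather-style lookup (what tf.nn.embedding_lookup does).
gathered = embedding_table[input_ids]                  # [2, 3, embedding_size]

# One-hot matmul lookup (the TPU-friendly path in the deleted code).
one_hot = np.eye(vocab_size)[input_ids.reshape(-1)]    # [batch*seq, vocab_size]
matmulled = (one_hot @ embedding_table).reshape(2, 3, embedding_size)

assert np.allclose(gathered, matmulled)
```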
- flat_token_type_ids = tf.reshape(token_type_ids, [-1]) - one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) - token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) - token_type_embeddings = tf.reshape(token_type_embeddings, - [batch_size, seq_length, width]) - output += token_type_embeddings - - if use_position_embeddings: - assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) - with tf.control_dependencies([assert_op]): - full_position_embeddings = tf.get_variable( - name=position_embedding_name, - shape=[max_position_embeddings, width], - initializer=create_initializer(initializer_range)) - # Since the position embedding table is a learned variable, we create it - # using a (long) sequence length `max_position_embeddings`. The actual - # sequence length might be shorter than this, for faster training of - # tasks that do not have long sequences. - # - # So `full_position_embeddings` is effectively an embedding table - # for position [0, 1, 2, ..., max_position_embeddings-1], and the current - # sequence has positions [0, 1, 2, ... seq_length-1], so we can just - # perform a slice. - position_embeddings = tf.slice(full_position_embeddings, [0, 0], - [seq_length, -1]) - num_dims = len(output.shape.as_list()) - - # Only the last two dimensions are relevant (`seq_length` and `width`), so - # we broadcast among the first dimensions, which is typically just - # the batch size. - position_broadcast_shape = [] - for _ in range(num_dims - 2): - position_broadcast_shape.append(1) - position_broadcast_shape.extend([seq_length, width]) - position_embeddings = tf.reshape(position_embeddings, - position_broadcast_shape) - output += position_embeddings - - output = layer_norm_and_dropout(output, dropout_prob) - return output - - -def create_attention_mask_from_input_mask(from_tensor, to_mask): - """Create 3D attention mask from a 2D tensor mask. - - Args: - from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. - to_mask: int32 Tensor of shape [batch_size, to_seq_length]. - - Returns: - float Tensor of shape [batch_size, from_seq_length, to_seq_length]. - """ - from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) - batch_size = from_shape[0] - from_seq_length = from_shape[1] - - to_shape = get_shape_list(to_mask, expected_rank=2) - to_seq_length = to_shape[1] - - to_mask = tf.cast( - tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32) - - # We don't assume that `from_tensor` is a mask (although it could be). We - # don't actually care if we attend *from* padding tokens (only *to* padding) - # tokens so we create a tensor of all ones. - # - # `broadcast_ones` = [batch_size, from_seq_length, 1] - broadcast_ones = tf.ones( - shape=[batch_size, from_seq_length, 1], dtype=tf.float32) - - # Here we broadcast along two dimensions to create the mask. - mask = broadcast_ones * to_mask - - return mask - - -def attention_layer(from_tensor, - to_tensor, - attention_mask=None, - num_attention_heads=1, - size_per_head=512, - query_act=None, - key_act=None, - value_act=None, - attention_probs_dropout_prob=0.0, - initializer_range=0.02, - do_return_2d_tensor=False, - batch_size=None, - from_seq_length=None, - to_seq_length=None): - """Performs multi-headed attention from `from_tensor` to `to_tensor`. - - This is an implementation of multi-headed attention based on "Attention - is all you Need". If `from_tensor` and `to_tensor` are the same, then - this is self-attention. 
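For reference, `create_attention_mask_from_input_mask` above turns a 2D padding mask into the 3D per-query mask by broadcasting; a minimal NumPy sketch under the same shapes (illustrative values):

```python
import numpy as np

# input_mask: 1 for real tokens, 0 for padding           [batch_size, seq_length]
input_mask = np.array([[1, 1, 1], [1, 1, 0]], dtype=np.float32)
batch_size, seq_length = input_mask.shape

to_mask = input_mask.reshape(batch_size, 1, seq_length)                   # [B, 1, T]
broadcast_ones = np.ones((batch_size, seq_length, 1), dtype=np.float32)   # [B, F, 1]
attention_mask = broadcast_ones * to_mask                                  # [B, F, T]

print(attention_mask[1])
# [[1. 1. 0.]
#  [1. 1. 0.]
#  [1. 1. 0.]]   -> every query position may attend only to the two real tokens
```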
Each timestep in `from_tensor` attends to the - corresponding sequence in `to_tensor`, and returns a fixed-with vector. - - This function first projects `from_tensor` into a "query" tensor and - `to_tensor` into "key" and "value" tensors. These are (effectively) a list - of tensors of length `num_attention_heads`, where each tensor is of shape - [batch_size, seq_length, size_per_head]. - - Then, the query and key tensors are dot-producted and scaled. These are - softmaxed to obtain attention probabilities. The value tensors are then - interpolated by these probabilities, then concatenated back to a single - tensor and returned. - - In practice, the multi-headed attention are done with transposes and - reshapes rather than actual separate tensors. - - Args: - from_tensor: float Tensor of shape [batch_size, from_seq_length, - from_width]. - to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width]. - attention_mask: (optional) int32 Tensor of shape [batch_size, - from_seq_length, to_seq_length]. The values should be 1 or 0. The - attention scores will effectively be set to -infinity for any positions in - the mask that are 0, and will be unchanged for positions that are 1. - num_attention_heads: int. Number of attention heads. - size_per_head: int. Size of each attention head. - query_act: (optional) Activation function for the query transform. - key_act: (optional) Activation function for the key transform. - value_act: (optional) Activation function for the value transform. - attention_probs_dropout_prob: (optional) float. Dropout probability of the - attention probabilities. - initializer_range: float. Range of the weight initializer. - do_return_2d_tensor: bool. If True, the output will be of shape [batch_size - * from_seq_length, num_attention_heads * size_per_head]. If False, the - output will be of shape [batch_size, from_seq_length, num_attention_heads - * size_per_head]. - batch_size: (Optional) int. If the input is 2D, this might be the batch size - of the 3D version of the `from_tensor` and `to_tensor`. - from_seq_length: (Optional) If the input is 2D, this might be the seq length - of the 3D version of the `from_tensor`. - to_seq_length: (Optional) If the input is 2D, this might be the seq length - of the 3D version of the `to_tensor`. - - Returns: - float Tensor of shape [batch_size, from_seq_length, - num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is - true, this will be of shape [batch_size * from_seq_length, - num_attention_heads * size_per_head]). - - Raises: - ValueError: Any of the arguments or tensor shapes are invalid. 
- """ - - def transpose_for_scores(input_tensor, batch_size, num_attention_heads, - seq_length, width): - output_tensor = tf.reshape( - input_tensor, [batch_size, seq_length, num_attention_heads, width]) - - output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3]) - return output_tensor - - from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) - to_shape = get_shape_list(to_tensor, expected_rank=[2, 3]) - - if len(from_shape) != len(to_shape): - raise ValueError( - "The rank of `from_tensor` must match the rank of `to_tensor`.") - - if len(from_shape) == 3: - batch_size = from_shape[0] - from_seq_length = from_shape[1] - to_seq_length = to_shape[1] - elif len(from_shape) == 2: - if (batch_size is None or from_seq_length is None or to_seq_length is None): - raise ValueError( - "When passing in rank 2 tensors to attention_layer, the values " - "for `batch_size`, `from_seq_length`, and `to_seq_length` " - "must all be specified.") - - # Scalar dimensions referenced here: - # B = batch size (number of sequences) - # F = `from_tensor` sequence length - # T = `to_tensor` sequence length - # N = `num_attention_heads` - # H = `size_per_head` - - from_tensor_2d = reshape_to_matrix(from_tensor) - to_tensor_2d = reshape_to_matrix(to_tensor) - - # `query_layer` = [B*F, N*H] - query_layer = tf.layers.dense( - from_tensor_2d, - num_attention_heads * size_per_head, - activation=query_act, - name="query", - kernel_initializer=create_initializer(initializer_range)) - - # `key_layer` = [B*T, N*H] - key_layer = tf.layers.dense( - to_tensor_2d, - num_attention_heads * size_per_head, - activation=key_act, - name="key", - kernel_initializer=create_initializer(initializer_range)) - - # `value_layer` = [B*T, N*H] - value_layer = tf.layers.dense( - to_tensor_2d, - num_attention_heads * size_per_head, - activation=value_act, - name="value", - kernel_initializer=create_initializer(initializer_range)) - - # `query_layer` = [B, N, F, H] - query_layer = transpose_for_scores(query_layer, batch_size, - num_attention_heads, from_seq_length, - size_per_head) - - # `key_layer` = [B, N, T, H] - key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads, - to_seq_length, size_per_head) - - # Take the dot product between "query" and "key" to get the raw - # attention scores. - # `attention_scores` = [B, N, F, T] - attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) - attention_scores = tf.multiply(attention_scores, - 1.0 / math.sqrt(float(size_per_head))) - - if attention_mask is not None: - # `attention_mask` = [B, 1, F, T] - attention_mask = tf.expand_dims(attention_mask, axis=[1]) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0 - - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - attention_scores += adder - - # Normalize the attention scores to probabilities. - # `attention_probs` = [B, N, F, T] - attention_probs = tf.nn.softmax(attention_scores) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
- attention_probs = dropout(attention_probs, attention_probs_dropout_prob) - - # `value_layer` = [B, T, N, H] - value_layer = tf.reshape( - value_layer, - [batch_size, to_seq_length, num_attention_heads, size_per_head]) - - # `value_layer` = [B, N, T, H] - value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) - - # `context_layer` = [B, N, F, H] - context_layer = tf.matmul(attention_probs, value_layer) - - # `context_layer` = [B, F, N, H] - context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) - - if do_return_2d_tensor: - # `context_layer` = [B*F, N*H] - context_layer = tf.reshape( - context_layer, - [batch_size * from_seq_length, num_attention_heads * size_per_head]) - else: - # `context_layer` = [B, F, N*H] - context_layer = tf.reshape( - context_layer, - [batch_size, from_seq_length, num_attention_heads * size_per_head]) - - return context_layer - - -def transformer_model(input_tensor, - attention_mask=None, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - intermediate_act_fn=gelu, - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - initializer_range=0.02, - do_return_all_layers=False): - """Multi-headed, multi-layer Transformer from "Attention is All You Need". - - This is almost an exact implementation of the original Transformer encoder. - - See the original paper: - https://arxiv.org/abs/1706.03762 - - Also see: - https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py - - Args: - input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. - attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, - seq_length], with 1 for positions that can be attended to and 0 in - positions that should not be. - hidden_size: int. Hidden size of the Transformer. - num_hidden_layers: int. Number of layers (blocks) in the Transformer. - num_attention_heads: int. Number of attention heads in the Transformer. - intermediate_size: int. The size of the "intermediate" (a.k.a., feed - forward) layer. - intermediate_act_fn: function. The non-linear activation function to apply - to the output of the intermediate/feed-forward layer. - hidden_dropout_prob: float. Dropout probability for the hidden layers. - attention_probs_dropout_prob: float. Dropout probability of the attention - probabilities. - initializer_range: float. Range of the initializer (stddev of truncated - normal). - do_return_all_layers: Whether to also return all layers or just the final - layer. - - Returns: - float Tensor of shape [batch_size, seq_length, hidden_size], the final - hidden layer of the Transformer. - - Raises: - ValueError: A Tensor shape or parameter is invalid. - """ - if hidden_size % num_attention_heads != 0: - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (hidden_size, num_attention_heads)) - - attention_head_size = int(hidden_size / num_attention_heads) - input_shape = get_shape_list(input_tensor, expected_rank=3) - batch_size = input_shape[0] - seq_length = input_shape[1] - input_width = input_shape[2] - - # The Transformer performs sum residuals on all layers so the input needs - # to be the same as the hidden size. - if input_width != hidden_size: - raise ValueError("The width of the input tensor (%d) != hidden size (%d)" % - (input_width, hidden_size)) - - # We keep the representation as a 2D tensor to avoid re-shaping it back and - # forth from a 3D tensor to a 2D tensor. 
Re-shapes are normally free on - # the GPU/CPU but may not be free on the TPU, so we want to minimize them to - # help the optimizer. - prev_output = reshape_to_matrix(input_tensor) - - all_layer_outputs = [] - for layer_idx in range(num_hidden_layers): - with tf.variable_scope("layer_%d" % layer_idx): - layer_input = prev_output - - with tf.variable_scope("attention"): - attention_heads = [] - with tf.variable_scope("self"): - attention_head = attention_layer( - from_tensor=layer_input, - to_tensor=layer_input, - attention_mask=attention_mask, - num_attention_heads=num_attention_heads, - size_per_head=attention_head_size, - attention_probs_dropout_prob=attention_probs_dropout_prob, - initializer_range=initializer_range, - do_return_2d_tensor=True, - batch_size=batch_size, - from_seq_length=seq_length, - to_seq_length=seq_length) - attention_heads.append(attention_head) - - attention_output = None - if len(attention_heads) == 1: - attention_output = attention_heads[0] - else: - # In the case where we have other sequences, we just concatenate - # them to the self-attention head before the projection. - attention_output = tf.concat(attention_heads, axis=-1) - - # Run a linear projection of `hidden_size` then add a residual - # with `layer_input`. - with tf.variable_scope("output"): - attention_output = tf.layers.dense( - attention_output, - hidden_size, - kernel_initializer=create_initializer(initializer_range)) - attention_output = dropout(attention_output, hidden_dropout_prob) - attention_output = layer_norm(attention_output + layer_input) - - # The activation is only applied to the "intermediate" hidden layer. - with tf.variable_scope("intermediate"): - intermediate_output = tf.layers.dense( - attention_output, - intermediate_size, - activation=intermediate_act_fn, - kernel_initializer=create_initializer(initializer_range)) - - # Down-project back to `hidden_size` then add the residual. - with tf.variable_scope("output"): - layer_output = tf.layers.dense( - intermediate_output, - hidden_size, - kernel_initializer=create_initializer(initializer_range)) - layer_output = dropout(layer_output, hidden_dropout_prob) - layer_output = layer_norm(layer_output + attention_output) - prev_output = layer_output - all_layer_outputs.append(layer_output) - - if do_return_all_layers: - final_outputs = [] - for layer_output in all_layer_outputs: - final_output = reshape_from_matrix(layer_output, input_shape) - final_outputs.append(final_output) - return final_outputs - else: - final_output = reshape_from_matrix(prev_output, input_shape) - return final_output - - -def get_shape_list(tensor, expected_rank=None, name=None): - """Returns a list of the shape of tensor, preferring static dimensions. - - Args: - tensor: A tf.Tensor object to find the shape of. - expected_rank: (optional) int. The expected rank of `tensor`. If this is - specified and the `tensor` has a different rank, and exception will be - thrown. - name: Optional name of the tensor for the error message. - - Returns: - A list of dimensions of the shape of tensor. All static dimensions will - be returned as python integers, and dynamic dimensions will be returned - as tf.Tensor scalars. 
- """ - if name is None: - name = tensor.name - - if expected_rank is not None: - assert_rank(tensor, expected_rank, name) - - shape = tensor.shape.as_list() - - non_static_indexes = [] - for (index, dim) in enumerate(shape): - if dim is None: - non_static_indexes.append(index) - - if not non_static_indexes: - return shape - - dyn_shape = tf.shape(tensor) - for index in non_static_indexes: - shape[index] = dyn_shape[index] - return shape - - -def reshape_to_matrix(input_tensor): - """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" - ndims = input_tensor.shape.ndims - if ndims < 2: - raise ValueError("Input tensor must have at least rank 2. Shape = %s" % - (input_tensor.shape)) - if ndims == 2: - return input_tensor - - width = input_tensor.shape[-1] - output_tensor = tf.reshape(input_tensor, [-1, width]) - return output_tensor - - -def reshape_from_matrix(output_tensor, orig_shape_list): - """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" - if len(orig_shape_list) == 2: - return output_tensor - - output_shape = get_shape_list(output_tensor) - - orig_dims = orig_shape_list[0:-1] - width = output_shape[-1] - - return tf.reshape(output_tensor, orig_dims + [width]) - - -def assert_rank(tensor, expected_rank, name=None): - """Raises an exception if the tensor rank is not of the expected rank. - - Args: - tensor: A tf.Tensor to check the rank of. - expected_rank: Python integer or list of integers, expected rank. - name: Optional name of the tensor for the error message. - - Raises: - ValueError: If the expected shape doesn't match the actual shape. - """ - if name is None: - name = tensor.name - - expected_rank_dict = {} - if isinstance(expected_rank, six.integer_types): - expected_rank_dict[expected_rank] = True - else: - for x in expected_rank: - expected_rank_dict[x] = True - - actual_rank = tensor.shape.ndims - if actual_rank not in expected_rank_dict: - scope_name = tf.get_variable_scope().name - raise ValueError( - "For the tensor `%s` in scope `%s`, the actual rank " - "`%d` (shape = %s) is not equal to the expected rank `%s`" % - (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank))) diff --git a/build/lib/caire-covid/biobert/optimization.py b/build/lib/caire-covid/biobert/optimization.py deleted file mode 100644 index d33dabd..0000000 --- a/build/lib/caire-covid/biobert/optimization.py +++ /dev/null @@ -1,174 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Functions and classes related to optimization (weight updates).""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import re -import tensorflow as tf - - -def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu): - """Creates an optimizer training op.""" - global_step = tf.train.get_or_create_global_step() - - learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32) - - # Implements linear decay of the learning rate. - learning_rate = tf.train.polynomial_decay( - learning_rate, - global_step, - num_train_steps, - end_learning_rate=0.0, - power=1.0, - cycle=False) - - # Implements linear warmup. I.e., if global_step < num_warmup_steps, the - # learning rate will be `global_step/num_warmup_steps * init_lr`. - if num_warmup_steps: - global_steps_int = tf.cast(global_step, tf.int32) - warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32) - - global_steps_float = tf.cast(global_steps_int, tf.float32) - warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) - - warmup_percent_done = global_steps_float / warmup_steps_float - warmup_learning_rate = init_lr * warmup_percent_done - - is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) - learning_rate = ( - (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate) - - # It is recommended that you use this optimizer for fine tuning, since this - # is how the model was trained (note that the Adam m/v variables are NOT - # loaded from init_checkpoint.) - optimizer = AdamWeightDecayOptimizer( - learning_rate=learning_rate, - weight_decay_rate=0.01, - beta_1=0.9, - beta_2=0.999, - epsilon=1e-6, - exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) - - if use_tpu: - optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) - - tvars = tf.trainable_variables() - grads = tf.gradients(loss, tvars) - - # This is how the model was pre-trained. - (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) - - train_op = optimizer.apply_gradients( - zip(grads, tvars), global_step=global_step) - - # Normally the global step update is done inside of `apply_gradients`. - # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use - # a different optimizer, you should probably take this line out. 
- new_global_step = global_step + 1 - train_op = tf.group(train_op, [global_step.assign(new_global_step)]) - return train_op - - -class AdamWeightDecayOptimizer(tf.train.Optimizer): - """A basic Adam optimizer that includes "correct" L2 weight decay.""" - - def __init__(self, - learning_rate, - weight_decay_rate=0.0, - beta_1=0.9, - beta_2=0.999, - epsilon=1e-6, - exclude_from_weight_decay=None, - name="AdamWeightDecayOptimizer"): - """Constructs a AdamWeightDecayOptimizer.""" - super(AdamWeightDecayOptimizer, self).__init__(False, name) - - self.learning_rate = learning_rate - self.weight_decay_rate = weight_decay_rate - self.beta_1 = beta_1 - self.beta_2 = beta_2 - self.epsilon = epsilon - self.exclude_from_weight_decay = exclude_from_weight_decay - - def apply_gradients(self, grads_and_vars, global_step=None, name=None): - """See base class.""" - assignments = [] - for (grad, param) in grads_and_vars: - if grad is None or param is None: - continue - - param_name = self._get_variable_name(param.name) - - m = tf.get_variable( - name=param_name + "/adam_m", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - v = tf.get_variable( - name=param_name + "/adam_v", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - - # Standard Adam update. - next_m = ( - tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) - next_v = ( - tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, - tf.square(grad))) - - update = next_m / (tf.sqrt(next_v) + self.epsilon) - - # Just adding the square of the weights to the loss function is *not* - # the correct way of using L2 regularization/weight decay with Adam, - # since that will interact with the m and v parameters in strange ways. - # - # Instead we want ot decay the weights in a manner that doesn't interact - # with the m/v parameters. This is equivalent to adding the square - # of the weights to the loss with plain (non-momentum) SGD. - if self._do_use_weight_decay(param_name): - update += self.weight_decay_rate * param - - update_with_lr = self.learning_rate * update - - next_param = param - update_with_lr - - assignments.extend( - [param.assign(next_param), - m.assign(next_m), - v.assign(next_v)]) - return tf.group(*assignments, name=name) - - def _do_use_weight_decay(self, param_name): - """Whether to use L2 weight decay for `param_name`.""" - if not self.weight_decay_rate: - return False - if self.exclude_from_weight_decay: - for r in self.exclude_from_weight_decay: - if re.search(r, param_name) is not None: - return False - return True - - def _get_variable_name(self, param_name): - """Get the variable name from the tensor name.""" - m = re.match("^(.*):\\d+$", param_name) - if m is not None: - param_name = m.group(1) - return param_name diff --git a/build/lib/caire-covid/biobert/predictor_biobert.py b/build/lib/caire-covid/biobert/predictor_biobert.py deleted file mode 100644 index dd2e024..0000000 --- a/build/lib/caire-covid/biobert/predictor_biobert.py +++ /dev/null @@ -1,1227 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
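A minimal NumPy sketch of one `AdamWeightDecayOptimizer` update as implemented above: standard Adam moment updates, with the weight decay added to the update rather than to the gradient, and (like the deleted code) no Adam bias correction. Names and toy values are illustrative:

```python
import numpy as np

def adamw_step(param, grad, m, v, lr, weight_decay_rate=0.01,
               beta_1=0.9, beta_2=0.999, epsilon=1e-6):
    """One update of the decoupled-weight-decay Adam variant sketched above."""
    next_m = beta_1 * m + (1.0 - beta_1) * grad
    next_v = beta_2 * v + (1.0 - beta_2) * grad ** 2
    update = next_m / (np.sqrt(next_v) + epsilon)
    update += weight_decay_rate * param      # decay applied to the weights, not to m/v
    next_param = param - lr * update
    return next_param, next_m, next_v

p = np.ones(3)
g = np.array([0.1, -0.2, 0.3])
m = np.zeros_like(p)
v = np.zeros_like(p)
p, m, v = adamw_step(p, g, m, v, lr=5e-5)
print(p)
```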
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Run BERT on SQuAD 1.1 and SQuAD 2.0.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import json -import math -import os -import random -import biobert.modeling as modeling -import biobert.optimization as optimization -import biobert.tokenization as tokenization -import six -import tensorflow as tf - -# flags = tf.flags - -# FLAGS = flags.FLAGS - -# ## Required parameters -# flags.DEFINE_string( -# "bert_config_file", None, -# "The config json file corresponding to the pre-trained BERT model. " -# "This specifies the model architecture.") - -# flags.DEFINE_string("vocab_file", None, -# "The vocabulary file that the BERT model was trained on.") - -# flags.DEFINE_string( -# "output_dir", None, -# "The output directory where the model checkpoints will be written.") - -# ## Other parameters -# flags.DEFINE_string("train_file", None, -# "SQuAD json for training. E.g., train-v1.1.json") - -# flags.DEFINE_string( -# "predict_file", None, -# "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") - -# flags.DEFINE_string( -# "init_checkpoint", None, -# "Initial checkpoint (usually from a pre-trained BERT model).") - -# flags.DEFINE_bool( -# "do_lower_case", True, -# "Whether to lower case the input text. Should be True for uncased " -# "models and False for cased models.") - -# flags.DEFINE_integer( -# "max_seq_length", 384, -# "The maximum total input sequence length after WordPiece tokenization. " -# "Sequences longer than this will be truncated, and sequences shorter " -# "than this will be padded.") - -# flags.DEFINE_integer( -# "doc_stride", 128, -# "When splitting up a long document into chunks, how much stride to " -# "take between chunks.") - -# flags.DEFINE_integer( -# "max_query_length", 64, -# "The maximum number of tokens for the question. Questions longer than " -# "this will be truncated to this length.") - -# flags.DEFINE_bool("do_train", False, "Whether to run training.") - -# flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.") - -# flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") - -# flags.DEFINE_integer("predict_batch_size", 8, -# "Total batch size for predictions.") - -# flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") - -# flags.DEFINE_float("num_train_epochs", 3.0, -# "Total number of training epochs to perform.") - -# flags.DEFINE_float( -# "warmup_proportion", 0.1, -# "Proportion of training to perform linear learning rate warmup for. " -# "E.g., 0.1 = 10% of training.") - -# flags.DEFINE_integer("save_checkpoints_steps", 1000, -# "How often to save the model checkpoint.") - -# flags.DEFINE_integer("iterations_per_loop", 1000, -# "How many steps to make in each estimator call.") - -# flags.DEFINE_integer( -# "n_best_size", 20, -# "The total number of n-best predictions to generate in the " -# "nbest_predictions.json output file.") - -# flags.DEFINE_integer( -# "max_answer_length", 30, -# "The maximum length of an answer that can be generated. 
This is needed " -# "because the start and end predictions are not conditioned on one another.") - -# flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")FLAGS, - -# tf.flags.DEFINE_string( -# "tpu_name", None, -# "The Cloud TPU to use for training. This should be either the name " -# "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " -# "url.") - -# tf.flags.DEFINE_string( -# "tpu_zone", None, -# "[Optional] GCE zone where the Cloud TPU is located in. If not " -# "specified, we will attempt to automatically detect the GCE project from " -# "metadata.") - -# tf.flags.DEFINE_string( -# "gcp_project", None, -# "[Optional] Project name for the Cloud TPU-enabled project. If not " -# "specified, we will attempt to automatically detect the GCE project from " -# "metadata.") - -# tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") - -# flags.DEFINE_integer( -# "num_tpu_cores", 8, -# "Only used if `use_tpu` is True. Total number of TPU cores to use.") - -# flags.DEFINE_bool( -# "verbose_logging", False, -# "If true, all of the warnings related to data processing will be printed. " -# "A number of warnings are expected for a normal SQuAD evaluation.") - -# flags.DEFINE_bool( -# "version_2_with_negative", False, -# "If true, the SQuAD examples contain some that do not have an answer.") - -# flags.DEFINE_float( -# "null_score_diff_threshold", 0.0, -# "If null_score - best_non_null is greater than the threshold predict null.") - - -class SquadExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. - """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) - s += ", question_text: %s" % ( - tokenization.printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", end_position: %d" % (self.end_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training): - """Read a SQuAD json file into a list of SquadExample.""" - is_bioasq=True # for BioASQ - - with tf.gfile.Open(input_file, "r") as reader: - 
#if is_bioasq: - #input_data = [{u'paragraphs':json.load(reader)["questions"], u'title':'bioASQ'}] # to fit the shape of squad code - #else: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - if is_bioasq: - paragraph_text.replace('/',' ') # need review - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - - if FLAGS.version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - - 1] - # Only add answers where the text can be exactly recovered from the - # document. If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join( - doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join( - tokenization.whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - -## TODO -def arrange_kaggle_data(input_data, is_training): - """Read a QA data jsonl file into a list of Examples.""" - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - paragraph_text = entry["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in entry["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - return examples - -def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. 
- _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. 
- doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - # if example_index < 20: - # tf.logging.info("*** Example ***") - # tf.logging.info("unique_id: %s" % (unique_id)) - # tf.logging.info("example_index: %s" % (example_index)) - # tf.logging.info("doc_span_index: %s" % (doc_span_index)) - # tf.logging.info("tokens: %s" % " ".join( - # [tokenization.printable_text(x) for x in tokens])) - # tf.logging.info("token_to_orig_map: %s" % " ".join( - # ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) - # tf.logging.info("token_is_max_context: %s" % " ".join([ - # "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) - # ])) - # tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - # tf.logging.info( - # "input_mask: %s" % " ".join([str(x) for x in input_mask])) - # tf.logging.info( - # "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - # if is_training and example.is_impossible: - # tf.logging.info("impossible example") - # if is_training and not example.is_impossible: - # answer_text = " ".join(tokens[start_position:(end_position + 1)]) - # tf.logging.info("start_position: %d" % (start_position)) - # tf.logging.info("end_position: %d" % (end_position)) - # tf.logging.info( - # "answer: %s" % (tokenization.printable_text(answer_text))) - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, - orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. - # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. 
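A minimal sketch of the span refinement the comment above describes, using a toy punctuation-splitting tokenizer as a stand-in for WordPiece (purely illustrative; the real code uses the BERT tokenizer and its sub-token indices):

    import re

    def toy_tokenize(text):
        # Crude stand-in for WordPiece: separate word characters from punctuation.
        return re.findall(r"\w+|[^\w\s]", text.lower())

    def improve_answer_span(all_doc_tokens, input_start, input_end, tok_answer_text):
        # Scan every sub-span of the annotated range for an exact token match.
        for new_start in range(input_start, input_end + 1):
            for new_end in range(input_end, new_start - 1, -1):
                if " ".join(all_doc_tokens[new_start:new_end + 1]) == tok_answer_text:
                    return (new_start, new_end)
        return (input_start, input_end)

    doc_tokens = ["The", "leader", "was", "John", "Smith", "(1895-1943)."]
    all_doc_tokens = [st for t in doc_tokens for st in toy_tokenize(t)]
    tok_answer_text = " ".join(toy_tokenize("1895"))

    # The whitespace annotation covers the whole word "(1895-1943)."; refinement
    # narrows it to the exact sub-token for "1895".
    best = improve_answer_span(all_doc_tokens, all_doc_tokens.index("("),
                               len(all_doc_tokens) - 1, tok_answer_text)
    print(best, all_doc_tokens[best[0]:best[1] + 1])   # (6, 6) ['1895']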
- tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - use_one_hot_embeddings): - """Creates a classification model.""" - model = modeling.BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - final_hidden = model.get_sequence_output() - - final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) - batch_size = final_hidden_shape[0] - seq_length = final_hidden_shape[1] - hidden_size = final_hidden_shape[2] - - output_weights = tf.get_variable( - "cls/squad/output_weights", [2, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) - - final_hidden_matrix = tf.reshape(final_hidden, - [batch_size * seq_length, hidden_size]) - logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - - logits = tf.reshape(logits, [batch_size, seq_length, 2]) - logits = tf.transpose(logits, [2, 0, 1]) - - unstacked_logits = tf.unstack(logits, axis=0) - - (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) - - return (start_logits, end_logits) - - -def model_fn_builder(bert_config, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % (name, 
features[name].shape)) - - unique_ids = features["unique_ids"] - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (start_logits, end_logits) = create_model( - bert_config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - tvars = tf.trainable_variables() - - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - seq_length = modeling.get_shape_list(input_ids)[1] - - def compute_loss(logits, positions): - one_hot_positions = tf.one_hot( - positions, depth=seq_length, dtype=tf.float32) - log_probs = tf.nn.log_softmax(logits, axis=-1) - loss = -tf.reduce_mean( - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) - return loss - - start_positions = features["start_positions"] - end_positions = features["end_positions"] - - start_loss = compute_loss(start_logits, start_positions) - end_loss = compute_loss(end_logits, end_positions) - - total_loss = (start_loss + end_loss) / 2.0 - - train_op = optimization.create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, loss=total_loss, train_op=train_op) - - elif mode == tf.estimator.ModeKeys.PREDICT: - predictions = { - "unique_ids": unique_ids, - "start_logits": start_logits, - "end_logits": end_logits, - } - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, predictions=predictions) - - else: - raise ValueError( - "Only TRAIN and PREDICT modes are supported: %s" % (mode)) - - return output_spec - - return model_fn - - -def input_fn_builder(input_file, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. 
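For intuition about the start/end loss defined in model_fn above: per example it is plain cross-entropy of the gold position against a softmax over all token positions, averaged across the two endpoints. A toy NumPy check with made-up logits (not model output):

    import numpy as np

    def log_softmax(x):
        x = np.asarray(x, dtype=float)
        m = x.max()
        return x - (m + np.log(np.exp(x - m).sum()))

    def position_loss(logits, gold_position):
        # Negative log-probability of the gold start or end position.
        return -log_softmax(logits)[gold_position]

    start_logits = [0.1, 2.0, 0.3, -1.0]
    end_logits = [0.0, 0.5, 3.0, -0.5]
    total_loss = (position_loss(start_logits, 1) + position_loss(end_logits, 2)) / 2.0
    print(round(total_loss, 4))   # roughly 0.2369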
- for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn - - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_logits", "end_logits"]) - -def get_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, do_lower_case, FLAGS): - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", - ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) - - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - min_null_feature_index = 0 # the paragraph slice with min mull score - null_start_logit = 0 # the start logit at the slice with min null score - null_end_logit = 0 # the end logit at the slice with min null score - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - start_indexes = _get_best_indexes(result.start_logits, n_best_size) - end_indexes = _get_best_indexes(result.end_logits, n_best_size) - # if we could have irrelevant answers, get the min score of irrelevant - if FLAGS.version_2_with_negative: - feature_null_score = result.start_logits[0] + result.end_logits[0] - if feature_null_score < score_null: - score_null = feature_null_score - min_null_feature_index = feature_index - null_start_logit = result.start_logits[0] - null_end_logit = result.end_logits[0] - for start_index in start_indexes: - for end_index in end_indexes: - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. 
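The candidate generation above pairs the top start logits with the top end logits and keeps only well-formed spans, ranking by the sum of the two logits. A toy version of that filtering and scoring (all logit values made up):

    start_logits = [0.5, 3.0, -0.5, 2.5]
    end_logits = [0.0, 0.5, 3.0, 1.0]
    max_answer_length = 3
    n_best_size = 2

    candidates = []
    for s, s_logit in enumerate(start_logits):
        for e, e_logit in enumerate(end_logits):
            # Drop spans that end before they start or exceed the length limit.
            if e < s or (e - s + 1) > max_answer_length:
                continue
            candidates.append(((s, e), s_logit + e_logit))

    candidates.sort(key=lambda c: c[1], reverse=True)
    print(candidates[:n_best_size])   # [((1, 2), 6.0), ((1, 3), 4.0)]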
- if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index])) - - if FLAGS.version_2_with_negative: - prelim_predictions.append( - _PrelimPrediction( - feature_index=min_null_feature_index, - start_index=0, - end_index=0, - start_logit=null_start_logit, - end_logit=null_end_logit)) - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True) - - _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_logit", "end_logit"]) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - if pred.start_index > 0: # this is a non-null prediction - tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] - tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, do_lower_case, FLAGS) - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - else: - final_text = "" - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_logit=pred.start_logit, - end_logit=pred.end_logit)) - - # if we didn't inlude the empty option in the n-best, inlcude it - if FLAGS.version_2_with_negative: - if "" not in seen_predictions: - nbest.append( - _NbestPrediction( - text="", start_logit=null_start_logit, - end_logit=null_end_logit)) - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. 
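The de-tokenization step above simply re-attaches WordPiece continuation pieces before the prediction is projected back onto the original text; for example, with toy tokens:

    tok_tokens = ["un", "##able", "to", "re", "##cover", "text"]
    tok_text = " ".join(tok_tokens).replace(" ##", "")   # re-join "##" continuations
    tok_text = " ".join(tok_text.strip().split())        # normalize whitespace
    print(tok_text)   # "unable to recover text"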
- if not nbest: - nbest.append( - _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1 - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1 - - if not FLAGS.version_2_with_negative: - all_predictions[example.qas_id] = nbest_json[0]["text"] - else: - # predict "" iff the null score - the score of best non-null > threshold - score_diff = score_null - best_non_null_entry.start_logit - ( - best_non_null_entry.end_logit) - scores_diff_json[example.qas_id] = score_diff - if score_diff > FLAGS.null_score_diff_threshold: - all_predictions[example.qas_id] = "" - else: - all_predictions[example.qas_id] = best_non_null_entry.text - - return all_predictions - - -def get_final_text(pred_text, orig_text, do_lower_case, FLAGS): - """Project the tokenized prediction back to the original text.""" - - # When we created the data, we kept track of the alignment between original - # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So - # now `orig_text` contains the span of our original text corresponding to the - # span that we predicted. - # - # However, `orig_text` may contain extra characters that we don't want in - # our prediction. - # - # For example, let's say: - # pred_text = steve smith - # orig_text = Steve Smith's - # - # We don't want to return `orig_text` because it contains the extra "'s". - # - # We don't want to return `pred_text` because it's already been normalized - # (the SQuAD eval script also does punctuation stripping/lower casing but - # our tokenizer does additional normalization like stripping accent - # characters). - # - # What we really want to return is "Steve Smith". - # - # Therefore, we have to apply a semi-complicated alignment heruistic between - # `pred_text` and `orig_text` to get a character-to-charcter alignment. This - # can fail in certain cases in which case we just return `orig_text`. - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. 
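A small sketch of the whitespace-stripping alignment that the comment above relies on (toy strings; the real get_final_text first runs BasicTokenizer over orig_text and only trusts the mapping when the space-stripped strings have equal length):

    import collections

    def strip_spaces(text):
        # Drop spaces but remember where each kept character sat in the original.
        ns_chars, ns_to_s_map = [], collections.OrderedDict()
        for i, c in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        return "".join(ns_chars), ns_to_s_map

    orig_text = "Steve  Smith"    # note the double space
    tok_text = "steve smith"      # stand-in for the re-tokenized, lower-cased text
    pred_text = "smith"

    orig_ns, orig_map = strip_spaces(orig_text)
    tok_ns, tok_map = strip_spaces(tok_text)
    assert len(orig_ns) == len(tok_ns)   # precondition for the heuristic

    start = tok_text.find(pred_text)
    end = start + len(pred_text) - 1
    tok_s_to_ns = {v: k for k, v in tok_map.items()}
    print(orig_text[orig_map[tok_s_to_ns[start]]:orig_map[tok_s_to_ns[end]] + 1])   # Smith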
- tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if FLAGS.verbose_logging: - tf.logging.info( - "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if FLAGS.verbose_logging: - tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", - orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. - tok_s_to_ns_map = {} - for (i, tok_index) in six.iteritems(tok_ns_to_s_map): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map start position") - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map end position") - return orig_text - - output_text = orig_text[orig_start_position:(orig_end_position + 1)] - return output_text - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, is_training): - # self.filename = filename - self.is_training = is_training - self.num_features = 0 - # self._writer = tf.python_io.TFRecordWriter(filename) - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 0 - 
if feature.is_impossible: - impossible = 1 - features["is_impossible"] = create_int_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - return tf_example.SerializeToString() - # self._writer.write(tf_example.SerializeToString()) - - # def close(self): - # self._writer.close() - - -def validate_flags_or_throw(FLAGS, bert_config): - """Validate the input FLAGS or throw an exception.""" - # tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, - # FLAGS.init_checkpoint) - - # if not FLAGS.do_train and not FLAGS.do_predict: - # raise ValueError("At least one of `do_train` or `do_predict` must be True.") - - # if FLAGS.do_train: - # if not FLAGS.train_file: - # raise ValueError( - # "If `do_train` is True, then `train_file` must be specified.") - # if FLAGS.do_predict: - # if not FLAGS.predict_file: - # raise ValueError( - # "If `do_predict` is True, then `predict_file` must be specified.") - - if FLAGS.max_seq_length > bert_config.max_position_embeddings: - raise ValueError( - "Cannot use sequence length %d because the BERT model " - "was only trained up to sequence length %d" % - (FLAGS.max_seq_length, bert_config.max_position_embeddings)) - - if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: - raise ValueError( - "The max_seq_length (%d) must be greater than max_query_length " - "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length)) - - -def biobert_predictor(FLAGS, predict_fn, data): - tf.logging.set_verbosity(tf.logging.INFO) - - bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) - - validate_flags_or_throw(FLAGS, bert_config) - - tokenizer = tokenization.FullTokenizer( - vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) - - - eval_examples = arrange_kaggle_data(data, is_training=False) - - eval_writer = FeatureWriter(is_training=False) - eval_features = [] - eval_features_inp = [] - - def append_feature(feature): - eval_features.append(feature) - eval_features_inp.append(eval_writer.process_feature(feature)) - - convert_examples_to_features( - examples=eval_examples, - tokenizer=tokenizer, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=False, - output_fn=append_feature) - - # If running eval on the TPU, you will need to specify the number of - # steps. - all_results = [] - for num, eval_feature in enumerate(eval_features_inp): - result = predict_fn({"examples":[eval_feature]}) - - # if len(all_results) % 1000 == 0: - # tf.logging.info("Processing example: %d" % (len(all_results))) - unique_id = int(result["unique_ids"]) - start_logits = [float(x) for x in result["start_logits"].flat] - end_logits = [float(x) for x in result["end_logits"].flat] - all_results.append( - RawResult( - unique_id=unique_id, - start_logits=start_logits, - end_logits=end_logits)) - - ret = get_predictions(eval_examples, eval_features, all_results, - FLAGS.n_best_size, FLAGS.max_answer_length, - FLAGS.do_lower_case, FLAGS) - return ret - -def main(): - raise NotImplementedError - -if __name__ == "__main__": - main() diff --git a/build/lib/caire-covid/biobert/run_factoid.py b/build/lib/caire-covid/biobert/run_factoid.py deleted file mode 100644 index fcfbc21..0000000 --- a/build/lib/caire-covid/biobert/run_factoid.py +++ /dev/null @@ -1,1290 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Run BERT on SQuAD 1.1 and SQuAD 2.0.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import json -import math -import os -import random -import modeling -import optimization -import tokenization -import six -import tensorflow as tf - -flags = tf.flags - -FLAGS = flags.FLAGS - -## Required parameters -flags.DEFINE_string( - "bert_config_file", None, - "The config json file corresponding to the pre-trained BERT model. " - "This specifies the model architecture.") - -flags.DEFINE_string("vocab_file", None, - "The vocabulary file that the BERT model was trained on.") - -flags.DEFINE_string( - "output_dir", None, - "The output directory where the model checkpoints will be written.") - -## Other parameters -flags.DEFINE_string("train_file", None, - "SQuAD json for training. E.g., train-v1.1.json") - -flags.DEFINE_string( - "predict_file", None, - "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") - -flags.DEFINE_string( - "init_checkpoint", None, - "Initial checkpoint (usually from a pre-trained BERT model).") - -flags.DEFINE_bool( - "do_lower_case", True, - "Whether to lower case the input text. Should be True for uncased " - "models and False for cased models.") - -flags.DEFINE_integer( - "max_seq_length", 384, - "The maximum total input sequence length after WordPiece tokenization. " - "Sequences longer than this will be truncated, and sequences shorter " - "than this will be padded.") - -flags.DEFINE_integer( - "doc_stride", 128, - "When splitting up a long document into chunks, how much stride to " - "take between chunks.") - -flags.DEFINE_integer( - "max_query_length", 64, - "The maximum number of tokens for the question. Questions longer than " - "this will be truncated to this length.") - -flags.DEFINE_bool("do_train", False, "Whether to run training.") - -flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.") - -flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") - -flags.DEFINE_integer("predict_batch_size", 8, - "Total batch size for predictions.") - -flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") - -flags.DEFINE_float("num_train_epochs", 3.0, - "Total number of training epochs to perform.") - -flags.DEFINE_float( - "warmup_proportion", 0.1, - "Proportion of training to perform linear learning rate warmup for. " - "E.g., 0.1 = 10% of training.") - -flags.DEFINE_integer("save_checkpoints_steps", 1000, - "How often to save the model checkpoint.") - -flags.DEFINE_integer("iterations_per_loop", 1000, - "How many steps to make in each estimator call.") - -flags.DEFINE_integer( - "n_best_size", 20, - "The total number of n-best predictions to generate in the " - "nbest_predictions.json output file.") - -flags.DEFINE_integer( - "max_answer_length", 30, - "The maximum length of an answer that can be generated. 
This is needed " - "because the start and end predictions are not conditioned on one another.") - -flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") - -tf.flags.DEFINE_string( - "tpu_name", None, - "The Cloud TPU to use for training. This should be either the name " - "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " - "url.") - -tf.flags.DEFINE_string( - "tpu_zone", None, - "[Optional] GCE zone where the Cloud TPU is located in. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string( - "gcp_project", None, - "[Optional] Project name for the Cloud TPU-enabled project. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") - -flags.DEFINE_integer( - "num_tpu_cores", 8, - "Only used if `use_tpu` is True. Total number of TPU cores to use.") - -flags.DEFINE_bool( - "verbose_logging", False, - "If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation.") - -flags.DEFINE_bool( - "version_2_with_negative", False, - "If true, the SQuAD examples contain some that do not have an answer.") - -flags.DEFINE_float( - "null_score_diff_threshold", 0.0, - "If null_score - best_non_null is greater than the threshold predict null.") - - -class SquadExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. - """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) - s += ", question_text: %s" % ( - tokenization.printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", end_position: %d" % (self.end_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training): - """Read a SQuAD json file into a list of SquadExample.""" - is_bioasq=True # for BioASQ - - with tf.gfile.Open(input_file, "r") as reader: - #if is_bioasq: - #input_data = 
[{u'paragraphs':json.load(reader)["questions"], u'title':'bioASQ'}] # to fit the shape of squad code - #else: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - if is_bioasq: - paragraph_text.replace('/',' ') # need review - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - - if FLAGS.version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - - 1] - # Only add answers where the text can be exactly recovered from the - # document. If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join( - doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join( - tokenization.whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - - -def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. - _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. 
- while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. - doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - if example_index < 20: - tf.logging.info("*** Example ***") - tf.logging.info("unique_id: %s" % (unique_id)) - tf.logging.info("example_index: %s" % (example_index)) - tf.logging.info("doc_span_index: %s" % (doc_span_index)) - tf.logging.info("tokens: %s" % " ".join( - [tokenization.printable_text(x) for x in tokens])) - tf.logging.info("token_to_orig_map: %s" % " ".join( - ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) - tf.logging.info("token_is_max_context: %s" % " ".join([ - "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) - ])) - tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - tf.logging.info( - "input_mask: %s" % " ".join([str(x) for x in input_mask])) - tf.logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - if is_training and example.is_impossible: - tf.logging.info("impossible example") - if is_training and not example.is_impossible: - answer_text = " ".join(tokens[start_position:(end_position + 1)]) - tf.logging.info("start_position: %d" % (start_position)) - tf.logging.info("end_position: %d" % (end_position)) - tf.logging.info( - "answer: %s" % (tokenization.printable_text(answer_text))) - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, - orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. 
- # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. - tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - use_one_hot_embeddings): - """Creates a classification model.""" - model = modeling.BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - final_hidden = model.get_sequence_output() - - final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) - batch_size = final_hidden_shape[0] - seq_length = final_hidden_shape[1] - hidden_size = final_hidden_shape[2] - - output_weights = tf.get_variable( - "cls/squad/output_weights", [2, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) - - final_hidden_matrix = tf.reshape(final_hidden, - [batch_size * seq_length, hidden_size]) - logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - - logits = tf.reshape(logits, [batch_size, seq_length, 2]) - logits = tf.transpose(logits, [2, 0, 1]) - - unstacked_logits = tf.unstack(logits, axis=0) - - (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) - - return (start_logits, end_logits) - - -def model_fn_builder(bert_config, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for 
TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) - - unique_ids = features["unique_ids"] - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (start_logits, end_logits) = create_model( - bert_config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - tvars = tf.trainable_variables() - - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - seq_length = modeling.get_shape_list(input_ids)[1] - - def compute_loss(logits, positions): - one_hot_positions = tf.one_hot( - positions, depth=seq_length, dtype=tf.float32) - log_probs = tf.nn.log_softmax(logits, axis=-1) - loss = -tf.reduce_mean( - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) - return loss - - start_positions = features["start_positions"] - end_positions = features["end_positions"] - - start_loss = compute_loss(start_logits, start_positions) - end_loss = compute_loss(end_logits, end_positions) - - total_loss = (start_loss + end_loss) / 2.0 - - train_op = optimization.create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - elif mode == tf.estimator.ModeKeys.PREDICT: - predictions = { - "unique_ids": unique_ids, - "start_logits": start_logits, - "end_logits": end_logits, - } - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) - else: - raise ValueError( - "Only TRAIN and PREDICT modes are supported: %s" % (mode)) - - return output_spec - - return model_fn - - -def input_fn_builder(input_file, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports 
tf.int32. - # So cast all int64 to int32. - for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn - - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_logits", "end_logits"]) - - -def write_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, do_lower_case, output_prediction_file, - output_nbest_file, output_null_log_odds_file): - """Write final predictions to the json file and log-odds of null if needed.""" - tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) - tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) - - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", - ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) - - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - min_null_feature_index = 0 # the paragraph slice with min mull score - null_start_logit = 0 # the start logit at the slice with min null score - null_end_logit = 0 # the end logit at the slice with min null score - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - start_indexes = _get_best_indexes(result.start_logits, n_best_size) - end_indexes = _get_best_indexes(result.end_logits, n_best_size) - # if we could have irrelevant answers, get the min score of irrelevant - if FLAGS.version_2_with_negative: - feature_null_score = result.start_logits[0] + result.end_logits[0] - if feature_null_score < score_null: - score_null = feature_null_score - min_null_feature_index = feature_index - null_start_logit = result.start_logits[0] - null_end_logit = result.end_logits[0] - for start_index in start_indexes: - for end_index in end_indexes: - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. 
- if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index])) - - if FLAGS.version_2_with_negative: - prelim_predictions.append( - _PrelimPrediction( - feature_index=min_null_feature_index, - start_index=0, - end_index=0, - start_logit=null_start_logit, - end_logit=null_end_logit)) - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True) - - _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_logit", "end_logit"]) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - if pred.start_index > 0: # this is a non-null prediction - tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] - tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, do_lower_case) - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - else: - final_text = "" - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_logit=pred.start_logit, - end_logit=pred.end_logit)) - - # if we didn't inlude the empty option in the n-best, inlcude it - if FLAGS.version_2_with_negative: - if "" not in seen_predictions: - nbest.append( - _NbestPrediction( - text="", start_logit=null_start_logit, - end_logit=null_end_logit)) - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. 
- if not nbest: - nbest.append( - _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1 - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1 - - if not FLAGS.version_2_with_negative: - all_predictions[example.qas_id] = nbest_json[0]["text"] - else: - # predict "" iff the null score - the score of best non-null > threshold - score_diff = score_null - best_non_null_entry.start_logit - ( - best_non_null_entry.end_logit) - scores_diff_json[example.qas_id] = score_diff - if score_diff > FLAGS.null_score_diff_threshold: - all_predictions[example.qas_id] = "" - else: - all_predictions[example.qas_id] = best_non_null_entry.text - - all_nbest_json[example.qas_id] = nbest_json - - with tf.gfile.GFile(output_prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - - with tf.gfile.GFile(output_nbest_file, "w") as writer: - writer.write(json.dumps(all_nbest_json, indent=4) + "\n") - - if FLAGS.version_2_with_negative: - with tf.gfile.GFile(output_null_log_odds_file, "w") as writer: - writer.write(json.dumps(scores_diff_json, indent=4) + "\n") - - -def get_final_text(pred_text, orig_text, do_lower_case): - """Project the tokenized prediction back to the original text.""" - - # When we created the data, we kept track of the alignment between original - # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So - # now `orig_text` contains the span of our original text corresponding to the - # span that we predicted. - # - # However, `orig_text` may contain extra characters that we don't want in - # our prediction. - # - # For example, let's say: - # pred_text = steve smith - # orig_text = Steve Smith's - # - # We don't want to return `orig_text` because it contains the extra "'s". - # - # We don't want to return `pred_text` because it's already been normalized - # (the SQuAD eval script also does punctuation stripping/lower casing but - # our tokenizer does additional normalization like stripping accent - # characters). - # - # What we really want to return is "Steve Smith". - # - # Therefore, we have to apply a semi-complicated alignment heruistic between - # `pred_text` and `orig_text` to get a character-to-charcter alignment. This - # can fail in certain cases in which case we just return `orig_text`. - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. 
- tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if FLAGS.verbose_logging: - tf.logging.info( - "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if FLAGS.verbose_logging: - tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", - orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. - tok_s_to_ns_map = {} - for (i, tok_index) in six.iteritems(tok_ns_to_s_map): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map start position") - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map end position") - return orig_text - - output_text = orig_text[orig_start_position:(orig_end_position + 1)] - return output_text - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, filename, is_training): - self.filename = filename - self.is_training = is_training - self.num_features = 0 - self._writer = tf.python_io.TFRecordWriter(filename) - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 
0 - if feature.is_impossible: - impossible = 1 - features["is_impossible"] = create_int_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - self._writer.write(tf_example.SerializeToString()) - - def close(self): - self._writer.close() - - -def validate_flags_or_throw(bert_config): - """Validate the input FLAGS or throw an exception.""" - tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, - FLAGS.init_checkpoint) - - if not FLAGS.do_train and not FLAGS.do_predict: - raise ValueError("At least one of `do_train` or `do_predict` must be True.") - - if FLAGS.do_train: - if not FLAGS.train_file: - raise ValueError( - "If `do_train` is True, then `train_file` must be specified.") - if FLAGS.do_predict: - if not FLAGS.predict_file: - raise ValueError( - "If `do_predict` is True, then `predict_file` must be specified.") - - if FLAGS.max_seq_length > bert_config.max_position_embeddings: - raise ValueError( - "Cannot use sequence length %d because the BERT model " - "was only trained up to sequence length %d" % - (FLAGS.max_seq_length, bert_config.max_position_embeddings)) - - if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: - raise ValueError( - "The max_seq_length (%d) must be greater than max_query_length " - "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length)) - - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - - bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) - - validate_flags_or_throw(bert_config) - - tf.gfile.MakeDirs(FLAGS.output_dir) - - tokenizer = tokenization.FullTokenizer( - vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) - - tpu_cluster_resolver = None - if FLAGS.use_tpu and FLAGS.tpu_name: - tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( - FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) - - is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 - run_config = tf.contrib.tpu.RunConfig( - cluster=tpu_cluster_resolver, - master=FLAGS.master, - model_dir=FLAGS.output_dir, - save_checkpoints_steps=FLAGS.save_checkpoints_steps, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations_per_loop, - num_shards=FLAGS.num_tpu_cores, - per_host_input_for_training=is_per_host)) - - train_examples = None - num_train_steps = None - num_warmup_steps = None - if FLAGS.do_train: - train_examples = read_squad_examples( - input_file=FLAGS.train_file, is_training=True) - num_train_steps = int( - len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) - num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) - - # Pre-shuffle the input to avoid having to make a very large shuffle - # buffer in in the `input_fn`. - rng = random.Random(12345) - rng.shuffle(train_examples) - - model_fn = model_fn_builder( - bert_config=bert_config, - init_checkpoint=FLAGS.init_checkpoint, - learning_rate=FLAGS.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - use_tpu=FLAGS.use_tpu, - use_one_hot_embeddings=FLAGS.use_tpu) - - # If TPU is not available, this will fall back to normal Estimator on CPU - # or GPU. - estimator = tf.contrib.tpu.TPUEstimator( - use_tpu=FLAGS.use_tpu, - model_fn=model_fn, - config=run_config, - train_batch_size=FLAGS.train_batch_size, - predict_batch_size=FLAGS.predict_batch_size) - - if FLAGS.do_train: - # We write to a temporary file to avoid storing very large constant tensors - # in memory. 
- train_writer = FeatureWriter( - filename=os.path.join(FLAGS.output_dir, "train.tf_record"), - is_training=True) - convert_examples_to_features( - examples=train_examples, - tokenizer=tokenizer, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=True, - output_fn=train_writer.process_feature) - train_writer.close() - - tf.logging.info("***** Running training *****") - tf.logging.info(" Num orig examples = %d", len(train_examples)) - tf.logging.info(" Num split examples = %d", train_writer.num_features) - tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) - tf.logging.info(" Num steps = %d", num_train_steps) - del train_examples - - train_input_fn = input_fn_builder( - input_file=train_writer.filename, - seq_length=FLAGS.max_seq_length, - is_training=True, - drop_remainder=True) - estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) - - if FLAGS.do_predict: - eval_examples = read_squad_examples( - input_file=FLAGS.predict_file, is_training=False) - - eval_writer = FeatureWriter( - filename=os.path.join(FLAGS.output_dir, "eval.tf_record"), - is_training=False) - eval_features = [] - - def append_feature(feature): - eval_features.append(feature) - eval_writer.process_feature(feature) - - convert_examples_to_features( - examples=eval_examples, - tokenizer=tokenizer, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=False, - output_fn=append_feature) - eval_writer.close() - - tf.logging.info("***** Running predictions *****") - tf.logging.info(" Num orig examples = %d", len(eval_examples)) - tf.logging.info(" Num split examples = %d", len(eval_features)) - tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) - - all_results = [] - - predict_input_fn = input_fn_builder( - input_file=eval_writer.filename, - seq_length=FLAGS.max_seq_length, - is_training=False, - drop_remainder=False) - - # If running eval on the TPU, you will need to specify the number of - # steps. - all_results = [] - for result in estimator.predict( - predict_input_fn, yield_single_examples=True): - if len(all_results) % 1000 == 0: - tf.logging.info("Processing example: %d" % (len(all_results))) - unique_id = int(result["unique_ids"]) - start_logits = [float(x) for x in result["start_logits"].flat] - end_logits = [float(x) for x in result["end_logits"].flat] - all_results.append( - RawResult( - unique_id=unique_id, - start_logits=start_logits, - end_logits=end_logits)) - - output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json") - output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json") - output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json") - - write_predictions(eval_examples, eval_features, all_results, - FLAGS.n_best_size, FLAGS.max_answer_length, - FLAGS.do_lower_case, output_prediction_file, - output_nbest_file, output_null_log_odds_file) - - -if __name__ == "__main__": - flags.mark_flag_as_required("vocab_file") - flags.mark_flag_as_required("bert_config_file") - flags.mark_flag_as_required("output_dir") - tf.app.run() diff --git a/build/lib/caire-covid/biobert/save_biobert.py b/build/lib/caire-covid/biobert/save_biobert.py deleted file mode 100644 index 5fa16ba..0000000 --- a/build/lib/caire-covid/biobert/save_biobert.py +++ /dev/null @@ -1,912 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Run BERT on SQuAD 1.1 and SQuAD 2.0.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import json -import math -import os -import random -import modeling -import optimization -import tokenization -import six -import tensorflow as tf - -flags = tf.flags - -FLAGS = flags.FLAGS - -## Required parameters -flags.DEFINE_string( - "bert_config_file", None, - "The config json file corresponding to the pre-trained BERT model. " - "This specifies the model architecture.") - -flags.DEFINE_string("vocab_file", None, - "The vocabulary file that the BERT model was trained on.") - -flags.DEFINE_string( - "output_dir", None, - "The output directory where the model checkpoints will be written.") - -flags.DEFINE_string( - "export_dir_base", None, - "The output directory where the model will be saved.") - -## Other parameters -flags.DEFINE_string("train_file", None, - "SQuAD json for training. E.g., train-v1.1.json") - -flags.DEFINE_string( - "predict_file", None, - "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") - -flags.DEFINE_string( - "init_checkpoint", None, - "Initial checkpoint (usually from a pre-trained BERT model).") - -flags.DEFINE_bool( - "do_lower_case", True, - "Whether to lower case the input text. Should be True for uncased " - "models and False for cased models.") - -flags.DEFINE_integer( - "max_seq_length", 384, - "The maximum total input sequence length after WordPiece tokenization. " - "Sequences longer than this will be truncated, and sequences shorter " - "than this will be padded.") - -flags.DEFINE_integer( - "doc_stride", 128, - "When splitting up a long document into chunks, how much stride to " - "take between chunks.") - -flags.DEFINE_integer( - "max_query_length", 64, - "The maximum number of tokens for the question. Questions longer than " - "this will be truncated to this length.") - -flags.DEFINE_bool("do_train", False, "Whether to run training.") - -flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.") - -flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") - -flags.DEFINE_integer("predict_batch_size", 8, - "Total batch size for predictions.") - -flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") - -flags.DEFINE_float("num_train_epochs", 3.0, - "Total number of training epochs to perform.") - -flags.DEFINE_float( - "warmup_proportion", 0.1, - "Proportion of training to perform linear learning rate warmup for. 
" - "E.g., 0.1 = 10% of training.") - -flags.DEFINE_integer("save_checkpoints_steps", 1000, - "How often to save the model checkpoint.") - -flags.DEFINE_integer("iterations_per_loop", 1000, - "How many steps to make in each estimator call.") - -flags.DEFINE_integer( - "n_best_size", 20, - "The total number of n-best predictions to generate in the " - "nbest_predictions.json output file.") - -flags.DEFINE_integer( - "max_answer_length", 30, - "The maximum length of an answer that can be generated. This is needed " - "because the start and end predictions are not conditioned on one another.") - -flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") - -tf.flags.DEFINE_string( - "tpu_name", None, - "The Cloud TPU to use for training. This should be either the name " - "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " - "url.") - -tf.flags.DEFINE_string( - "tpu_zone", None, - "[Optional] GCE zone where the Cloud TPU is located in. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string( - "gcp_project", None, - "[Optional] Project name for the Cloud TPU-enabled project. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") - -flags.DEFINE_integer( - "num_tpu_cores", 8, - "Only used if `use_tpu` is True. Total number of TPU cores to use.") - -flags.DEFINE_bool( - "verbose_logging", False, - "If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation.") - -flags.DEFINE_bool( - "version_2_with_negative", False, - "If true, the SQuAD examples contain some that do not have an answer.") - -flags.DEFINE_float( - "null_score_diff_threshold", 0.0, - "If null_score - best_non_null is greater than the threshold predict null.") - - -class SquadExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. 
- """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) - s += ", question_text: %s" % ( - tokenization.printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", end_position: %d" % (self.end_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training): - """Read a SQuAD json file into a list of SquadExample.""" - is_bioasq=True # for BioASQ - - with tf.gfile.Open(input_file, "r") as reader: - #if is_bioasq: - #input_data = [{u'paragraphs':json.load(reader)["questions"], u'title':'bioASQ'}] # to fit the shape of squad code - #else: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - if is_bioasq: - paragraph_text.replace('/',' ') # need review - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - - if FLAGS.version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - - 1] - # Only add answers where the text can be exactly recovered from the - # document. 
If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join( - doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join( - tokenization.whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - tf.logging.warning("Could not find answer: '%s' vs. '%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - - -def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. 
- _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. 
- doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - if example_index < 20: - tf.logging.info("*** Example ***") - tf.logging.info("unique_id: %s" % (unique_id)) - tf.logging.info("example_index: %s" % (example_index)) - tf.logging.info("doc_span_index: %s" % (doc_span_index)) - tf.logging.info("tokens: %s" % " ".join( - [tokenization.printable_text(x) for x in tokens])) - tf.logging.info("token_to_orig_map: %s" % " ".join( - ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) - tf.logging.info("token_is_max_context: %s" % " ".join([ - "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) - ])) - tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - tf.logging.info( - "input_mask: %s" % " ".join([str(x) for x in input_mask])) - tf.logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - if is_training and example.is_impossible: - tf.logging.info("impossible example") - if is_training and not example.is_impossible: - answer_text = " ".join(tokens[start_position:(end_position + 1)]) - tf.logging.info("start_position: %d" % (start_position)) - tf.logging.info("end_position: %d" % (end_position)) - tf.logging.info( - "answer: %s" % (tokenization.printable_text(answer_text))) - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, - orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. - # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. 
- tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - use_one_hot_embeddings): - """Creates a classification model.""" - model = modeling.BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - final_hidden = model.get_sequence_output() - - final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) - batch_size = final_hidden_shape[0] - seq_length = final_hidden_shape[1] - hidden_size = final_hidden_shape[2] - - output_weights = tf.get_variable( - "cls/squad/output_weights", [2, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) - - final_hidden_matrix = tf.reshape(final_hidden, - [batch_size * seq_length, hidden_size]) - logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - - logits = tf.reshape(logits, [batch_size, seq_length, 2]) - logits = tf.transpose(logits, [2, 0, 1]) - - unstacked_logits = tf.unstack(logits, axis=0) - - (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) - - return (start_logits, end_logits) - - -def model_fn_builder(bert_config, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % (name, 
features[name].shape)) - - unique_ids = features["unique_ids"] - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (start_logits, end_logits) = create_model( - bert_config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - tvars = tf.trainable_variables() - - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - seq_length = modeling.get_shape_list(input_ids)[1] - - def compute_loss(logits, positions): - one_hot_positions = tf.one_hot( - positions, depth=seq_length, dtype=tf.float32) - log_probs = tf.nn.log_softmax(logits, axis=-1) - loss = -tf.reduce_mean( - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) - return loss - - start_positions = features["start_positions"] - end_positions = features["end_positions"] - - start_loss = compute_loss(start_logits, start_positions) - end_loss = compute_loss(end_logits, end_positions) - - total_loss = (start_loss + end_loss) / 2.0 - - train_op = optimization.create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, loss=total_loss, train_op=train_op) - - elif mode == tf.estimator.ModeKeys.PREDICT: - predictions = { - "unique_ids": unique_ids, - "start_logits": start_logits, - "end_logits": end_logits, - } - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, predictions=predictions) - - else: - raise ValueError( - "Only TRAIN and PREDICT modes are supported: %s" % (mode)) - - return output_spec - - return model_fn - - -def input_fn_builder(input_file, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. 
- for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn - - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, filename, is_training): - self.filename = filename - self.is_training = is_training - self.num_features = 0 - self._writer = tf.python_io.TFRecordWriter(filename) - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 0 - if feature.is_impossible: - impossible = 1 - features["is_impossible"] = create_int_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - self._writer.write(tf_example.SerializeToString()) - - def close(self): - self._writer.close() - - -def validate_flags_or_throw(bert_config): - """Validate the input FLAGS or throw an exception.""" - tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, - FLAGS.init_checkpoint) - - if not FLAGS.do_train and not FLAGS.do_predict: - raise ValueError("At least one of `do_train` or `do_predict` must be True.") - - if FLAGS.do_train: - if not FLAGS.train_file: - raise ValueError( - "If `do_train` is True, then `train_file` must be specified.") - if FLAGS.do_predict: - if not FLAGS.predict_file: - raise ValueError( - "If `do_predict` is True, then `predict_file` must be specified.") - - if FLAGS.max_seq_length > bert_config.max_position_embeddings: - raise ValueError( - "Cannot use sequence length %d because the BERT model " - "was only trained up to sequence length %d" % - (FLAGS.max_seq_length, bert_config.max_position_embeddings)) - - if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: - raise ValueError( - "The max_seq_length (%d) must be greater than max_query_length " - "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length)) - - -def serving_input_receiver_fn(): - """ - An input receiver that expects a serialized tf.Example. - Here input builder is just for serving_input_receiver_fn, - Use placeholder to replace the real data. 
- """ - feature_spec = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.float32), - "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), - } - serialized_tf_example = tf.placeholder(dtype=tf.string, - shape=[FLAGS.predict_batch_size], - name='input_example_tensor') - receiver_tensors = {'examples': serialized_tf_example} - features = tf.parse_example(serialized_tf_example, feature_spec) - return tf.estimator.export.ServingInputReceiver(features, receiver_tensors) - - -def save_biobert_model(): - tf.logging.set_verbosity(tf.logging.INFO) - - bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) - - validate_flags_or_throw(bert_config) - - tf.gfile.MakeDirs(FLAGS.output_dir) - - tokenizer = tokenization.FullTokenizer( - vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) - - tpu_cluster_resolver = None - if FLAGS.use_tpu and FLAGS.tpu_name: - tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( - FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) - - is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 - run_config = tf.contrib.tpu.RunConfig( - cluster=tpu_cluster_resolver, - master=FLAGS.master, - model_dir=FLAGS.output_dir, - save_checkpoints_steps=FLAGS.save_checkpoints_steps, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations_per_loop, - num_shards=FLAGS.num_tpu_cores, - per_host_input_for_training=is_per_host)) - - train_examples = None - num_train_steps = None - num_warmup_steps = None - - model_fn = model_fn_builder( - bert_config=bert_config, - init_checkpoint=FLAGS.init_checkpoint, - learning_rate=FLAGS.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - use_tpu=FLAGS.use_tpu, - use_one_hot_embeddings=FLAGS.use_tpu) - - # If TPU is not available, this will fall back to normal Estimator on CPU - # or GPU. - if FLAGS.use_tpu: - estimator = tf.contrib.tpu.TPUEstimator( - use_tpu=FLAGS.use_tpu, - model_fn=model_fn, - config=run_config, - train_batch_size=FLAGS.train_batch_size, - predict_batch_size=FLAGS.predict_batch_size) - else: - estimator = tf.estimator.Estimator( - model_fn=model_fn, - config=run_config) - - - export_model_path = estimator.export_savedmodel(FLAGS.export_dir_base, serving_input_receiver_fn) - tf.logging.info('Exported to {}'.format(export_model_path)) - return export_model_path - - -if __name__ == "__main__": - flags.mark_flag_as_required("vocab_file") - flags.mark_flag_as_required("bert_config_file") - flags.mark_flag_as_required("output_dir") - model_path = save_biobert_model() - print(model_path) diff --git a/build/lib/caire-covid/biobert/tokenization.py b/build/lib/caire-covid/biobert/tokenization.py deleted file mode 100644 index dc476a6..0000000 --- a/build/lib/caire-covid/biobert/tokenization.py +++ /dev/null @@ -1,399 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Tokenization classes.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import re -import unicodedata -import six -import tensorflow as tf - - -def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): - """Checks whether the casing config is consistent with the checkpoint name.""" - - # The casing has to be passed in by the user and there is no explicit check - # as to whether it matches the checkpoint. The casing information probably - # should have been stored in the bert_config.json file, but it's not, so - # we have to heuristically detect it to validate. - - if not init_checkpoint: - return - - m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) - if m is None: - return - - model_name = m.group(1) - - lower_models = [ - "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", - "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" - ] - - cased_models = [ - "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", - "multi_cased_L-12_H-768_A-12" - ] - - is_bad_config = False - if model_name in lower_models and not do_lower_case: - is_bad_config = True - actual_flag = "False" - case_name = "lowercased" - opposite_flag = "True" - - if model_name in cased_models and do_lower_case: - is_bad_config = True - actual_flag = "True" - case_name = "cased" - opposite_flag = "False" - - if is_bad_config: - raise ValueError( - "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " - "However, `%s` seems to be a %s model, so you " - "should pass in `--do_lower_case=%s` so that the fine-tuning matches " - "how the model was pre-training. If this error is wrong, please " - "just comment out this check." % (actual_flag, init_checkpoint, - model_name, case_name, opposite_flag)) - - -def convert_to_unicode(text): - """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" - if six.PY3: - if isinstance(text, str): - return text - elif isinstance(text, bytes): - return text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - elif six.PY2: - if isinstance(text, str): - return text.decode("utf-8", "ignore") - elif isinstance(text, unicode): - return text - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - else: - raise ValueError("Not running on Python2 or Python 3?") - - -def printable_text(text): - """Returns text encoded in a way suitable for print or `tf.logging`.""" - - # These functions want `str` for both Python2 and Python3, but in one case - # it's a Unicode string and in the other it's a byte string. 
- if six.PY3: - if isinstance(text, str): - return text - elif isinstance(text, bytes): - return text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - elif six.PY2: - if isinstance(text, str): - return text - elif isinstance(text, unicode): - return text.encode("utf-8") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - else: - raise ValueError("Not running on Python2 or Python 3?") - - -def load_vocab(vocab_file): - """Loads a vocabulary file into a dictionary.""" - vocab = collections.OrderedDict() - index = 0 - with tf.gfile.GFile(vocab_file, "r") as reader: - while True: - token = convert_to_unicode(reader.readline()) - if not token: - break - token = token.strip() - vocab[token] = index - index += 1 - return vocab - - -def convert_by_vocab(vocab, items): - """Converts a sequence of [tokens|ids] using the vocab.""" - output = [] - for item in items: - output.append(vocab[item]) - return output - - -def convert_tokens_to_ids(vocab, tokens): - return convert_by_vocab(vocab, tokens) - - -def convert_ids_to_tokens(inv_vocab, ids): - return convert_by_vocab(inv_vocab, ids) - - -def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -class FullTokenizer(object): - """Runs end-to-end tokenziation.""" - - def __init__(self, vocab_file, do_lower_case=True): - self.vocab = load_vocab(vocab_file) - self.inv_vocab = {v: k for k, v in self.vocab.items()} - self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) - - def tokenize(self, text): - split_tokens = [] - for token in self.basic_tokenizer.tokenize(text): - for sub_token in self.wordpiece_tokenizer.tokenize(token): - split_tokens.append(sub_token) - - return split_tokens - - def convert_tokens_to_ids(self, tokens): - return convert_by_vocab(self.vocab, tokens) - - def convert_ids_to_tokens(self, ids): - return convert_by_vocab(self.inv_vocab, ids) - - -class BasicTokenizer(object): - """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" - - def __init__(self, do_lower_case=True): - """Constructs a BasicTokenizer. - - Args: - do_lower_case: Whether to lower case the input. - """ - self.do_lower_case = do_lower_case - - def tokenize(self, text): - """Tokenizes a piece of text.""" - text = convert_to_unicode(text) - text = self._clean_text(text) - - # This was added on November 1st, 2018 for the multilingual and Chinese - # models. This is also applied to the English models now, but it doesn't - # matter since the English models were not trained on any Chinese data - # and generally don't have any Chinese data in them (there are Chinese - # characters in the vocabulary because Wikipedia does have some Chinese - # words in the English Wikipedia.). 
- text = self._tokenize_chinese_chars(text) - - orig_tokens = whitespace_tokenize(text) - split_tokens = [] - for token in orig_tokens: - if self.do_lower_case: - token = token.lower() - token = self._run_strip_accents(token) - split_tokens.extend(self._run_split_on_punc(token)) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Strips accents from a piece of text.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text): - """Splits punctuation on a piece of text.""" - chars = list(text) - i = 0 - start_new_word = True - output = [] - while i < len(chars): - char = chars[i] - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - - return ["".join(x) for x in output] - - def _tokenize_chinese_chars(self, text): - """Adds whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # - return True - - return False - - def _clean_text(self, text): - """Performs invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xfffd or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - -class WordpieceTokenizer(object): - """Runs WordPiece tokenziation.""" - - def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): - self.vocab = vocab - self.unk_token = unk_token - self.max_input_chars_per_word = max_input_chars_per_word - - def tokenize(self, text): - """Tokenizes a piece of text into its word pieces. - - This uses a greedy longest-match-first algorithm to perform tokenization - using the given vocabulary. - - For example: - input = "unaffable" - output = ["un", "##aff", "##able"] - - Args: - text: A single token or whitespace separated tokens. This should have - already been passed through `BasicTokenizer. - - Returns: - A list of wordpiece tokens. 
- """ - - text = convert_to_unicode(text) - - output_tokens = [] - for token in whitespace_tokenize(text): - chars = list(token) - if len(chars) > self.max_input_chars_per_word: - output_tokens.append(self.unk_token) - continue - - is_bad = False - start = 0 - sub_tokens = [] - while start < len(chars): - end = len(chars) - cur_substr = None - while start < end: - substr = "".join(chars[start:end]) - if start > 0: - substr = "##" + substr - if substr in self.vocab: - cur_substr = substr - break - end -= 1 - if cur_substr is None: - is_bad = True - break - sub_tokens.append(cur_substr) - start = end - - if is_bad: - output_tokens.append(self.unk_token) - else: - output_tokens.extend(sub_tokens) - return output_tokens - - -def _is_whitespace(char): - """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them - # as whitespace since they are generally considered as such. - if char == " " or char == "\t" or char == "\n" or char == "\r": - return True - cat = unicodedata.category(char) - if cat == "Zs": - return True - return False - - -def _is_control(char): - """Checks whether `chars` is a control character.""" - # These are technically control characters but we count them as whitespace - # characters. - if char == "\t" or char == "\n" or char == "\r": - return False - cat = unicodedata.category(char) - if cat.startswith("C"): - return True - return False - - -def _is_punctuation(char): - """Checks whether `chars` is a punctuation character.""" - cp = ord(char) - # We treat all non-letter/number ASCII as punctuation. - # Characters such as "^", "$", and "`" are not in the Unicode - # Punctuation class but we treat them as punctuation anyways, for - # consistency. - if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or - (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False diff --git a/build/lib/caire-covid/main.py b/build/lib/caire-covid/main.py deleted file mode 100644 index c312f52..0000000 --- a/build/lib/caire-covid/main.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import sys - -import json -from retrieval import information_retrieval -from qa import QaModule, print_answers_in_file - -all_results, data_for_qa = information_retrieval("question_generation/task1_question.json") - -qa_model = QaModule(["mrqa", "biobert"]) - -answers = qa_model.getAnswers(data_for_qa) - -# print_answers_in_file(answers) -format_answer = {} -for item in answers: - format_answer[item["question"]] = item["data"] - -with open("data.json", "w") as f: - json.dump(format_answer, f) - -# Final output for synthesis -# List [{ -# "question": "xxxx", -# "data": -# { -# "answer": ["answer1", "answer2", ...], -# "confidence": [confidence1, confidence2, ...], -# "title": [title1, title2, ...], -# "doi": [doi1, doi2, ...] -# "sha": [sha1, sha2, ...] 
-# } -# }] diff --git a/build/lib/caire-covid/mrqa/__init__.py b/build/lib/caire-covid/mrqa/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/build/lib/caire-covid/mrqa/data_utils.py b/build/lib/caire-covid/mrqa/data_utils.py deleted file mode 100644 index 484c6e2..0000000 --- a/build/lib/caire-covid/mrqa/data_utils.py +++ /dev/null @@ -1,915 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import json -import os -import random - -from absl import flags -import absl.logging as _logging # pylint: disable=unused-import - -import numpy as np - - -import tensorflow as tf - -from mrqa.prepro_utils import preprocess_text, encode_ids -import sentencepiece as spm - - -special_symbols = { - "" : 0, - "" : 1, - "" : 2, - "" : 3, - "" : 4, - "" : 5, - "" : 6, - "" : 7, - "" : 8, -} - -VOCAB_SIZE = 32000 -UNK_ID = special_symbols[""] -CLS_ID = special_symbols[""] -SEP_ID = special_symbols[""] -MASK_ID = special_symbols[""] -EOD_ID = special_symbols[""] - - -def _int64_feature(values): - return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) - - -def _float_feature(values): - return tf.train.Feature(float_list=tf.train.FloatList(value=values)) - - -def format_filename(prefix, bsz_per_host, seq_len, bi_data, suffix, - mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False, - fixed_num_predict=None): - """docs.""" - if reuse_len is None: - reuse_len_str = "" - else: - reuse_len_str = "reuse-{}.".format(reuse_len) - if not uncased: - uncased_str = "" - else: - uncased_str = "uncased." - if bi_data: - bi_data_str = "bi" - else: - bi_data_str = "uni" - if fixed_num_predict is not None: - fnp_str = "fnp-{}.".format(fixed_num_predict) - else: - fnp_str = "" - - file_name = "{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}".format( - prefix, bsz_per_host, seq_len, reuse_len_str, uncased_str, bi_data_str, - mask_alpha, mask_beta, fnp_str, suffix) - - return file_name - - -def _create_data(idx, input_paths): - # Load sentence-piece model - sp = spm.SentencePieceProcessor() - sp.Load(FLAGS.sp_path) - - input_shards = [] - total_line_cnt = 0 - for input_path in input_paths: - input_data, sent_ids = [], [] - sent_id, line_cnt = True, 0 - tf.logging.info("Processing %s", input_path) - for line in tf.gfile.Open(input_path): - if line_cnt % 100000 == 0: - tf.logging.info("Loading line %d", line_cnt) - line_cnt += 1 - - if not line.strip(): - if FLAGS.use_eod: - sent_id = not sent_id - cur_sent = [EOD_ID] - else: - continue - else: - if FLAGS.from_raw_text: - cur_sent = preprocess_text(line.strip(), lower=FLAGS.uncased) - cur_sent = encode_ids(sp, cur_sent) - else: - cur_sent = list(map(int, line.strip().split())) - - input_data.extend(cur_sent) - sent_ids.extend([sent_id] * len(cur_sent)) - sent_id = not sent_id - - tf.logging.info("Finish with line %d", line_cnt) - if line_cnt == 0: - continue - - input_data = np.array(input_data, dtype=np.int64) - sent_ids = np.array(sent_ids, dtype=np.bool) - - total_line_cnt += line_cnt - input_shards.append((input_data, sent_ids)) - - tf.logging.info("[Task %d] Total number line: %d", idx, total_line_cnt) - - tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords") - - filenames, num_batch = [], 0 - - # Randomly shuffle input shards (with a fixed but distinct random seed) - np.random.seed(100 * FLAGS.task + FLAGS.pass_id) - - perm_indices = np.random.permutation(len(input_shards)) - tf.logging.info("Using perm indices %s for pass %d", - 
perm_indices.tolist(), FLAGS.pass_id) - - input_data_list, sent_ids_list = [], [] - prev_sent_id = None - for perm_idx in perm_indices: - input_data, sent_ids = input_shards[perm_idx] - # make sure the `send_ids[0] == not prev_sent_id` - if prev_sent_id is not None and sent_ids[0] == prev_sent_id: - sent_ids = np.logical_not(sent_ids) - - # append to temporary list - input_data_list.append(input_data) - sent_ids_list.append(sent_ids) - - # update `prev_sent_id` - prev_sent_id = sent_ids[-1] - - input_data = np.concatenate(input_data_list) - sent_ids = np.concatenate(sent_ids_list) - - file_name, cur_num_batch = create_tfrecords( - save_dir=tfrecord_dir, - basename="{}-{}-{}".format(FLAGS.split, idx, FLAGS.pass_id), - data=[input_data, sent_ids], - bsz_per_host=FLAGS.bsz_per_host, - seq_len=FLAGS.seq_len, - bi_data=FLAGS.bi_data, - sp=sp, - ) - - filenames.append(file_name) - num_batch += cur_num_batch - - record_info = { - "filenames": filenames, - "num_batch": num_batch - } - - return record_info - - -def create_data(_): - # Validate FLAGS - assert FLAGS.bsz_per_host % FLAGS.num_core_per_host == 0 - if not FLAGS.use_tpu: - FLAGS.num_core_per_host = 1 # forced to be one - - # Make workdirs - if not tf.gfile.Exists(FLAGS.save_dir): - tf.gfile.MakeDirs(FLAGS.save_dir) - - tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords") - if not tf.gfile.Exists(tfrecord_dir): - tf.gfile.MakeDirs(tfrecord_dir) - - # Create and dump corpus_info from task 0 - if FLAGS.task == 0: - corpus_info = { - "vocab_size": VOCAB_SIZE, - "bsz_per_host": FLAGS.bsz_per_host, - "num_core_per_host": FLAGS.num_core_per_host, - "seq_len": FLAGS.seq_len, - "reuse_len": FLAGS.reuse_len, - "uncased": FLAGS.uncased, - "bi_data": FLAGS.bi_data, - "mask_alpha": FLAGS.mask_alpha, - "mask_beta": FLAGS.mask_beta, - "num_predict": FLAGS.num_predict, - "use_eod": FLAGS.use_eod, - "sp_path": FLAGS.sp_path, - "input_glob": FLAGS.input_glob, - } - corpus_info_path = os.path.join(FLAGS.save_dir, "corpus_info.json") - with tf.gfile.Open(corpus_info_path, "w") as fp: - json.dump(corpus_info, fp) - - # Interleavely split the work into FLAGS.num_task splits - file_paths = sorted(tf.gfile.Glob(FLAGS.input_glob)) - tf.logging.info("Use glob: %s", FLAGS.input_glob) - tf.logging.info("Find %d files: %s", len(file_paths), file_paths) - - task_file_paths = file_paths[FLAGS.task::FLAGS.num_task] - if not task_file_paths: - tf.logging.info("Exit: task %d has no file to process.", FLAGS.task) - return - - tf.logging.info("Task %d process %d files: %s", - FLAGS.task, len(task_file_paths), task_file_paths) - record_info = _create_data(FLAGS.task, task_file_paths) - - record_prefix = "record_info-{}-{}-{}".format( - FLAGS.split, FLAGS.task, FLAGS.pass_id) - record_name = format_filename( - prefix=record_prefix, - bsz_per_host=FLAGS.bsz_per_host, - seq_len=FLAGS.seq_len, - mask_alpha=FLAGS.mask_alpha, - mask_beta=FLAGS.mask_beta, - reuse_len=FLAGS.reuse_len, - bi_data=FLAGS.bi_data, - suffix="json", - uncased=FLAGS.uncased, - fixed_num_predict=FLAGS.num_predict) - record_info_path = os.path.join(tfrecord_dir, record_name) - - with tf.gfile.Open(record_info_path, "w") as fp: - json.dump(record_info, fp) - - -def batchify(data, bsz_per_host, sent_ids=None): - num_step = len(data) // bsz_per_host - data = data[:bsz_per_host * num_step] - data = data.reshape(bsz_per_host, num_step) - if sent_ids is not None: - sent_ids = sent_ids[:bsz_per_host * num_step] - sent_ids = sent_ids.reshape(bsz_per_host, num_step) - - if sent_ids is not None: - return data, 
sent_ids - return data - - -def _split_a_and_b(data, sent_ids, begin_idx, tot_len, extend_target=False): - """Split two segments from `data` starting from the index `begin_idx`.""" - - data_len = data.shape[0] - if begin_idx + tot_len >= data_len: - tf.logging.info("[_split_a_and_b] returns None: " - "begin_idx %d + tot_len %d >= data_len %d", - begin_idx, tot_len, data_len) - return None - - end_idx = begin_idx + 1 - cut_points = [] - while end_idx < data_len: - if sent_ids[end_idx] != sent_ids[end_idx - 1]: - if end_idx - begin_idx >= tot_len: break - cut_points.append(end_idx) - end_idx += 1 - - a_begin = begin_idx - if len(cut_points) == 0 or random.random() < 0.5: - label = 0 - if len(cut_points) == 0: - a_end = end_idx - else: - a_end = random.choice(cut_points) - - b_len = max(1, tot_len - (a_end - a_begin)) - # (zihang): `data_len - 1` to account for extend_target - b_begin = random.randint(0, data_len - 1 - b_len) - b_end = b_begin + b_len - while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]: - b_begin -= 1 - # (zihang): `data_len - 1` to account for extend_target - while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]: - b_end += 1 - - new_begin = a_end - else: - label = 1 - a_end = random.choice(cut_points) - b_begin = a_end - b_end = end_idx - - new_begin = b_end - - while a_end - a_begin + b_end - b_begin > tot_len: - if a_end - a_begin > b_end - b_begin: - # delete the right side only for the LM objective - a_end -= 1 - else: - b_end -= 1 - - ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin] - - if extend_target: - if a_end >= data_len or b_end >= data_len: - tf.logging.info("[_split_a_and_b] returns None: " - "a_end %d or b_end %d >= data_len %d", - a_end, b_end, data_len) - return None - a_target = data[a_begin + 1: a_end + 1] - b_target = data[b_begin: b_end + 1] - ret.extend([a_target, b_target]) - - return ret - - -def _is_start_piece(piece): - special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~')) - if (piece.startswith("▁") or piece.startswith("<") - or piece in special_pieces): - return True - else: - return False - - -def _sample_mask(sp, seg, reverse=False, max_gram=5, goal_num_predict=None): - """Sample `goal_num_predict` tokens for partial prediction. - About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.""" - - seg_len = len(seg) - mask = np.array([False] * seg_len, dtype=np.bool) - - num_predict = 0 - - ngrams = np.arange(1, max_gram + 1, dtype=np.int64) - pvals = 1. 
/ np.arange(1, max_gram + 1) - pvals /= pvals.sum(keepdims=True) - - if reverse: - seg = np.flip(seg, 0) - - cur_len = 0 - while cur_len < seg_len: - if goal_num_predict is not None and num_predict >= goal_num_predict: break - - n = np.random.choice(ngrams, p=pvals) - if goal_num_predict is not None: - n = min(n, goal_num_predict - num_predict) - ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta - l_ctx = np.random.choice(ctx_size) - r_ctx = ctx_size - l_ctx - - # Find the start position of a complete token - beg = cur_len + l_ctx - while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())): - beg += 1 - if beg >= seg_len: - break - - # Find the end position of the n-gram (start pos of the n+1-th gram) - end = beg + 1 - cnt_ngram = 1 - while end < seg_len: - if _is_start_piece(sp.IdToPiece(seg[beg].item())): - cnt_ngram += 1 - if cnt_ngram > n: - break - end += 1 - if end >= seg_len: - break - - # Update - mask[beg:end] = True - num_predict += end - beg - - cur_len = end + r_ctx - - while goal_num_predict is not None and num_predict < goal_num_predict: - i = np.random.randint(seg_len) - if not mask[i]: - mask[i] = True - num_predict += 1 - - if reverse: - mask = np.flip(mask, 0) - - return mask - - -def create_tfrecords(save_dir, basename, data, bsz_per_host, seq_len, - bi_data, sp): - data, sent_ids = data[0], data[1] - - num_core = FLAGS.num_core_per_host - bsz_per_core = bsz_per_host // num_core - - if bi_data: - assert bsz_per_host % (2 * FLAGS.num_core_per_host) == 0 - fwd_data, fwd_sent_ids = batchify(data, bsz_per_host // 2, sent_ids) - - fwd_data = fwd_data.reshape(num_core, 1, bsz_per_core // 2, -1) - fwd_sent_ids = fwd_sent_ids.reshape(num_core, 1, bsz_per_core // 2, -1) - - bwd_data = fwd_data[:, :, :, ::-1] - bwd_sent_ids = fwd_sent_ids[:, :, :, ::-1] - - data = np.concatenate( - [fwd_data, bwd_data], 1).reshape(bsz_per_host, -1) - sent_ids = np.concatenate( - [fwd_sent_ids, bwd_sent_ids], 1).reshape(bsz_per_host, -1) - else: - data, sent_ids = batchify(data, bsz_per_host, sent_ids) - - tf.logging.info("Raw data shape %s.", data.shape) - - file_name = format_filename( - prefix=basename, - bsz_per_host=bsz_per_host, - seq_len=seq_len, - bi_data=bi_data, - suffix="tfrecords", - mask_alpha=FLAGS.mask_alpha, - mask_beta=FLAGS.mask_beta, - reuse_len=FLAGS.reuse_len, - uncased=FLAGS.uncased, - fixed_num_predict=FLAGS.num_predict - ) - save_path = os.path.join(save_dir, file_name) - record_writer = tf.python_io.TFRecordWriter(save_path) - tf.logging.info("Start writing %s.", save_path) - - num_batch = 0 - reuse_len = FLAGS.reuse_len - - # [sep] x 2 + [cls] - assert reuse_len < seq_len - 3 - - data_len = data.shape[1] - sep_array = np.array([SEP_ID], dtype=np.int64) - cls_array = np.array([CLS_ID], dtype=np.int64) - - i = 0 - while i + seq_len <= data_len: - if num_batch % 500 == 0: - tf.logging.info("Processing batch %d", num_batch) - - all_ok = True - features = [] - for idx in range(bsz_per_host): - inp = data[idx, i: i + reuse_len] - tgt = data[idx, i + 1: i + reuse_len + 1] - - results = _split_a_and_b( - data[idx], - sent_ids[idx], - begin_idx=i + reuse_len, - tot_len=seq_len - reuse_len - 3, - extend_target=True) - if results is None: - tf.logging.info("Break out with seq idx %d", i) - all_ok = False - break - - # unpack the results - (a_data, b_data, label, _, a_target, b_target) = tuple(results) - - # sample ngram spans to predict - reverse = bi_data and (idx // (bsz_per_core // 2)) % 2 == 1 - if FLAGS.num_predict is None: - num_predict_0 = num_predict_1 = 
None - else: - num_predict_1 = FLAGS.num_predict // 2 - num_predict_0 = FLAGS.num_predict - num_predict_1 - mask_0 = _sample_mask(sp, inp, reverse=reverse, - goal_num_predict=num_predict_0) - mask_1 = _sample_mask(sp, np.concatenate([a_data, sep_array, b_data, - sep_array, cls_array]), - reverse=reverse, goal_num_predict=num_predict_1) - - # concatenate data - cat_data = np.concatenate([inp, a_data, sep_array, b_data, - sep_array, cls_array]) - seg_id = ([0] * (reuse_len + a_data.shape[0]) + [0] + - [1] * b_data.shape[0] + [1] + [2]) - assert cat_data.shape[0] == seq_len - assert mask_0.shape[0] == seq_len // 2 - assert mask_1.shape[0] == seq_len // 2 - - # the last two CLS's are not used, just for padding purposes - tgt = np.concatenate([tgt, a_target, b_target, cls_array, cls_array]) - assert tgt.shape[0] == seq_len - - is_masked = np.concatenate([mask_0, mask_1], 0) - if FLAGS.num_predict is not None: - assert np.sum(is_masked) == FLAGS.num_predict - - feature = { - "input": _int64_feature(cat_data), - "is_masked": _int64_feature(is_masked), - "target": _int64_feature(tgt), - "seg_id": _int64_feature(seg_id), - "label": _int64_feature([label]), - } - features.append(feature) - - if all_ok: - assert len(features) == bsz_per_host - for feature in features: - example = tf.train.Example(features=tf.train.Features(feature=feature)) - record_writer.write(example.SerializeToString()) - num_batch += 1 - else: - break - - i += reuse_len - - record_writer.close() - tf.logging.info("Done writing %s. Num of batches: %d", save_path, num_batch) - - return save_path, num_batch - - -################ -# get_input_fn # -################ -def _convert_example(example, use_bfloat16): - """Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.""" - for key in list(example.keys()): - val = example[key] - if tf.keras.backend.is_sparse(val): - val = tf.sparse.to_dense(val) - if val.dtype == tf.int64: - val = tf.cast(val, tf.int32) - if use_bfloat16 and val.dtype == tf.float32: - val = tf.cast(val, tf.bfloat16) - - example[key] = val - - -def parse_files_to_dataset(parser, file_names, split, num_batch, num_hosts, - host_id, num_core_per_host, bsz_per_core): - # list of file pathes - num_files = len(file_names) - num_files_per_host = num_files // num_hosts - my_start_file_id = host_id * num_files_per_host - my_end_file_id = (host_id + 1) * num_files_per_host - if host_id == num_hosts - 1: - my_end_file_id = num_files - file_paths = file_names[my_start_file_id: my_end_file_id] - tf.logging.info("Host %d handles %d files", host_id, len(file_paths)) - - assert split == "train" - dataset = tf.data.Dataset.from_tensor_slices(file_paths) - - # file-level shuffle - if len(file_paths) > 1: - dataset = dataset.shuffle(len(file_paths)) - - # Note: we cannot perform sample-level shuffle here because this will violate - # the consecutive requirement of data stream. - dataset = tf.data.TFRecordDataset(dataset) - - # (zihang): since we are doing online preprocessing, the parsed result of - # the same input at each time will be different. Thus, cache processed data - # is not helpful. It will use a lot of memory and lead to contrainer OOM. - # So, change to cache non-parsed raw data instead. 
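Before the pipeline continues below, a minimal sketch of the trade-off this note describes; the shard name is a placeholder and `parser` stands for the record-parsing function defined further down in get_dataset. Caching parsed examples would replay identical (randomly sampled) masks every epoch, whereas caching the raw records, as the following lines do, re-runs the online preprocessing on each pass:

    import tensorflow as tf
    raw = tf.data.TFRecordDataset(["shard-0.tfrecords"])  # placeholder shard name
    frozen = raw.map(parser).cache().repeat()  # parsed examples cached: same masks every epoch
    online = raw.cache().map(parser).repeat()  # raw records cached: parser re-sampled each epoch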
- dataset = dataset.cache().map(parser).repeat() - dataset = dataset.batch(bsz_per_core, drop_remainder=True) - dataset = dataset.prefetch(num_core_per_host * bsz_per_core) - - return dataset - - -def _local_perm(inputs, targets, is_masked, perm_size, seq_len): - """ - Sample a permutation of the factorization order, and create an - attention mask accordingly. - - Args: - inputs: int64 Tensor in shape [seq_len], input ids. - targets: int64 Tensor in shape [seq_len], target ids. - is_masked: bool Tensor in shape [seq_len]. True means being selected - for partial prediction. - perm_size: the length of longest permutation. Could be set to be reuse_len. - Should not be larger than reuse_len or there will be data leaks. - seq_len: int, sequence length. - """ - - # Generate permutation indices - index = tf.range(seq_len, dtype=tf.int64) - index = tf.transpose(tf.reshape(index, [-1, perm_size])) - index = tf.random_shuffle(index) - index = tf.reshape(tf.transpose(index), [-1]) - - # `perm_mask` and `target_mask` - # non-functional tokens - non_func_tokens = tf.logical_not(tf.logical_or( - tf.equal(inputs, SEP_ID), - tf.equal(inputs, CLS_ID))) - - non_mask_tokens = tf.logical_and(tf.logical_not(is_masked), non_func_tokens) - masked_or_func_tokens = tf.logical_not(non_mask_tokens) - - # Set the permutation indices of non-masked (& non-funcional) tokens to the - # smallest index (-1): - # (1) they can be seen by all other positions - # (2) they cannot see masked positions, so there won"t be information leak - smallest_index = -tf.ones([seq_len], dtype=tf.int64) - rev_index = tf.where(non_mask_tokens, smallest_index, index) - - # Create `target_mask`: non-funcional and maksed tokens - # 1: use mask as input and have loss - # 0: use token (or [SEP], [CLS]) as input and do not have loss - target_tokens = tf.logical_and(masked_or_func_tokens, non_func_tokens) - target_mask = tf.cast(target_tokens, tf.float32) - - # Create `perm_mask` - # `target_tokens` cannot see themselves - self_rev_index = tf.where(target_tokens, rev_index, rev_index + 1) - - # 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens) - # 0: can attend if i > j or j is non-masked - perm_mask = tf.logical_and( - self_rev_index[:, None] <= rev_index[None, :], - masked_or_func_tokens) - perm_mask = tf.cast(perm_mask, tf.float32) - - # new target: [next token] for LM and [curr token] (self) for PLM - new_targets = tf.concat([inputs[0: 1], targets[: -1]], - axis=0) - - # construct inputs_k - inputs_k = inputs - - # construct inputs_q - inputs_q = target_mask - - return perm_mask, new_targets, target_mask, inputs_k, inputs_q - - -def get_dataset(params, num_hosts, num_core_per_host, split, file_names, - num_batch, seq_len, reuse_len, perm_size, mask_alpha, - mask_beta, use_bfloat16=False, num_predict=None): - - bsz_per_core = params["batch_size"] - if num_hosts > 1: - host_id = params["context"].current_host - else: - host_id = 0 - - #### Function used to parse tfrecord - def parser(record): - """function used to parse tfrecord.""" - - record_spec = { - "input": tf.FixedLenFeature([seq_len], tf.int64), - "target": tf.FixedLenFeature([seq_len], tf.int64), - "seg_id": tf.FixedLenFeature([seq_len], tf.int64), - "label": tf.FixedLenFeature([1], tf.int64), - "is_masked": tf.FixedLenFeature([seq_len], tf.int64), - } - - # retrieve serialized example - example = tf.parse_single_example( - serialized=record, - features=record_spec) - - inputs = example.pop("input") - target = example.pop("target") - is_masked = 
tf.cast(example.pop("is_masked"), tf.bool) - - non_reuse_len = seq_len - reuse_len - assert perm_size <= reuse_len and perm_size <= non_reuse_len - - perm_mask_0, target_0, target_mask_0, input_k_0, input_q_0 = _local_perm( - inputs[:reuse_len], - target[:reuse_len], - is_masked[:reuse_len], - perm_size, - reuse_len) - - perm_mask_1, target_1, target_mask_1, input_k_1, input_q_1 = _local_perm( - inputs[reuse_len:], - target[reuse_len:], - is_masked[reuse_len:], - perm_size, - non_reuse_len) - - perm_mask_0 = tf.concat([perm_mask_0, tf.ones([reuse_len, non_reuse_len])], - axis=1) - perm_mask_1 = tf.concat([tf.zeros([non_reuse_len, reuse_len]), perm_mask_1], - axis=1) - perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0) - target = tf.concat([target_0, target_1], axis=0) - target_mask = tf.concat([target_mask_0, target_mask_1], axis=0) - input_k = tf.concat([input_k_0, input_k_1], axis=0) - input_q = tf.concat([input_q_0, input_q_1], axis=0) - - if num_predict is not None: - indices = tf.range(seq_len, dtype=tf.int64) - bool_target_mask = tf.cast(target_mask, tf.bool) - indices = tf.boolean_mask(indices, bool_target_mask) - - ##### extra padding due to CLS/SEP introduced after prepro - actual_num_predict = tf.shape(indices)[0] - pad_len = num_predict - actual_num_predict - - ##### target_mapping - target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32) - paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype) - target_mapping = tf.concat([target_mapping, paddings], axis=0) - example["target_mapping"] = tf.reshape(target_mapping, - [num_predict, seq_len]) - - ##### target - target = tf.boolean_mask(target, bool_target_mask) - paddings = tf.zeros([pad_len], dtype=target.dtype) - target = tf.concat([target, paddings], axis=0) - example["target"] = tf.reshape(target, [num_predict]) - - ##### target mask - target_mask = tf.concat( - [tf.ones([actual_num_predict], dtype=tf.float32), - tf.zeros([pad_len], dtype=tf.float32)], - axis=0) - example["target_mask"] = tf.reshape(target_mask, [num_predict]) - else: - example["target"] = tf.reshape(target, [seq_len]) - example["target_mask"] = tf.reshape(target_mask, [seq_len]) - - # reshape back to fixed shape - example["perm_mask"] = tf.reshape(perm_mask, [seq_len, seq_len]) - example["input_k"] = tf.reshape(input_k, [seq_len]) - example["input_q"] = tf.reshape(input_q, [seq_len]) - - _convert_example(example, use_bfloat16) - - for k, v in example.items(): - tf.logging.info("%s: %s", k, v) - - return example - - # Get dataset - dataset = parse_files_to_dataset( - parser=parser, - file_names=file_names, - split=split, - num_batch=num_batch, - num_hosts=num_hosts, - host_id=host_id, - num_core_per_host=num_core_per_host, - bsz_per_core=bsz_per_core) - - return dataset - - -def get_input_fn( - tfrecord_dir, - split, - bsz_per_host, - seq_len, - reuse_len, - bi_data, - num_hosts=1, - num_core_per_host=1, - perm_size=None, - mask_alpha=None, - mask_beta=None, - uncased=False, - num_passes=None, - use_bfloat16=False, - num_predict=None): - - # Merge all record infos into a single one - record_glob_base = format_filename( - prefix="record_info-{}-*".format(split), - bsz_per_host=bsz_per_host, - seq_len=seq_len, - bi_data=bi_data, - suffix="json", - mask_alpha=mask_alpha, - mask_beta=mask_beta, - reuse_len=reuse_len, - uncased=uncased, - fixed_num_predict=num_predict) - - record_info = {"num_batch": 0, "filenames": []} - - tfrecord_dirs = tfrecord_dir.split(",") - tf.logging.info("Use the following tfrecord dirs: %s", tfrecord_dirs) - - 
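For orientation, each record_info-*.json merged by the loop below has the shape written out by create_data earlier in this file; the contents shown here are hypothetical, with the tfrecord name following format_filename under the default flag values defined at the bottom of the file:

    # Hypothetical contents of one record info file.
    record_info_example = {
        "filenames": ["train-0-0.bsz-32.seqlen-512.reuse-256.uncased.bi.alpha-6.beta-1.fnp-85.tfrecords"],
        "num_batch": 1210,  # placeholder count
    }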
for idx, record_dir in enumerate(tfrecord_dirs): - record_glob = os.path.join(record_dir, record_glob_base) - tf.logging.info("[%d] Record glob: %s", idx, record_glob) - - record_paths = sorted(tf.gfile.Glob(record_glob)) - tf.logging.info("[%d] Num of record info path: %d", - idx, len(record_paths)) - - cur_record_info = {"num_batch": 0, "filenames": []} - - for record_info_path in record_paths: - if num_passes is not None: - record_info_name = os.path.basename(record_info_path) - fields = record_info_name.split(".")[0].split("-") - pass_id = int(fields[-1]) - if len(fields) == 5 and pass_id >= num_passes: - tf.logging.info("Skip pass %d: %s", pass_id, record_info_name) - continue - - with tf.gfile.Open(record_info_path, "r") as fp: - info = json.load(fp) - if num_passes is not None: - eff_num_passes = min(num_passes, len(info["filenames"])) - ratio = eff_num_passes / len(info["filenames"]) - cur_record_info["num_batch"] += int(info["num_batch"] * ratio) - cur_record_info["filenames"] += info["filenames"][:eff_num_passes] - else: - cur_record_info["num_batch"] += info["num_batch"] - cur_record_info["filenames"] += info["filenames"] - - # overwrite directory for `cur_record_info` - new_filenames = [] - for filename in cur_record_info["filenames"]: - basename = os.path.basename(filename) - new_filename = os.path.join(record_dir, basename) - new_filenames.append(new_filename) - cur_record_info["filenames"] = new_filenames - - tf.logging.info("[Dir %d] Number of chosen batches: %s", - idx, cur_record_info["num_batch"]) - tf.logging.info("[Dir %d] Number of chosen files: %s", - idx, len(cur_record_info["filenames"])) - tf.logging.info(cur_record_info["filenames"]) - - # add `cur_record_info` to global `record_info` - record_info["num_batch"] += cur_record_info["num_batch"] - record_info["filenames"] += cur_record_info["filenames"] - - tf.logging.info("Total number of batches: %d", - record_info["num_batch"]) - tf.logging.info("Total number of files: %d", - len(record_info["filenames"])) - tf.logging.info(record_info["filenames"]) - - def input_fn(params): - """docs.""" - assert params["batch_size"] * num_core_per_host == bsz_per_host - - dataset = get_dataset( - params=params, - num_hosts=num_hosts, - num_core_per_host=num_core_per_host, - split=split, - file_names=record_info["filenames"], - num_batch=record_info["num_batch"], - seq_len=seq_len, - reuse_len=reuse_len, - perm_size=perm_size, - mask_alpha=mask_alpha, - mask_beta=mask_beta, - use_bfloat16=use_bfloat16, - num_predict=num_predict) - - return dataset - - return input_fn, record_info - - -if __name__ == "__main__": - FLAGS = flags.FLAGS - flags.DEFINE_bool("use_tpu", True, help="whether to use TPUs") - flags.DEFINE_integer("bsz_per_host", 32, help="batch size per host.") - flags.DEFINE_integer("num_core_per_host", 8, help="num TPU cores per host.") - - flags.DEFINE_integer("seq_len", 512, - help="Sequence length.") - flags.DEFINE_integer("reuse_len", 256, - help="Number of token that can be reused as memory. 
" - "Could be half of `seq_len`.") - flags.DEFINE_bool("uncased", True, help="Use uncased inputs or not.") - flags.DEFINE_bool("bi_data", True, - help="whether to create bidirectional data") - flags.DEFINE_integer("mask_alpha", default=6, - help="How many tokens to form a group.") - flags.DEFINE_integer("mask_beta", default=1, - help="How many tokens to mask within each group.") - flags.DEFINE_bool("use_eod", True, - help="whether to append EOD at the end of a doc.") - flags.DEFINE_bool("from_raw_text", True, - help="Whether the input is raw text or encoded ids.") - flags.DEFINE_integer("num_predict", default=85, - help="Num of tokens to predict.") - - flags.DEFINE_string("input_glob", "data/example/*.txt", - help="Input file glob.") - flags.DEFINE_string("sp_path", "", help="Path to the sentence piece model.") - flags.DEFINE_string("save_dir", "proc_data/example", - help="Directory for saving the processed data.") - flags.DEFINE_enum("split", "train", ["train", "dev", "test"], - help="Save the data as which split.") - - flags.DEFINE_integer("pass_id", 0, help="ID of the current pass." - "Different passes sample different negative segment.") - flags.DEFINE_integer("num_task", 1, help="Number of total tasks.") - flags.DEFINE_integer("task", 0, help="The Task ID. This value is used when " - "using multiple workers to identify each worker.") - - tf.logging.set_verbosity(tf.logging.INFO) - tf.app.run(create_data) diff --git a/build/lib/caire-covid/mrqa/function_builder.py b/build/lib/caire-covid/mrqa/function_builder.py deleted file mode 100644 index fef33fd..0000000 --- a/build/lib/caire-covid/mrqa/function_builder.py +++ /dev/null @@ -1,624 +0,0 @@ -"""doc.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import os -import tensorflow as tf -import mrqa.modeling -import mrqa.xlnet - -# import attn_utils - -def construct_scalar_host_call( - monitor_dict, - model_dir, - prefix="", - reduce_fn=None): - """ - Construct host calls to monitor training progress on TPUs. 
- """ - - metric_names = list(monitor_dict.keys()) - - def host_call_fn(global_step, *args): - """actual host call function.""" - step = global_step[0] - with tf.contrib.summary.create_file_writer( - logdir=model_dir, filename_suffix=".host_call").as_default(): - with tf.contrib.summary.always_record_summaries(): - for i, name in enumerate(metric_names): - if reduce_fn is None: - scalar = args[i][0] - else: - scalar = reduce_fn(args[i]) - with tf.contrib.summary.record_summaries_every_n_global_steps( - 100, global_step=step): - tf.contrib.summary.scalar(prefix + name, scalar, step=step) - - return tf.contrib.summary.all_summary_ops() - - global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1]) - other_tensors = [tf.reshape(monitor_dict[key], [1]) for key in metric_names] - - return host_call_fn, [global_step_tensor] + other_tensors - - -def two_stream_loss(FLAGS, features, labels, mems, is_training): - """Pretraining loss with two-stream attention Transformer-XL.""" - - #### Unpack input - mem_name = "mems" - mems = mems.get(mem_name, None) - - inp_k = tf.transpose(features["input_k"], [1, 0]) - inp_q = tf.transpose(features["input_q"], [1, 0]) - - seg_id = tf.transpose(features["seg_id"], [1, 0]) - - inp_mask = None - perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0]) - - if FLAGS.num_predict is not None: - # [num_predict x tgt_len x bsz] - target_mapping = tf.transpose(features["target_mapping"], [1, 2, 0]) - else: - target_mapping = None - - # target for LM loss - tgt = tf.transpose(features["target"], [1, 0]) - - # target mask for LM loss - tgt_mask = tf.transpose(features["target_mask"], [1, 0]) - - # construct xlnet config and save to model_dir - xlnet_config = xlnet.XLNetConfig(FLAGS=FLAGS) - xlnet_config.to_json(os.path.join(FLAGS.model_dir, "config.json")) - - # construct run config from FLAGS - run_config = xlnet.create_run_config(is_training, False, FLAGS) - - xlnet_model = xlnet.XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp_k, - seg_ids=seg_id, - input_mask=inp_mask, - mems=mems, - perm_mask=perm_mask, - target_mapping=target_mapping, - inp_q=inp_q) - - output = xlnet_model.get_sequence_output() - new_mems = {mem_name: xlnet_model.get_new_memory()} - lookup_table = xlnet_model.get_embedding_table() - - initializer = xlnet_model.get_initializer() - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - # LM loss - lm_loss = modeling.lm_loss( - hidden=output, - target=tgt, - n_token=xlnet_config.n_token, - d_model=xlnet_config.d_model, - initializer=initializer, - lookup_table=lookup_table, - tie_weight=True, - bi_data=run_config.bi_data, - use_tpu=run_config.use_tpu) - - #### Quantity to monitor - monitor_dict = {} - - if FLAGS.use_bfloat16: - tgt_mask = tf.cast(tgt_mask, tf.float32) - lm_loss = tf.cast(lm_loss, tf.float32) - - total_loss = tf.reduce_sum(lm_loss * tgt_mask) / tf.reduce_sum(tgt_mask) - monitor_dict["total_loss"] = total_loss - - return total_loss, new_mems, monitor_dict - - -def get_loss(FLAGS, features, labels, mems, is_training): - """Pretraining loss with two-stream attention Transformer-XL.""" - if FLAGS.use_bfloat16: - with tf.tpu.bfloat16_scope(): - return two_stream_loss(FLAGS, features, labels, mems, is_training) - else: - return two_stream_loss(FLAGS, features, labels, mems, is_training) - - -def get_classification_loss( - FLAGS, features, n_class, is_training): - """Loss for downstream classification tasks.""" - - bsz_per_core = tf.shape(features["input_ids"])[0] - - inp = 
tf.transpose(features["input_ids"], [1, 0]) - seg_id = tf.transpose(features["segment_ids"], [1, 0]) - inp_mask = tf.transpose(features["input_mask"], [1, 0]) - label = tf.reshape(features["label_ids"], [bsz_per_core]) - - xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) - run_config = xlnet.create_run_config(is_training, True, FLAGS) - - xlnet_model = xlnet.XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - - summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - - if FLAGS.cls_scope is not None and FLAGS.cls_scope: - cls_scope = "classification_{}".format(FLAGS.cls_scope) - else: - cls_scope = "classification_{}".format(FLAGS.task_name.lower()) - - per_example_loss, logits = modeling.classification_loss( - hidden=summary, - labels=label, - n_class=n_class, - initializer=xlnet_model.get_initializer(), - scope=cls_scope, - return_logits=True) - - total_loss = tf.reduce_mean(per_example_loss) - - return total_loss, per_example_loss, logits - - -def get_regression_loss( - FLAGS, features, is_training): - """Loss for downstream regression tasks.""" - - bsz_per_core = tf.shape(features["input_ids"])[0] - - inp = tf.transpose(features["input_ids"], [1, 0]) - seg_id = tf.transpose(features["segment_ids"], [1, 0]) - inp_mask = tf.transpose(features["input_mask"], [1, 0]) - label = tf.reshape(features["label_ids"], [bsz_per_core]) - - xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) - run_config = xlnet.create_run_config(is_training, True, FLAGS) - - xlnet_model = xlnet.XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - - summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - per_example_loss, logits = modeling.regression_loss( - hidden=summary, - labels=label, - initializer=xlnet_model.get_initializer(), - scope="regression_{}".format(FLAGS.task_name.lower()), - return_logits=True) - - total_loss = tf.reduce_mean(per_example_loss) - - return total_loss, per_example_loss, logits - - -def get_qa_outputs(FLAGS, features, is_training): - """Loss for downstream span-extraction QA tasks such as SQuAD.""" - - inp = tf.transpose(features["input_ids"], [1, 0]) - seg_id = tf.transpose(features["segment_ids"], [1, 0]) - inp_mask = tf.transpose(features["input_mask"], [1, 0]) - cls_index = tf.reshape(features["cls_index"], [-1]) - - seq_len = tf.shape(inp)[0] - - xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) - run_config = xlnet.create_run_config(is_training, True, FLAGS) - - seg_id = tf.cast(seg_id, tf.int32) - xlnet_model = xlnet.XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - output = xlnet_model.get_sequence_output() - initializer = xlnet_model.get_initializer() - - return_dict = {} - - # invalid position mask such as query and special symbols (PAD, SEP, CLS) - p_mask = features["p_mask"] - - # logit of the start position - with tf.variable_scope("start_logits"): - start_logits = tf.layers.dense( - output, - 1, - kernel_initializer=initializer) - start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0]) - start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask - start_log_probs = tf.nn.log_softmax(start_logits_masked, -1) - - # logit of the 
end position - with tf.variable_scope("end_logits"): - if is_training: - # during training, compute the end logits based on the - # ground truth of the start position - - start_positions = tf.reshape(features["start_positions"], [-1]) - start_index = tf.one_hot(start_positions, depth=seq_len, axis=-1, - dtype=tf.float32) - start_features = tf.einsum("lbh,bl->bh", output, start_index) - start_features = tf.tile(start_features[None], [seq_len, 1, 1]) - end_logits = tf.layers.dense( - tf.concat([output, start_features], axis=-1), xlnet_config.d_model, - kernel_initializer=initializer, activation=tf.tanh, name="dense_0") - end_logits = tf.contrib.layers.layer_norm( - end_logits, begin_norm_axis=-1) - - end_logits = tf.layers.dense( - end_logits, 1, - kernel_initializer=initializer, - name="dense_1") - end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0]) - end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask - end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) - else: - # during inference, compute the end logits based on beam search - - start_top_log_probs, start_top_index = tf.nn.top_k( - start_log_probs, k=FLAGS.start_n_top) - start_index = tf.one_hot(start_top_index, - depth=seq_len, axis=-1, dtype=tf.float32) - start_features = tf.einsum("lbh,bkl->bkh", output, start_index) - end_input = tf.tile(output[:, :, None], - [1, 1, FLAGS.start_n_top, 1]) - start_features = tf.tile(start_features[None], - [seq_len, 1, 1, 1]) - end_input = tf.concat([end_input, start_features], axis=-1) - end_logits = tf.layers.dense( - end_input, - xlnet_config.d_model, - kernel_initializer=initializer, - activation=tf.tanh, - name="dense_0") - end_logits = tf.contrib.layers.layer_norm(end_logits, - begin_norm_axis=-1) - end_logits = tf.layers.dense( - end_logits, - 1, - kernel_initializer=initializer, - name="dense_1") - end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top]) - end_logits = tf.transpose(end_logits, [1, 2, 0]) - end_logits_masked = end_logits * ( - 1 - p_mask[:, None]) - 1e30 * p_mask[:, None] - end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) - end_top_log_probs, end_top_index = tf.nn.top_k( - end_log_probs, k=FLAGS.end_n_top) - end_top_log_probs = tf.reshape( - end_top_log_probs, - [-1, FLAGS.start_n_top * FLAGS.end_n_top]) - end_top_index = tf.reshape( - end_top_index, - [-1, FLAGS.start_n_top * FLAGS.end_n_top]) - - if is_training: - return_dict["start_log_probs"] = start_log_probs - return_dict["end_log_probs"] = end_log_probs - else: - return_dict["start_top_log_probs"] = start_top_log_probs - return_dict["start_top_index"] = start_top_index - return_dict["end_top_log_probs"] = end_top_log_probs - return_dict["end_top_index"] = end_top_index - - # an additional layer to predict answerability - with tf.variable_scope("answer_class"): - # get the representation of CLS - cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32) - cls_feature = tf.einsum("lbh,bl->bh", output, cls_index) - - # get the representation of START - start_p = tf.nn.softmax(start_logits_masked, axis=-1, - name="softmax_start") - start_feature = tf.einsum("lbh,bl->bh", output, start_p) - - # note(zhiliny): no dependency on end_feature so that we can obtain - # one single `cls_logits` for each sample - ans_feature = tf.concat([start_feature, cls_feature], -1) - ans_feature = tf.layers.dense( - ans_feature, - xlnet_config.d_model, - activation=tf.tanh, - kernel_initializer=initializer, name="dense_0") - ans_feature = tf.layers.dropout(ans_feature, FLAGS.dropout, - 
training=is_training) - cls_logits = tf.layers.dense( - ans_feature, - 1, - kernel_initializer=initializer, - name="dense_1", - use_bias=False) - cls_logits = tf.squeeze(cls_logits, -1) - - return_dict["cls_logits"] = cls_logits - - return return_dict - -# def get_qa_outputs_w_attention(FLAGS, features, is_training): -# """Loss for downstream span-extraction QA tasks such as SQuAD.""" - -# inp = tf.transpose(features["input_ids"], [1, 0]) -# seg_id = tf.transpose(features["segment_ids"], [1, 0]) -# inp_mask = tf.transpose(features["input_mask"], [1, 0]) -# cls_index = tf.reshape(features["cls_index"], [-1]) - -# seq_len = tf.shape(inp)[0] - -# xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) -# run_config = xlnet.create_run_config(is_training, True, FLAGS) - -# seg_id = tf.cast(seg_id, tf.int32) -# xlnet_model = xlnet.XLNetModel( -# xlnet_config=xlnet_config, -# run_config=run_config, -# input_ids=inp, -# seg_ids=seg_id, -# input_mask=inp_mask) -# output = xlnet_model.get_sequence_output() -# initializer = xlnet_model.get_initializer() - -# return_dict = {} - -# # invalid position mask such as query and special symbols (PAD, SEP, CLS) -# p_mask = features["p_mask"] - -# p_mask_query = tf.transpose(tf.cast(p_mask, tf.float32), [1, 0]) # size: [340, 4] type: float32 -# p_mask_inp = tf.transpose(tf.cast(p_mask, tf.int32), [1, 0]) # size: [340, 4] type: int32 -# query_toks = tf.einsum('ibh,ib->ibh', output, p_mask_query) # size: [340, 4, 1024] -# context_toks = tf.einsum('ibh,ib->ibh', output, 1-p_mask_query) # size: [340, 4, 1024] - -# inp_mask_int = tf.cast(inp_mask, tf.int32) -# query_pos = tf.cast(tf.cast(tf.math.multiply(p_mask_inp, 1-inp_mask_int), tf.bool), tf.int32) -# context_pos = tf.cast(tf.cast(tf.math.multiply(1-p_mask_inp, 1-inp_mask_int), tf.bool), tf.int32) -# query_len = tf.reduce_sum(tf.cast(query_pos, tf.int32), 0) - 1 # size: [340, 4] -# context_len = tf.reduce_sum(tf.cast(context_pos, tf.int32), 0) - 1 # size: [340, 4] - -# keep_prob = 1.0 -# if is_training: -# keep_prob = 0.8 -# else: -# keep_prob = 1.0 - -# # context to query attention layer (AoA) -# with tf.variable_scope("Context_to_Query_Attention_Layer"): -# if is_training: -# # c, q, c_maxlen, q_maxlen, q_mask, c_mask -# [max_seq_len, bsz, dimen] = output.shape.as_list() -# c_maxlen = tf.reduce_max(context_len) -# q_maxlen = tf.reduce_max(query_len) - -# c = tf.transpose(context_toks, [1, 0, 2]) -# c_mask = tf.transpose(context_pos, [1, 0]) - -# for i in range(0, bsz): -# q_indice0 = tf.range(context_len[i], context_len[i]+query_len[i]) -# q_indice1 = tf.zeros([query_len[i], 1])+i -# q_indices = tf.concat([tf.reshape(q_indice0,[-1, 1]), tf.cast(q_indice1, tf.int32)], 1) -# q_bsz = tf.gather_nd(query_toks, q_indices) -# q_bsz = tf.concat([q_bsz, tf.zeros([q_maxlen-query_len[i], dimen])], 0) - -# q_bsz_mask = tf.gather_nd(query_pos, q_indices) -# q_bsz_mask = tf.concat([tf.reshape(q_bsz_mask, [-1, 1]), tf.cast(tf.zeros([q_maxlen-query_len[i], 1]), tf.int32)], 0) -# if i==0: -# q = tf.reshape(q_bsz, [q_maxlen, 1, dimen]) -# q_mask = tf.reshape(q_bsz_mask, [q_maxlen, 1]) -# else: -# q = tf.concat([q, tf.reshape(q_bsz, [q_maxlen, 1, dimen])], 1) -# q_mask = tf.concat([q_mask, tf.reshape(q_bsz_mask, [q_maxlen, 1])], 1) -# q = tf.transpose(q, [1, 0, 2]) -# q_mask = tf.transpose(q_mask, [1, 0]) - -# else: # testing/predicting -# [max_seq_len, _, dimen] = output.shape.as_list() # size: [340, None(?), 1024] -# c_maxlen = tf.reduce_max(context_len) -# q_maxlen = tf.reduce_max(query_len) - -# c = 
tf.transpose(context_toks, [1, 0, 2]) -# c_mask = tf.transpose(context_pos, [1, 0]) - -# # bsz = tf.shape(output)[1] # Tensor [340, bsz, 1024] -> Tensor [bsz] -# # elems = tf.range(0, bsz) -# for i in range(0, FLAGS.predict_batch_size): -# q_indice0 = tf.range(context_len[i], context_len[i]+query_len[i]) -# q_indice1 = tf.zeros([query_len[i], 1])+i -# q_indices = tf.concat([tf.reshape(q_indice0,[-1, 1]), tf.cast(q_indice1, tf.int32)], 1) -# q_bsz = tf.gather_nd(query_toks, q_indices) -# q_bsz = tf.concat([q_bsz, tf.zeros([q_maxlen-query_len[i], dimen])], 0) - -# q_bsz_mask = tf.gather_nd(query_pos, q_indices) -# q_bsz_mask = tf.concat([tf.reshape(q_bsz_mask, [-1, 1]), tf.cast(tf.zeros([q_maxlen-query_len[i], 1]), tf.int32)], 0) -# if i==0: -# q = tf.reshape(q_bsz, [q_maxlen, 1, dimen]) -# q_mask = tf.reshape(q_bsz_mask, [q_maxlen, 1]) -# else: -# q = tf.concat([q, tf.reshape(q_bsz, [q_maxlen, 1, dimen])], 1) -# q_mask = tf.concat([q_mask, tf.reshape(q_bsz_mask, [q_maxlen, 1])], 1) -# q = tf.transpose(q, [1, 0, 2]) -# q_mask = tf.transpose(q_mask, [1, 0]) - -# S = attn_utils.optimized_trilinear_for_attention([c, q], max_seq_len, q_maxlen, input_keep_prob = keep_prob) # c:[bsz, 340, dimen] q:[bsz, 64, dimen] -# mask_q = tf.expand_dims(q_mask, 1) # [bsz, 1, q_len] -# S_ = tf.nn.softmax(attn_utils.mask_logits(S, mask = mask_q)) -# mask_c = tf.expand_dims(c_mask, 2) # [bsz, c_len, 1] -# S_T = tf.transpose(tf.nn.softmax(attn_utils.mask_logits(S, mask = mask_c), dim = 1),(0,2,1)) - -# c2q = tf.matmul(S_, q) -# q2c = tf.matmul(tf.matmul(S_, S_T), c) -# attention_outputs = [c, c2q, c * c2q, c * q2c] -# output = tf.transpose(tf.concat(attention_outputs, axis = -1), [1, 0, 2]) - -# # logit of the start position -# with tf.variable_scope("start_logits"): -# start_logits = tf.layers.dense( -# output, -# 1, -# kernel_initializer=initializer) -# start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0]) -# start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask -# start_log_probs = tf.nn.log_softmax(start_logits_masked, -1) - -# # logit of the end position -# with tf.variable_scope("end_logits"): -# if is_training: -# # during training, compute the end logits based on the -# # ground truth of the start position - -# start_positions = tf.reshape(features["start_positions"], [-1]) -# start_index = tf.one_hot(start_positions, depth=seq_len, axis=-1, -# dtype=tf.float32) -# start_features = tf.einsum("lbh,bl->bh", output, start_index) -# start_features = tf.tile(start_features[None], [seq_len, 1, 1]) -# end_logits = tf.layers.dense( -# tf.concat([output, start_features], axis=-1), xlnet_config.d_model, -# kernel_initializer=initializer, activation=tf.tanh, name="dense_0") -# end_logits = tf.contrib.layers.layer_norm( -# end_logits, begin_norm_axis=-1) - -# end_logits = tf.layers.dense( -# end_logits, 1, -# kernel_initializer=initializer, -# name="dense_1") -# end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0]) -# end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask -# end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) -# else: -# # during inference, compute the end logits based on beam search - -# start_top_log_probs, start_top_index = tf.nn.top_k( -# start_log_probs, k=FLAGS.start_n_top) -# start_index = tf.one_hot(start_top_index, -# depth=seq_len, axis=-1, dtype=tf.float32) -# start_features = tf.einsum("lbh,bkl->bkh", output, start_index) -# end_input = tf.tile(output[:, :, None], -# [1, 1, FLAGS.start_n_top, 1]) -# start_features = tf.tile(start_features[None], 
-# [seq_len, 1, 1, 1]) -# end_input = tf.concat([end_input, start_features], axis=-1) -# end_logits = tf.layers.dense( -# end_input, -# xlnet_config.d_model, -# kernel_initializer=initializer, -# activation=tf.tanh, -# name="dense_0") -# end_logits = tf.contrib.layers.layer_norm(end_logits, -# begin_norm_axis=-1) -# end_logits = tf.layers.dense( -# end_logits, -# 1, -# kernel_initializer=initializer, -# name="dense_1") -# end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top]) -# end_logits = tf.transpose(end_logits, [1, 2, 0]) -# end_logits_masked = end_logits * ( -# 1 - p_mask[:, None]) - 1e30 * p_mask[:, None] -# end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) -# end_top_log_probs, end_top_index = tf.nn.top_k( -# end_log_probs, k=FLAGS.end_n_top) -# end_top_log_probs = tf.reshape( -# end_top_log_probs, -# [-1, FLAGS.start_n_top * FLAGS.end_n_top]) -# end_top_index = tf.reshape( -# end_top_index, -# [-1, FLAGS.start_n_top * FLAGS.end_n_top]) - -# if is_training: -# return_dict["start_log_probs"] = start_log_probs -# return_dict["end_log_probs"] = end_log_probs -# else: -# return_dict["start_top_log_probs"] = start_top_log_probs -# return_dict["start_top_index"] = start_top_index -# return_dict["end_top_log_probs"] = end_top_log_probs -# return_dict["end_top_index"] = end_top_index - -# # an additional layer to predict answerability -# with tf.variable_scope("answer_class"): -# # get the representation of CLS -# cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32) -# cls_feature = tf.einsum("lbh,bl->bh", output, cls_index) - -# # get the representation of START -# start_p = tf.nn.softmax(start_logits_masked, axis=-1, -# name="softmax_start") -# start_feature = tf.einsum("lbh,bl->bh", output, start_p) - -# # note(zhiliny): no dependency on end_feature so that we can obtain -# # one single `cls_logits` for each sample -# ans_feature = tf.concat([start_feature, cls_feature], -1) -# ans_feature = tf.layers.dense( -# ans_feature, -# xlnet_config.d_model, -# activation=tf.tanh, -# kernel_initializer=initializer, name="dense_0") -# ans_feature = tf.layers.dropout(ans_feature, FLAGS.dropout, -# training=is_training) -# cls_logits = tf.layers.dense( -# ans_feature, -# 1, -# kernel_initializer=initializer, -# name="dense_1", -# use_bias=False) -# cls_logits = tf.squeeze(cls_logits, -1) - -# return_dict["cls_logits"] = cls_logits - -# return return_dict - - -def get_race_loss(FLAGS, features, is_training): - """Loss for downstream multi-choice QA tasks such as RACE.""" - - bsz_per_core = tf.shape(features["input_ids"])[0] - - def _transform_features(feature): - out = tf.reshape(feature, [bsz_per_core, 4, -1]) - out = tf.transpose(out, [2, 0, 1]) - out = tf.reshape(out, [-1, bsz_per_core * 4]) - return out - - inp = _transform_features(features["input_ids"]) - seg_id = _transform_features(features["segment_ids"]) - inp_mask = _transform_features(features["input_mask"]) - label = tf.reshape(features["label_ids"], [bsz_per_core]) - - xlnet_config = xlnet.XLNetConfig(json_path=FLAGS.model_config_path) - run_config = xlnet.create_run_config(is_training, True, FLAGS) - - xlnet_model = xlnet.XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) - - with tf.variable_scope("logits"): - logits = tf.layers.dense(summary, 1, - kernel_initializer=xlnet_model.get_initializer()) - logits = tf.reshape(logits, [bsz_per_core, 4]) 
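A brief shape note on the multi-choice handling above, added for clarity (L denotes the per-choice sequence length):

    # _transform_features: [bsz, 4*L] -> [bsz, 4, L] -> [L, bsz, 4] -> [L, bsz*4],
    # i.e. the four answer choices are unrolled into the batch dimension and scored
    # independently; the per-choice logits are then folded back to [bsz, 4] above.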
- - one_hot_target = tf.one_hot(label, 4) - per_example_loss = -tf.reduce_sum( - tf.nn.log_softmax(logits) * one_hot_target, -1) - total_loss = tf.reduce_mean(per_example_loss) - - return total_loss, per_example_loss, logits diff --git a/build/lib/caire-covid/mrqa/model_utils.py b/build/lib/caire-covid/mrqa/model_utils.py deleted file mode 100644 index fd8d6d8..0000000 --- a/build/lib/caire-covid/mrqa/model_utils.py +++ /dev/null @@ -1,399 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import os -import re -import numpy as np -import six -from os.path import join -from six.moves import zip - -from absl import flags - -import tensorflow as tf - - -def configure_tpu(FLAGS): - if FLAGS.use_tpu: - tpu_cluster = tf.contrib.cluster_resolver.TPUClusterResolver( - FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) - master = tpu_cluster.get_master() - else: - tpu_cluster = None - master = FLAGS.master - - session_config = tf.ConfigProto(allow_soft_placement=True) - # Uncomment the following line if you hope to monitor GPU RAM growth - # session_config.gpu_options.allow_growth = True - - if FLAGS.use_tpu: - strategy = None - tf.logging.info('Use TPU without distribute strategy.') - elif FLAGS.num_core_per_host == 1: - strategy = None - tf.logging.info('Single device mode.') - else: - strategy = tf.contrib.distribute.MirroredStrategy( - num_gpus=FLAGS.num_core_per_host) - tf.logging.info('Use MirroredStrategy with %d devices.', - strategy.num_replicas_in_sync) - - per_host_input = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 - run_config = tf.contrib.tpu.RunConfig( - master=master, - model_dir=FLAGS.model_dir, - session_config=session_config, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations, - num_shards=FLAGS.num_hosts * FLAGS.num_core_per_host, - per_host_input_for_training=per_host_input), - keep_checkpoint_max=FLAGS.max_save, - save_checkpoints_secs=None, - save_checkpoints_steps=FLAGS.save_steps, - train_distribute=strategy - ) - return run_config - - -def init_from_checkpoint(FLAGS, global_vars=False): - tvars = tf.global_variables() if global_vars else tf.trainable_variables() - initialized_variable_names = {} - scaffold_fn = None - if FLAGS.init_checkpoint is not None: - if FLAGS.init_checkpoint.endswith("latest"): - ckpt_dir = os.path.dirname(FLAGS.init_checkpoint) - init_checkpoint = tf.train.latest_checkpoint(ckpt_dir) - else: - init_checkpoint = FLAGS.init_checkpoint - - tf.logging.info("Initialize from the ckpt {}".format(init_checkpoint)) - - (assignment_map, initialized_variable_names - ) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if FLAGS.use_tpu: - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - # Log customized initialization - tf.logging.info("**** Global Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - return scaffold_fn - - -def get_train_op(FLAGS, total_loss, grads_and_vars=None): - global_step = tf.train.get_or_create_global_step() - - # increase the learning rate linearly - if FLAGS.warmup_steps > 0: - warmup_lr = (tf.cast(global_step, tf.float32) - / tf.cast(FLAGS.warmup_steps, 
tf.float32) - * FLAGS.learning_rate) - else: - warmup_lr = 0.0 - - # decay the learning rate - if FLAGS.decay_method == "poly": - decay_lr = tf.train.polynomial_decay( - FLAGS.learning_rate, - global_step=global_step - FLAGS.warmup_steps, - decay_steps=FLAGS.train_steps - FLAGS.warmup_steps, - end_learning_rate=FLAGS.learning_rate * FLAGS.min_lr_ratio) - elif FLAGS.decay_method == "cos": - decay_lr = tf.train.cosine_decay( - FLAGS.learning_rate, - global_step=global_step - FLAGS.warmup_steps, - decay_steps=FLAGS.train_steps - FLAGS.warmup_steps, - alpha=FLAGS.min_lr_ratio) - else: - raise ValueError(FLAGS.decay_method) - - learning_rate = tf.where(global_step < FLAGS.warmup_steps, - warmup_lr, decay_lr) - - if (FLAGS.weight_decay > 0 and not FLAGS.use_tpu and - FLAGS.num_core_per_host > 1): - raise ValueError("Do not support `weight_decay > 0` with multi-gpu " - "training so far.") - - if FLAGS.weight_decay == 0: - optimizer = tf.train.AdamOptimizer( - learning_rate=learning_rate, - epsilon=FLAGS.adam_epsilon) - else: - optimizer = AdamWeightDecayOptimizer( - learning_rate=learning_rate, - epsilon=FLAGS.adam_epsilon, - exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], - weight_decay_rate=FLAGS.weight_decay) - - if FLAGS.use_tpu: - optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) - - if grads_and_vars is None: - grads_and_vars = optimizer.compute_gradients(total_loss) - gradients, variables = zip(*grads_and_vars) - clipped, gnorm = tf.clip_by_global_norm(gradients, FLAGS.clip) - - if getattr(FLAGS, "lr_layer_decay_rate", 1.0) != 1.0: - n_layer = 0 - for i in range(len(clipped)): - m = re.search(r"model/transformer/layer_(\d+?)/", variables[i].name) - if not m: continue - n_layer = max(n_layer, int(m.group(1)) + 1) - - for i in range(len(clipped)): - for l in range(n_layer): - if "model/transformer/layer_{}/".format(l) in variables[i].name: - abs_rate = FLAGS.lr_layer_decay_rate ** (n_layer - 1 - l) - clipped[i] *= abs_rate - tf.logging.info("Apply mult {:.4f} to layer-{} grad of {}".format( - abs_rate, l, variables[i].name)) - break - - train_op = optimizer.apply_gradients( - zip(clipped, variables), global_step=global_step) - - # Manually increment `global_step` for AdamWeightDecayOptimizer - if FLAGS.weight_decay > 0: - new_global_step = global_step + 1 - train_op = tf.group(train_op, [global_step.assign(new_global_step)]) - - return train_op, learning_rate, gnorm - - -def clean_ckpt(_): - input_ckpt = FLAGS.clean_input_ckpt - output_model_dir = FLAGS.clean_output_model_dir - - tf.reset_default_graph() - - var_list = tf.contrib.framework.list_variables(input_ckpt) - var_values, var_dtypes = {}, {} - for (name, shape) in var_list: - if not name.startswith("global_step") and "adam" not in name.lower(): - var_values[name] = None - tf.logging.info("Include {}".format(name)) - else: - tf.logging.info("Exclude {}".format(name)) - - tf.logging.info("Loading from {}".format(input_ckpt)) - reader = tf.contrib.framework.load_checkpoint(input_ckpt) - for name in var_values: - tensor = reader.get_tensor(name) - var_dtypes[name] = tensor.dtype - var_values[name] = tensor - - with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): - tf_vars = [ - tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) - for v in var_values - ] - placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] - assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] - global_step = tf.Variable( - 0, name="global_step", trainable=False, 
dtype=tf.int64) - saver = tf.train.Saver(tf.all_variables()) - - if not tf.gfile.Exists(output_model_dir): - tf.gfile.MakeDirs(output_model_dir) - - # Build a model consisting only of variables, set them to the average values. - with tf.Session() as sess: - sess.run(tf.initialize_all_variables()) - for p, assign_op, (name, value) in zip(placeholders, assign_ops, - six.iteritems(var_values)): - sess.run(assign_op, {p: value}) - - # Use the built saver to save the averaged checkpoint. - saver.save(sess, join(output_model_dir, "model.ckpt"), - global_step=global_step) - - -def avg_checkpoints(model_dir, output_model_dir, last_k): - tf.reset_default_graph() - - checkpoint_state = tf.train.get_checkpoint_state(model_dir) - checkpoints = checkpoint_state.all_model_checkpoint_paths[- last_k:] - var_list = tf.contrib.framework.list_variables(checkpoints[0]) - var_values, var_dtypes = {}, {} - for (name, shape) in var_list: - if not name.startswith("global_step"): - var_values[name] = np.zeros(shape) - for checkpoint in checkpoints: - reader = tf.contrib.framework.load_checkpoint(checkpoint) - for name in var_values: - tensor = reader.get_tensor(name) - var_dtypes[name] = tensor.dtype - var_values[name] += tensor - tf.logging.info("Read from checkpoint %s", checkpoint) - for name in var_values: # Average. - var_values[name] /= len(checkpoints) - - with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): - tf_vars = [ - tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) - for v in var_values - ] - placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] - assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] - global_step = tf.Variable( - 0, name="global_step", trainable=False, dtype=tf.int64) - saver = tf.train.Saver(tf.all_variables()) - - # Build a model consisting only of variables, set them to the average values. - with tf.Session() as sess: - sess.run(tf.initialize_all_variables()) - for p, assign_op, (name, value) in zip(placeholders, assign_ops, - six.iteritems(var_values)): - sess.run(assign_op, {p: value}) - # Use the built saver to save the averaged checkpoint. 
- saver.save(sess, join(output_model_dir, "model.ckpt"), - global_step=global_step) - - -def get_assignment_map_from_checkpoint(tvars, init_checkpoint): - """Compute the union of the current variables and checkpoint variables.""" - assignment_map = {} - initialized_variable_names = {} - - name_to_variable = collections.OrderedDict() - for var in tvars: - name = var.name - m = re.match("^(.*):\\d+$", name) - if m is not None: - name = m.group(1) - name_to_variable[name] = var - - init_vars = tf.train.list_variables(init_checkpoint) - - assignment_map = collections.OrderedDict() - for x in init_vars: - (name, var) = (x[0], x[1]) - # tf.logging.info('original name: %s', name) - if name not in name_to_variable: - continue - # assignment_map[name] = name - assignment_map[name] = name_to_variable[name] - initialized_variable_names[name] = 1 - initialized_variable_names[name + ":0"] = 1 - - return (assignment_map, initialized_variable_names) - - -class AdamWeightDecayOptimizer(tf.train.Optimizer): - """A basic Adam optimizer that includes "correct" L2 weight decay.""" - - def __init__(self, - learning_rate, - weight_decay_rate=0.0, - beta_1=0.9, - beta_2=0.999, - epsilon=1e-6, - exclude_from_weight_decay=None, - include_in_weight_decay=["r_s_bias", "r_r_bias", "r_w_bias"], - name="AdamWeightDecayOptimizer"): - """Constructs a AdamWeightDecayOptimizer.""" - super(AdamWeightDecayOptimizer, self).__init__(False, name) - - self.learning_rate = learning_rate - self.weight_decay_rate = weight_decay_rate - self.beta_1 = beta_1 - self.beta_2 = beta_2 - self.epsilon = epsilon - self.exclude_from_weight_decay = exclude_from_weight_decay - self.include_in_weight_decay = include_in_weight_decay - - def apply_gradients(self, grads_and_vars, global_step=None, name=None): - """See base class.""" - assignments = [] - for (grad, param) in grads_and_vars: - if grad is None or param is None: - continue - - param_name = self._get_variable_name(param.name) - - m = tf.get_variable( - name=param_name + "/adam_m", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - v = tf.get_variable( - name=param_name + "/adam_v", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - - # Standard Adam update. - next_m = ( - tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) - next_v = ( - tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, - tf.square(grad))) - - update = next_m / (tf.sqrt(next_v) + self.epsilon) - - # Just adding the square of the weights to the loss function is *not* - # the correct way of using L2 regularization/weight decay with Adam, - # since that will interact with the m and v parameters in strange ways. - # - # Instead we want ot decay the weights in a manner that doesn't interact - # with the m/v parameters. This is equivalent to adding the square - # of the weights to the loss with plain (non-momentum) SGD. 
- if self._do_use_weight_decay(param_name): - update += self.weight_decay_rate * param - - update_with_lr = self.learning_rate * update - - next_param = param - update_with_lr - - assignments.extend( - [param.assign(next_param), - m.assign(next_m), - v.assign(next_v)]) - - return tf.group(*assignments, name=name) - - def _do_use_weight_decay(self, param_name): - """Whether to use L2 weight decay for `param_name`.""" - if not self.weight_decay_rate: - return False - for r in self.include_in_weight_decay: - if re.search(r, param_name) is not None: - return True - - if self.exclude_from_weight_decay: - for r in self.exclude_from_weight_decay: - if re.search(r, param_name) is not None: - tf.logging.info('Adam WD excludes {}'.format(param_name)) - return False - return True - - def _get_variable_name(self, param_name): - """Get the variable name from the tensor name.""" - m = re.match("^(.*):\\d+$", param_name) - if m is not None: - param_name = m.group(1) - return param_name - - -if __name__ == "__main__": - flags.DEFINE_string("clean_input_ckpt", "", "input ckpt for cleaning") - flags.DEFINE_string("clean_output_model_dir", "", "output dir for cleaned ckpt") - - FLAGS = flags.FLAGS - - tf.app.run(clean_ckpt) diff --git a/build/lib/caire-covid/mrqa/modeling.py b/build/lib/caire-covid/mrqa/modeling.py deleted file mode 100644 index a9385e4..0000000 --- a/build/lib/caire-covid/mrqa/modeling.py +++ /dev/null @@ -1,783 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - - -def gelu(x): - """Gaussian Error Linear Unit. - - This is a smoother version of the RELU. - Original paper: https://arxiv.org/abs/1606.08415 - Args: - x: float Tensor to perform activation. - - Returns: - `x` with the GELU activation applied. 
- """ - cdf = 0.5 * (1.0 + tf.tanh( - (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) - return x * cdf - - -def embedding_lookup(x, n_token, d_embed, initializer, use_tpu=True, - scope='embedding', reuse=None, dtype=tf.float32): - """TPU and GPU embedding_lookup function.""" - with tf.variable_scope(scope, reuse=reuse): - lookup_table = tf.get_variable('lookup_table', [n_token, d_embed], - dtype=dtype, initializer=initializer) - if use_tpu: - one_hot_idx = tf.one_hot(x, n_token, dtype=dtype) - if one_hot_idx.shape.ndims == 2: - return tf.einsum('in,nd->id', one_hot_idx, lookup_table), lookup_table - else: - return tf.einsum('ibn,nd->ibd', one_hot_idx, lookup_table), lookup_table - else: - return tf.nn.embedding_lookup(lookup_table, x), lookup_table - - -def positional_embedding(pos_seq, inv_freq, bsz=None): - sinusoid_inp = tf.einsum('i,d->id', pos_seq, inv_freq) - pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1) - pos_emb = pos_emb[:, None, :] - - if bsz is not None: - pos_emb = tf.tile(pos_emb, [1, bsz, 1]) - - return pos_emb - - -def positionwise_ffn(inp, d_model, d_inner, dropout, kernel_initializer, - activation_type='relu', scope='ff', is_training=True, - reuse=None): - """Position-wise Feed-forward Network.""" - if activation_type == 'relu': - activation = tf.nn.relu - elif activation_type == 'gelu': - activation = gelu - else: - raise ValueError('Unsupported activation type {}'.format(activation_type)) - - output = inp - with tf.variable_scope(scope, reuse=reuse): - output = tf.layers.dense(output, d_inner, activation=activation, - kernel_initializer=kernel_initializer, - name='layer_1') - output = tf.layers.dropout(output, dropout, training=is_training, - name='drop_1') - output = tf.layers.dense(output, d_model, - kernel_initializer=kernel_initializer, - name='layer_2') - output = tf.layers.dropout(output, dropout, training=is_training, - name='drop_2') - output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1, - scope='LayerNorm') - return output - - -def head_projection(h, d_model, n_head, d_head, kernel_initializer, name): - """Project hidden states to a specific head with a 4D-shape.""" - proj_weight = tf.get_variable('{}/kernel'.format(name), - [d_model, n_head, d_head], dtype=h.dtype, - initializer=kernel_initializer) - head = tf.einsum('ibh,hnd->ibnd', h, proj_weight) - - return head - - -def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training, - kernel_initializer, residual=True): - """Post-attention processing.""" - # post-attention projection (back to `d_model`) - proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head], - dtype=h.dtype, initializer=kernel_initializer) - attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o) - - attn_out = tf.layers.dropout(attn_out, dropout, training=is_training) - if residual: - output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1, - scope='LayerNorm') - else: - output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, - scope='LayerNorm') - - return output - - -def abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, is_training, - scale): - """Core absolute positional attention operations.""" - - attn_score = tf.einsum('ibnd,jbnd->ijbn', q_head, k_head) - attn_score *= scale - if attn_mask is not None: - attn_score = attn_score - 1e30 * attn_mask - - # attention probability - attn_prob = tf.nn.softmax(attn_score, 1) - attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training) - - # attention output - attn_vec = 
tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head) - - return attn_vec - - -def rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, - r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt, is_training, - scale): - """Core relative positional attention operations.""" - - # content based attention score - ac = tf.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h) - - # position based attention score - bd = tf.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r) - bd = rel_shift(bd, klen=tf.shape(ac)[1]) - - # segment based attention score - if seg_mat is None: - ef = 0 - else: - ef = tf.einsum('ibnd,snd->ibns', q_head + r_s_bias, seg_embed) - ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef) - - # merge attention scores and perform masking - attn_score = (ac + bd + ef) * scale - if attn_mask is not None: - # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask - attn_score = attn_score - 1e30 * attn_mask - - # attention probability - attn_prob = tf.nn.softmax(attn_score, 1) - attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training) - - # attention output - attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h) - - return attn_vec - - -def rel_shift(x, klen=-1): - """perform relative shift to form the relative attention score.""" - x_size = tf.shape(x) - - x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]]) - x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1]) - x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]]) - x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1]) - - return x - - -def _create_mask(qlen, mlen, dtype=tf.float32, same_length=False): - """create causal attention mask.""" - attn_mask = tf.ones([qlen, qlen], dtype=dtype) - mask_u = tf.matrix_band_part(attn_mask, 0, -1) - mask_dia = tf.matrix_band_part(attn_mask, 0, 0) - attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype) - ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) - if same_length: - mask_l = tf.matrix_band_part(attn_mask, -1, 0) - ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1) - - return ret - - -def _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None): - """cache hidden states into memory.""" - if mem_len is None or mem_len == 0: - return None - else: - if reuse_len is not None and reuse_len > 0: - curr_out = curr_out[:reuse_len] - - if prev_mem is None: - new_mem = curr_out[-mem_len:] - else: - new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:] - - return tf.stop_gradient(new_mem) - - -def relative_positional_encoding(qlen, klen, d_model, clamp_len, attn_type, - bi_data, bsz=None, dtype=None): - """create relative positional encoding.""" - freq_seq = tf.range(0, d_model, 2.0) - if dtype is not None and dtype != tf.float32: - freq_seq = tf.cast(freq_seq, dtype=dtype) - inv_freq = 1 / (10000 ** (freq_seq / d_model)) - - if attn_type == 'bi': - # beg, end = klen - 1, -qlen - beg, end = klen, -qlen - elif attn_type == 'uni': - # beg, end = klen - 1, -1 - beg, end = klen, -1 - else: - raise ValueError('Unknown `attn_type` {}.'.format(attn_type)) - - if bi_data: - fwd_pos_seq = tf.range(beg, end, -1.0) - bwd_pos_seq = tf.range(-beg, -end, 1.0) - - if dtype is not None and dtype != tf.float32: - fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) - bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype) - - if clamp_len > 0: - fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len) - bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -clamp_len, clamp_len) - - if bsz is not None: - # With bi_data, the batch size should be divisible 
by 2. - assert bsz%2 == 0 - fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz//2) - bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq, bsz//2) - else: - fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq) - bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq) - - pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1) - else: - fwd_pos_seq = tf.range(beg, end, -1.0) - if dtype is not None and dtype != tf.float32: - fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) - if clamp_len > 0: - fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len) - pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz) - - return pos_emb - - -def multihead_attn(q, k, v, attn_mask, d_model, n_head, d_head, dropout, - dropatt, is_training, kernel_initializer, residual=True, - scope='abs_attn', reuse=None): - """Standard multi-head attention with absolute positional embedding.""" - - scale = 1 / (d_head ** 0.5) - with tf.variable_scope(scope, reuse=reuse): - # attention heads - q_head = head_projection( - q, d_model, n_head, d_head, kernel_initializer, 'q') - k_head = head_projection( - k, d_model, n_head, d_head, kernel_initializer, 'k') - v_head = head_projection( - v, d_model, n_head, d_head, kernel_initializer, 'v') - - # attention vector - attn_vec = abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, - is_training, scale) - - # post processing - output = post_attention(v, attn_vec, d_model, n_head, d_head, dropout, - is_training, kernel_initializer, residual) - - return output - - - -def rel_multihead_attn(h, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed, - attn_mask, mems, d_model, n_head, d_head, dropout, - dropatt, is_training, kernel_initializer, - scope='rel_attn', reuse=None): - """Multi-head attention with relative positional encoding.""" - - scale = 1 / (d_head ** 0.5) - with tf.variable_scope(scope, reuse=reuse): - if mems is not None and mems.shape.ndims > 1: - cat = tf.concat([mems, h], 0) - else: - cat = h - - # content heads - q_head_h = head_projection( - h, d_model, n_head, d_head, kernel_initializer, 'q') - k_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'k') - v_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'v') - - # positional heads - k_head_r = head_projection( - r, d_model, n_head, d_head, kernel_initializer, 'r') - - # core attention ops - attn_vec = rel_attn_core( - q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask, dropatt, is_training, scale) - - # post processing - output = post_attention(h, attn_vec, d_model, n_head, d_head, dropout, - is_training, kernel_initializer) - - return output - - -def two_stream_rel_attn(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias, - seg_embed, attn_mask_h, attn_mask_g, target_mapping, - d_model, n_head, d_head, dropout, dropatt, is_training, - kernel_initializer, scope='rel_attn'): - """Two-stream attention with relative positional encoding.""" - - scale = 1 / (d_head ** 0.5) - with tf.variable_scope(scope, reuse=False): - - # content based attention score - if mems is not None and mems.shape.ndims > 1: - cat = tf.concat([mems, h], 0) - else: - cat = h - - # content-based key head - k_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'k') - - # content-based value head - v_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'v') - - # position-based key head - k_head_r = head_projection( - r, d_model, n_head, d_head, 
kernel_initializer, 'r') - - ##### h-stream - # content-stream query head - q_head_h = head_projection( - h, d_model, n_head, d_head, kernel_initializer, 'q') - - # core attention ops - attn_vec_h = rel_attn_core( - q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask_h, dropatt, is_training, scale) - - # post processing - output_h = post_attention(h, attn_vec_h, d_model, n_head, d_head, dropout, - is_training, kernel_initializer) - - with tf.variable_scope(scope, reuse=True): - ##### g-stream - # query-stream query head - q_head_g = head_projection( - g, d_model, n_head, d_head, kernel_initializer, 'q') - - # core attention ops - if target_mapping is not None: - q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping) - attn_vec_g = rel_attn_core( - q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale) - attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping) - else: - attn_vec_g = rel_attn_core( - q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale) - - # post processing - output_g = post_attention(g, attn_vec_g, d_model, n_head, d_head, dropout, - is_training, kernel_initializer) - - return output_h, output_g - - -def transformer_xl(inp_k, n_token, n_layer, d_model, n_head, - d_head, d_inner, dropout, dropatt, attn_type, - bi_data, initializer, is_training, mem_len=None, - inp_q=None, mems=None, - same_length=False, clamp_len=-1, untie_r=False, - use_tpu=True, input_mask=None, - perm_mask=None, seg_id=None, reuse_len=None, - ff_activation='relu', target_mapping=None, - use_bfloat16=False, scope='transformer', **kwargs): - """ - Defines a Transformer-XL computation graph with additional - support for XLNet. - - Args: - - inp_k: int32 Tensor in shape [len, bsz], the input token IDs. - seg_id: int32 Tensor in shape [len, bsz], the input segment IDs. - input_mask: float32 Tensor in shape [len, bsz], the input mask. - 0 for real tokens and 1 for padding. - mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory - from previous batches. The length of the list equals n_layer. - If None, no memory is used. - perm_mask: float32 Tensor in shape [len, len, bsz]. - If perm_mask[i, j, k] = 0, i attend to j in batch k; - if perm_mask[i, j, k] = 1, i does not attend to j in batch k. - If None, each position attends to all the others. - target_mapping: float32 Tensor in shape [num_predict, len, bsz]. - If target_mapping[i, j, k] = 1, the i-th predict in batch k is - on the j-th token. - Only used during pretraining for partial prediction. - Set to None during finetuning. - inp_q: float32 Tensor in shape [len, bsz]. - 1 for tokens with losses and 0 for tokens without losses. - Only used during pretraining for two-stream attention. - Set to None during finetuning. - - n_layer: int, the number of layers. - d_model: int, the hidden size. - n_head: int, the number of attention heads. - d_head: int, the dimension size of each attention head. - d_inner: int, the hidden size in feed-forward layers. - ff_activation: str, "relu" or "gelu". - untie_r: bool, whether to untie the biases in attention. - n_token: int, the vocab size. - - is_training: bool, whether in training mode. - use_tpu: bool, whether TPUs are used. - use_bfloat16: bool, use bfloat16 instead of float32. - dropout: float, dropout rate. - dropatt: float, dropout rate on attention probabilities. 
- init: str, the initialization scheme, either "normal" or "uniform". - init_range: float, initialize the parameters with a uniform distribution - in [-init_range, init_range]. Only effective when init="uniform". - init_std: float, initialize the parameters with a normal distribution - with mean 0 and stddev init_std. Only effective when init="normal". - mem_len: int, the number of tokens to cache. - reuse_len: int, the number of tokens in the currect batch to be cached - and reused in the future. - bi_data: bool, whether to use bidirectional input pipeline. - Usually set to True during pretraining and False during finetuning. - clamp_len: int, clamp all relative distances larger than clamp_len. - -1 means no clamping. - same_length: bool, whether to use the same attention length for each token. - summary_type: str, "last", "first", "mean", or "attn". The method - to pool the input to get a vector representation. - initializer: A tf initializer. - scope: scope name for the computation graph. - """ - tf.logging.info('memory input {}'.format(mems)) - tf_float = tf.bfloat16 if use_bfloat16 else tf.float32 - tf.logging.info('Use float type {}'.format(tf_float)) - - new_mems = [] - with tf.variable_scope(scope): - if untie_r: - r_w_bias = tf.get_variable('r_w_bias', [n_layer, n_head, d_head], - dtype=tf_float, initializer=initializer) - r_r_bias = tf.get_variable('r_r_bias', [n_layer, n_head, d_head], - dtype=tf_float, initializer=initializer) - else: - r_w_bias = tf.get_variable('r_w_bias', [n_head, d_head], - dtype=tf_float, initializer=initializer) - r_r_bias = tf.get_variable('r_r_bias', [n_head, d_head], - dtype=tf_float, initializer=initializer) - - bsz = tf.shape(inp_k)[1] - qlen = tf.shape(inp_k)[0] - mlen = tf.shape(mems[0])[0] if mems is not None else 0 - klen = mlen + qlen - - ##### Attention mask - # causal attention mask - if attn_type == 'uni': - attn_mask = _create_mask(qlen, mlen, tf_float, same_length) - attn_mask = attn_mask[:, :, None, None] - elif attn_type == 'bi': - attn_mask = None - else: - raise ValueError('Unsupported attention type: {}'.format(attn_type)) - - # data mask: input mask & perm mask - if input_mask is not None and perm_mask is not None: - data_mask = input_mask[None] + perm_mask - elif input_mask is not None and perm_mask is None: - data_mask = input_mask[None] - elif input_mask is None and perm_mask is not None: - data_mask = perm_mask - else: - data_mask = None - - if data_mask is not None: - # all mems can be attended to - mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz], - dtype=tf_float) - data_mask = tf.concat([mems_mask, data_mask], 1) - if attn_mask is None: - attn_mask = data_mask[:, :, :, None] - else: - attn_mask += data_mask[:, :, :, None] - - if attn_mask is not None: - attn_mask = tf.cast(attn_mask > 0, dtype=tf_float) # change attn_mask into float type - - if attn_mask is not None: - non_tgt_mask = -tf.eye(qlen, dtype=tf_float) - non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=tf_float), - non_tgt_mask], axis=-1) - non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, - dtype=tf_float) - else: - non_tgt_mask = None - - ##### Word embedding - word_emb_k, lookup_table = embedding_lookup( - x=inp_k, - n_token=n_token, - d_embed=d_model, - initializer=initializer, - use_tpu=use_tpu, - dtype=tf_float, - scope='word_embedding') - - if inp_q is not None: - with tf.variable_scope('mask_emb'): - mask_emb = tf.get_variable('mask_emb', [1, 1, d_model], dtype=tf_float) - if target_mapping is not None: - word_emb_q = 
tf.tile(mask_emb, [tf.shape(target_mapping)[0], bsz, 1]) - else: - inp_q_ext = inp_q[:, :, None] - word_emb_q = inp_q_ext * mask_emb + (1 - inp_q_ext) * word_emb_k - output_h = tf.layers.dropout(word_emb_k, dropout, training=is_training) - if inp_q is not None: - output_g = tf.layers.dropout(word_emb_q, dropout, training=is_training) - - ##### Segment embedding - if seg_id is not None: - if untie_r: - r_s_bias = tf.get_variable('r_s_bias', [n_layer, n_head, d_head], - dtype=tf_float, initializer=initializer) - else: - # default case (tie) - r_s_bias = tf.get_variable('r_s_bias', [n_head, d_head], - dtype=tf_float, initializer=initializer) - - seg_embed = tf.get_variable('seg_embed', [n_layer, 2, n_head, d_head], - dtype=tf_float, initializer=initializer) - - # Convert `seg_id` to one-hot `seg_mat` - mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32) - cat_ids = tf.concat([mem_pad, seg_id], 0) - - # `1` indicates not in the same segment [qlen x klen x bsz] - seg_mat = tf.cast( - tf.logical_not(tf.equal(seg_id[:, None], cat_ids[None, :])), - tf.int32) - seg_mat = tf.one_hot(seg_mat, 2, dtype=tf_float) - else: - seg_mat = None - - ##### Positional encoding - pos_emb = relative_positional_encoding( - qlen, klen, d_model, clamp_len, attn_type, bi_data, - bsz=bsz, dtype=tf_float) - pos_emb = tf.layers.dropout(pos_emb, dropout, training=is_training) - - ##### Attention layers - if mems is None: - mems = [None] * n_layer - - for i in range(n_layer): - # cache new mems - new_mems.append(_cache_mem(output_h, mems[i], mem_len, reuse_len)) - - # segment bias - if seg_id is None: - r_s_bias_i = None - seg_embed_i = None - else: - r_s_bias_i = r_s_bias if not untie_r else r_s_bias[i] - seg_embed_i = seg_embed[i] - - with tf.variable_scope('layer_{}'.format(i)): - if inp_q is not None: - output_h, output_g = two_stream_rel_attn( - h=output_h, - g=output_g, - r=pos_emb, - r_w_bias=r_w_bias if not untie_r else r_w_bias[i], - r_r_bias=r_r_bias if not untie_r else r_r_bias[i], - seg_mat=seg_mat, - r_s_bias=r_s_bias_i, - seg_embed=seg_embed_i, - attn_mask_h=non_tgt_mask, - attn_mask_g=attn_mask, - mems=mems[i], - target_mapping=target_mapping, - d_model=d_model, - n_head=n_head, - d_head=d_head, - dropout=dropout, - dropatt=dropatt, - is_training=is_training, - kernel_initializer=initializer) - reuse = True - else: - reuse = False - - output_h = rel_multihead_attn( - h=output_h, - r=pos_emb, - r_w_bias=r_w_bias if not untie_r else r_w_bias[i], - r_r_bias=r_r_bias if not untie_r else r_r_bias[i], - seg_mat=seg_mat, - r_s_bias=r_s_bias_i, - seg_embed=seg_embed_i, - attn_mask=non_tgt_mask, - mems=mems[i], - d_model=d_model, - n_head=n_head, - d_head=d_head, - dropout=dropout, - dropatt=dropatt, - is_training=is_training, - kernel_initializer=initializer, - reuse=reuse) - - if inp_q is not None: - output_g = positionwise_ffn( - inp=output_g, - d_model=d_model, - d_inner=d_inner, - dropout=dropout, - kernel_initializer=initializer, - activation_type=ff_activation, - is_training=is_training) - - output_h = positionwise_ffn( - inp=output_h, - d_model=d_model, - d_inner=d_inner, - dropout=dropout, - kernel_initializer=initializer, - activation_type=ff_activation, - is_training=is_training, - reuse=reuse) - - if inp_q is not None: - output = tf.layers.dropout(output_g, dropout, training=is_training) - else: - output = tf.layers.dropout(output_h, dropout, training=is_training) - - return output, new_mems, lookup_table - - -def lm_loss(hidden, target, n_token, d_model, initializer, lookup_table=None, - tie_weight=False, 
bi_data=True, use_tpu=False): - """doc.""" - - with tf.variable_scope('lm_loss'): - if tie_weight: - assert lookup_table is not None, \ - 'lookup_table cannot be None for tie_weight' - softmax_w = lookup_table - else: - softmax_w = tf.get_variable('weight', [n_token, d_model], - dtype=hidden.dtype, initializer=initializer) - - softmax_b = tf.get_variable('bias', [n_token], dtype=hidden.dtype, - initializer=tf.zeros_initializer()) - - logits = tf.einsum('ibd,nd->ibn', hidden, softmax_w) + softmax_b - - if use_tpu: - one_hot_target = tf.one_hot(target, n_token, dtype=logits.dtype) - loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) - else: - loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, - logits=logits) - - return loss - - -def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout, - dropatt, input_mask, is_training, initializer, - scope=None, reuse=None, use_proj=True): - - """ - Different classification tasks may not may not share the same parameters - to summarize the sequence features. - - If shared, one can keep the `scope` to the default value `None`. - Otherwise, one should specify a different `scope` for each task. - """ - - with tf.variable_scope(scope, 'sequnece_summary', reuse=reuse): - if summary_type == 'last': - summary = hidden[-1] - elif summary_type == 'first': - summary = hidden[0] - elif summary_type == 'mean': - summary = tf.reduce_mean(hidden, axis=0) - elif summary_type == 'attn': - bsz = tf.shape(hidden)[1] - - summary_bias = tf.get_variable('summary_bias', [d_model], - dtype=hidden.dtype, - initializer=initializer) - summary_bias = tf.tile(summary_bias[None, None], [1, bsz, 1]) - - if input_mask is not None: - input_mask = input_mask[None, :, :, None] - - summary = multihead_attn(summary_bias, hidden, hidden, input_mask, - d_model, n_head, d_head, dropout, dropatt, - is_training, initializer, residual=False) - summary = summary[0] - else: - raise ValueError('Unsupported summary type {}'.format(summary_type)) - - # use another projection as in BERT - if use_proj: - summary = tf.layers.dense( - summary, - d_model, - activation=tf.tanh, - kernel_initializer=initializer, - name='summary') - - # dropout - summary = tf.layers.dropout( - summary, dropout, training=is_training, - name='dropout') - - return summary - - -def classification_loss(hidden, labels, n_class, initializer, scope, reuse=None, - return_logits=False): - """ - Different classification tasks should use different scope names to ensure - different dense layers (parameters) are used to produce the logits. - - An exception will be in transfer learning, where one hopes to transfer - the classification weights. 
- """ - - with tf.variable_scope(scope, reuse=reuse): - logits = tf.layers.dense( - hidden, - n_class, - kernel_initializer=initializer, - name='logit') - - one_hot_target = tf.one_hot(labels, n_class, dtype=hidden.dtype) - loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) - - if return_logits: - return loss, logits - - return loss - - -def regression_loss(hidden, labels, initializer, scope, reuse=None, - return_logits=False): - with tf.variable_scope(scope, reuse=reuse): - logits = tf.layers.dense( - hidden, - 1, - kernel_initializer=initializer, - name='logit') - - logits = tf.squeeze(logits, axis=-1) - loss = tf.square(logits - labels) - - if return_logits: - return loss, logits - - return loss - diff --git a/build/lib/caire-covid/mrqa/multiqa_utils.py b/build/lib/caire-covid/mrqa/multiqa_utils.py deleted file mode 100644 index 09b8f93..0000000 --- a/build/lib/caire-covid/mrqa/multiqa_utils.py +++ /dev/null @@ -1,111 +0,0 @@ -import argparse -import collections -import json -import numpy as np -import os -import re -import string -import sys - -OPTS = None - -def make_qid_to_has_ans(dataset): - qid_to_has_ans = {} - for entry in dataset: - qid_to_has_ans[entry['qid']] = bool(entry['answers']) - return qid_to_has_ans - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - def remove_articles(text): - regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) - return re.sub(regex, ' ', text) - def white_space_fix(text): - return ' '.join(text.split()) - def remove_punc(text): - exclude = set(string.punctuation) - return ''.join(ch for ch in text if ch not in exclude) - def lower(text): - return text.lower() - return white_space_fix(remove_articles(remove_punc(lower(s)))) - -def get_tokens(s): - if not s: return [] - return normalize_answer(s).split() - -def compute_exact(a_gold, a_pred): - return int(normalize_answer(a_gold) == normalize_answer(a_pred)) - -def compute_f1(a_gold, a_pred): - gold_toks = get_tokens(a_gold) - pred_toks = get_tokens(a_pred) - common = collections.Counter(gold_toks) & collections.Counter(pred_toks) - num_same = sum(common.values()) - if len(gold_toks) == 0 or len(pred_toks) == 0: - # If either is no-answer, then F1 is 1 if they agree, 0 otherwise - return int(gold_toks == pred_toks) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(pred_toks) - recall = 1.0 * num_same / len(gold_toks) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - -def get_raw_scores(dataset, preds): - exact_scores = {} - f1_scores = {} - - for qa in dataset: - qid = qa['qid'] - gold_answers = [a['text'] for a in qa['detected_answers'] - if normalize_answer(a['text'])] - if not gold_answers: - # For unanswerable questions, only correct answer is empty string - gold_answers = [''] - if qid not in preds: - print('Missing prediction for %s' % qid) - continue - a_pred = preds[qid] - # Take max over all gold answers - exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) - f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) - return exact_scores, f1_scores - -def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): - num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) - cur_score = num_no_ans - best_score = cur_score - best_thresh = 0.0 - qid_list = sorted(na_probs, key=lambda k: na_probs[k]) - for i, qid in enumerate(qid_list): - if qid not in scores: continue - if qid_to_has_ans[qid]: - diff = scores[qid] - else: - if preds[qid]: - diff = -1 - 
else: - diff = 0 - cur_score += diff - if cur_score > best_score: - best_score = cur_score - best_thresh = na_probs[qid] - - has_ans_score, has_ans_cnt = 0, 0 - for qid in qid_list: - if not qid_to_has_ans[qid]: continue - has_ans_cnt += 1 - - if qid not in scores: continue - has_ans_score += scores[qid] - return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt - -def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): - best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) - best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) - main_eval['best_exact'] = best_exact - main_eval['best_exact_thresh'] = exact_thresh - main_eval['best_f1'] = best_f1 - main_eval['best_f1_thresh'] = f1_thresh - main_eval['has_ans_exact'] = has_ans_exact - main_eval['has_ans_f1'] = has_ans_f1 \ No newline at end of file diff --git a/build/lib/caire-covid/mrqa/predictor_kaggle.py b/build/lib/caire-covid/mrqa/predictor_kaggle.py deleted file mode 100644 index 0bca014..0000000 --- a/build/lib/caire-covid/mrqa/predictor_kaggle.py +++ /dev/null @@ -1,923 +0,0 @@ -# coding=utf-8 -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl import flags -import absl.logging as _logging # pylint: disable=unused-import - -import collections -import os -import time -import math -import json -import six -import random -import gc -import sys -import pprint - -import numpy as np - -if six.PY2: - import cPickle as pickle -else: - import pickle -import jsonlines - -import tensorflow as tf -import sentencepiece as spm -from mrqa.prepro_utils import preprocess_text, encode_ids, encode_pieces, printable_text -import mrqa.function_builder as function_builder -import mrqa.model_utils as model_utils -import mrqa.multiqa_utils -from mrqa.data_utils import SEP_ID, CLS_ID, VOCAB_SIZE - -SPIECE_UNDERLINE = u'▁' - -SEG_ID_P = 0 -SEG_ID_Q = 1 -SEG_ID_CLS = 2 -SEG_ID_PAD = 3 - - -class MultiqaExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. 
- """ - - def __init__(self, - qas_id, - question_text, - paragraph_text, - orig_answer_text=None, - start_position=None, - send_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.paragraph_text = paragraph_text - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.send_position = send_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (printable_text(self.qas_id)) - s += ", question_text: %s" % ( - printable_text(self.question_text)) - s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - if self.start_position: - s += ", send_position: %d" % (self.send_position) - return s - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tok_start_to_orig_index, - tok_end_to_orig_index, - token_is_max_context, - input_ids, - input_mask, - p_mask, - segment_ids, - paragraph_len, - cls_index, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tok_start_to_orig_index = tok_start_to_orig_index # paragraph - self.tok_end_to_orig_index = tok_end_to_orig_index # paragraph - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids # context+question - self.input_mask = input_mask - self.p_mask = p_mask - self.segment_ids = segment_ids - self.paragraph_len = paragraph_len - self.cls_index = cls_index - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - -def read_kaggle_data(input_file, is_training): - """Read a QA data jsonl file into a list of Examples.""" - with tf.gfile.Open(input_file, "r") as reader: - data = json.load(reader) - - input_data = data['data'] - examples = [] - for item in input_data: - paragraphs = item['paragraphs'] - for entry in paragraphs: - assert type(entry) == dict - assert u'context' in entry - assert u'qas' in entry - - paragraph_text = entry["context"] - - for qa in entry["qas"]: - assert u'id' in qa - assert u'question' in qa - - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - send_position = None - orig_answer_text = None - is_impossible = False - - example = MultiqaExample( - qas_id=qas_id, - question_text=question_text, - paragraph_text=paragraph_text, - orig_answer_text=orig_answer_text, - start_position=start_position, - send_position=send_position, - is_impossible=is_impossible) - examples.append(example) - return examples - -def arrange_kaggle_data(input_data, is_training): - """Read a QA data jsonl file into a list of Examples.""" - examples = [] - for entry in input_data: - - assert type(entry) == dict - assert u'context' in entry - assert u'qas' in entry - - paragraph_text = entry["context"] - - for qa in entry["qas"]: - assert u'id' in qa - assert u'question' in qa - - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - send_position = None - orig_answer_text = None - is_impossible = False - - example = MultiqaExample( - qas_id=qas_id, - question_text=question_text, - paragraph_text=paragraph_text, - orig_answer_text=orig_answer_text, - start_position=start_position, 
- send_position=send_position, - is_impossible=is_impossible) - examples.append(example) - return examples - -def _convert_index(index, pos, M=None, is_start=True): - if index[pos] is not None: - return index[pos] - N = len(index) - rear = pos - while rear < N - 1 and index[rear] is None: - rear += 1 - front = pos - while front > 0 and index[front] is None: - front -= 1 - assert index[front] is not None or index[rear] is not None - if index[front] is None: - if index[rear] >= 1: - if is_start: - return 0 - else: - return index[rear] - 1 - return index[rear] - if index[rear] is None: - if M is not None and index[front] < M - 1: - if is_start: - return index[front] + 1 - else: - return M - 1 - return index[front] - if is_start: - if index[rear] > index[front] + 1: - return index[front] + 1 - else: - return index[rear] - else: - if index[rear] > index[front] + 1: - return index[rear] - 1 - else: - return index[front] - -def convert_examples_to_features(examples, sp_model, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn, FLAGS): - """Loads a data file into a list of `InputBatch`s.""" - - cnt_pos, cnt_neg = 0, 0 - unique_id = 1000000000 - max_N, max_M = 1024, 1024 - f = np.zeros((max_N, max_M), dtype=np.float32) - - for (example_index, example) in enumerate(examples): - - if example_index % 100 == 0: - tf.logging.info('Converting {}/{} pos {} neg {}'.format( - example_index, len(examples), cnt_pos, cnt_neg)) - - query_tokens = encode_ids( - sp_model, - preprocess_text(example.question_text, lower=FLAGS.uncased)) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - paragraph_text = example.paragraph_text - para_tokens = encode_pieces( - sp_model, - preprocess_text(example.paragraph_text, lower=FLAGS.uncased)) - - chartok_to_tok_index = [] - tok_start_to_chartok_index = [] - tok_end_to_chartok_index = [] - char_cnt = 0 - for i, token in enumerate(para_tokens): - chartok_to_tok_index.extend([i] * len(token)) - tok_start_to_chartok_index.append(char_cnt) - char_cnt += len(token) - tok_end_to_chartok_index.append(char_cnt - 1) - - tok_cat_text = ''.join(para_tokens).replace(SPIECE_UNDERLINE, ' ') - N, M = len(paragraph_text), len(tok_cat_text) - - if N > max_N or M > max_M: - max_N = max(N, max_N) - max_M = max(M, max_M) - f = np.zeros((max_N, max_M), dtype=np.float32) - gc.collect() - - g = {} - - def _lcs_match(max_dist): - f.fill(0) - g.clear() - - ### longest common sub sequence - # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j)) - for i in range(N): - - # note(zhiliny): - # unlike standard LCS, this is specifically optimized for the setting - # because the mismatch between sentence pieces and original text will - # be small - for j in range(i - max_dist, i + max_dist): - if j >= M or j < 0: continue - - if i > 0: - g[(i, j)] = 0 - f[i, j] = f[i - 1, j] - - if j > 0 and f[i, j - 1] > f[i, j]: - g[(i, j)] = 1 - f[i, j] = f[i, j - 1] - - f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0 - if (preprocess_text(paragraph_text[i], lower=FLAGS.uncased, - remove_space=False) - == tok_cat_text[j] - and f_prev + 1 > f[i, j]): - g[(i, j)] = 2 - f[i, j] = f_prev + 1 - - max_dist = abs(N - M) + 5 - for _ in range(2): - _lcs_match(max_dist) - if f[N - 1, M - 1] > 0.8 * N: break - max_dist *= 2 - - orig_to_chartok_index = [None] * N - chartok_to_orig_index = [None] * M - i, j = N - 1, M - 1 - while i >= 0 and j >= 0: - if (i, j) not in g: break - if g[(i, j)] == 2: - orig_to_chartok_index[i] = j - 
chartok_to_orig_index[j] = i - i, j = i - 1, j - 1 - elif g[(i, j)] == 1: - j = j - 1 - else: - i = i - 1 - - if all(v is None for v in orig_to_chartok_index) or f[N - 1, M - 1] < 0.8 * N: - print('MISMATCH DETECTED!') - continue - - tok_start_to_orig_index = [] - tok_end_to_orig_index = [] - for i in range(len(para_tokens)): - start_chartok_pos = tok_start_to_chartok_index[i] - end_chartok_pos = tok_end_to_chartok_index[i] - start_orig_pos = _convert_index(chartok_to_orig_index, start_chartok_pos, - N, is_start=True) - end_orig_pos = _convert_index(chartok_to_orig_index, end_chartok_pos, - N, is_start=False) - - tok_start_to_orig_index.append(start_orig_pos) - tok_end_to_orig_index.append(end_orig_pos) - - if not is_training: - tok_start_position = tok_end_position = None - - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - - if is_training and not example.is_impossible: - start_position = example.start_position - # end_position = start_position + len(example.orig_answer_text) - 1 - end_position = example.send_position - - start_chartok_pos = _convert_index(orig_to_chartok_index, start_position, - is_start=True) - tok_start_position = chartok_to_tok_index[start_chartok_pos] - - end_chartok_pos = _convert_index(orig_to_chartok_index, end_position, - is_start=False) - tok_end_position = chartok_to_tok_index[end_chartok_pos] - assert tok_start_position <= tok_end_position - - def _piece_to_id(x): - if six.PY2 and isinstance(x, unicode): - x = x.encode('utf-8') - return sp_model.PieceToId(x) - - all_doc_tokens = list(map(_piece_to_id, para_tokens)) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. 
- _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_is_max_context = {} - segment_ids = [] - p_mask = [] - - cur_tok_start_to_orig_index = [] - cur_tok_end_to_orig_index = [] - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - - cur_tok_start_to_orig_index.append( - tok_start_to_orig_index[split_token_index]) - cur_tok_end_to_orig_index.append( - tok_end_to_orig_index[split_token_index]) - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(SEG_ID_P) - p_mask.append(0) - - paragraph_len = len(tokens) - - tokens.append(SEP_ID) - segment_ids.append(SEG_ID_P) - p_mask.append(1) - - # note(zhiliny): we put P before Q - # because during pretraining, B is always shorter than A - for token in query_tokens: - tokens.append(token) - segment_ids.append(SEG_ID_Q) - p_mask.append(1) - tokens.append(SEP_ID) - segment_ids.append(SEG_ID_Q) - p_mask.append(1) - - cls_index = len(segment_ids) - tokens.append(CLS_ID) - segment_ids.append(SEG_ID_CLS) - p_mask.append(0) - - input_ids = tokens - - # The mask has 0 for real tokens and 1 for padding tokens. Only real - # tokens are attended to. - input_mask = [0] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(1) - segment_ids.append(SEG_ID_PAD) - p_mask.append(1) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - assert len(p_mask) == max_seq_length - - span_is_impossible = example.is_impossible - start_position = None - end_position = None - if is_training and not span_is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. - doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - # print("out of span") - # print("{}|{}|{}|{}".format(doc_start,tok_start_position,tok_end_position,doc_end)) - out_of_span = True - if out_of_span: - # continue - start_position = 0 - end_position = 0 - span_is_impossible = True - else: - # note(zhiliny): we put P before Q, so doc_offset should be zero. - # doc_offset = len(query_tokens) + 2 - doc_offset = 0 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and span_is_impossible: - start_position = cls_index - end_position = cls_index - - # note(zhiliny): With multi processing, - # the example_index is actually the index within the current process - # therefore we use example_index=None to avoid being used in the future. - # The current code does not use example_index of training data. 
- if is_training: - feat_example_index = None - else: - feat_example_index = example_index - - feature = InputFeatures( - unique_id=unique_id, - example_index=feat_example_index, - doc_span_index=doc_span_index, - tok_start_to_orig_index=cur_tok_start_to_orig_index, - tok_end_to_orig_index=cur_tok_end_to_orig_index, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - p_mask=p_mask, - segment_ids=segment_ids, - paragraph_len=paragraph_len, - cls_index=cls_index, - start_position=start_position, - end_position=end_position, - is_impossible=span_is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - if span_is_impossible: - cnt_neg += 1 - else: - cnt_pos += 1 - - tf.logging.info("Total number of instances: {} = pos {} neg {}".format( - cnt_pos + cnt_neg, cnt_pos, cnt_neg)) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, is_training): - self.is_training = is_training - self.num_features = 0 - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - def create_float_feature(values): - f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) - return f - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_float_feature(feature.input_mask) - features["p_mask"] = create_float_feature(feature.p_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - features["cls_index"] = create_int_feature([feature.cls_index]) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 0 - if feature.is_impossible: - impossible = 1 - features["is_impossible"] = 
create_float_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - return tf_example.SerializeToString() - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_top_log_probs", "start_top_index", - "end_top_log_probs", "end_top_index", "cls_logits"]) - -_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", - ["feature_index", "start_index", "end_index", - "start_log_prob", "end_log_prob"]) - -_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) - -def get_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, FLAGS): - """Write final predictions to the json file and log-odds of null if needed.""" - tf.logging.info("Getting predictions") - - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - all_predictions = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - - cur_null_score = result.cls_logits - - # if we could have irrelevant answers, get the min score of irrelevant - score_null = min(score_null, cur_null_score) - - for i in range(FLAGS.start_n_top): - for j in range(FLAGS.end_n_top): - start_log_prob = result.start_top_log_probs[i] - start_index = result.start_top_index[i] - - j_index = i * FLAGS.end_n_top + j - - end_log_prob = result.end_top_log_probs[j_index] - end_index = result.end_top_index[j_index] - - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. 
- if start_index >= feature.paragraph_len - 1: - continue - if end_index >= feature.paragraph_len - 1: - continue - - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_log_prob=start_log_prob, - end_log_prob=end_log_prob)) - - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_log_prob + x.end_log_prob), - reverse=True) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - - tok_start_to_orig_index = feature.tok_start_to_orig_index - tok_end_to_orig_index = feature.tok_end_to_orig_index - start_orig_pos = tok_start_to_orig_index[pred.start_index] - end_orig_pos = tok_end_to_orig_index[pred.end_index] - - paragraph_text = example.paragraph_text - final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() - - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_log_prob=pred.start_log_prob, - end_log_prob=pred.end_log_prob)) - - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. - if not nbest: - nbest.append( - _NbestPrediction(text="", start_log_prob=-1e6, - end_log_prob=-1e6)) - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_log_prob + entry.end_log_prob) - if not best_non_null_entry: - best_non_null_entry = entry - - assert best_non_null_entry is not None - - all_predictions[example.qas_id] = best_non_null_entry.text - - return all_predictions - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - -def input_fn_builder(input_glob, seq_length, is_training, drop_remainder, - num_hosts, num_threads=8): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.float32), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - "cls_index": tf.FixedLenFeature([], tf.int64), - "p_mask": tf.FixedLenFeature([seq_length], tf.float32) - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["is_impossible"] = tf.FixedLenFeature([], tf.float32) - - tf.logging.info("Input tfrecord file glob 
{}".format(input_glob)) - global_input_paths = tf.gfile.Glob(input_glob) - tf.logging.info("Find {} input paths {}".format( - len(global_input_paths), global_input_paths)) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. - for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.cast(t, tf.int32) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - if FLAGS.use_tpu: - batch_size = params["batch_size"] - elif is_training: - batch_size = FLAGS.train_batch_size - else: - batch_size = FLAGS.predict_batch_size - - # Split tfrecords across hosts - if num_hosts > 1: - host_id = params["context"].current_host - num_files = len(global_input_paths) - if num_files >= num_hosts: - num_files_per_host = (num_files + num_hosts - 1) // num_hosts - my_start_file_id = host_id * num_files_per_host - my_end_file_id = min((host_id + 1) * num_files_per_host, num_files) - input_paths = global_input_paths[my_start_file_id: my_end_file_id] - tf.logging.info("Host {} handles {} files".format(host_id, - len(input_paths))) - else: - input_paths = global_input_paths - - if len(input_paths) == 1: - d = tf.data.TFRecordDataset(input_paths[0]) - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - if is_training: - d = d.shuffle(buffer_size=FLAGS.shuffle_buffer) - d = d.repeat() - else: - d = tf.data.Dataset.from_tensor_slices(input_paths) - # file level shuffle - d = d.shuffle(len(input_paths)).repeat() - - # `cycle_length` is the number of parallel files that get read. - cycle_length = min(num_threads, len(input_paths)) - - d = d.apply( - tf.contrib.data.parallel_interleave( - tf.data.TFRecordDataset, - sloppy=is_training, - cycle_length=cycle_length)) - - if is_training: - # sample level shuffle - d = d.shuffle(buffer_size=FLAGS.shuffle_buffer) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - num_parallel_batches=num_threads, - drop_remainder=drop_remainder)) - d = d.prefetch(1024) - - return d - - return input_fn - - -def mrqa_predictor(FLAGS, predict_fn, data): - """ - Get prediction with the data got fron mrqa official request. 
- """ - tf.logging.set_verbosity(tf.logging.INFO) - - sp_model = spm.SentencePieceProcessor() - sp_model.Load(FLAGS.spiece_model_file) - - tf.logging.info("Got Data from IR system...") - eval_data = arrange_kaggle_data(data, is_training=False) - - eval_writer = FeatureWriter(is_training=False) - eval_features = [] - eval_features_inp = [] - - def append_feature(feature): - eval_features.append(feature) - eval_features_inp.append(eval_writer.process_feature(feature)) - - convert_examples_to_features( - examples=eval_data, - sp_model=sp_model, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=False, - output_fn=append_feature, - FLAGS=FLAGS) - - # predict_fn = tf.contrib.predictor.from_saved_model(FLAGS.export_dir_base) - - cur_results = [] - - for num, eval_feature in enumerate(eval_features_inp): - result = predict_fn({"examples":[eval_feature]}) - - if len(cur_results) % 1000 == 0: - tf.logging.info("Processing example: %d" % (len(cur_results))) - - unique_id = int(result["unique_ids"]) - start_top_log_probs = ( - [float(x) for x in result["start_top_log_probs"].flat]) - start_top_index = [int(x) for x in result["start_top_index"].flat] - end_top_log_probs = ( - [float(x) for x in result["end_top_log_probs"].flat]) - end_top_index = [int(x) for x in result["end_top_index"].flat] - - cls_logits = float(result["cls_logits"].flat[0]) - - cur_results.append( - RawResult( - unique_id=unique_id, - start_top_log_probs=start_top_log_probs, - start_top_index=start_top_index, - end_top_log_probs=end_top_log_probs, - end_top_index=end_top_index, - cls_logits=cls_logits)) - - ret = get_predictions(eval_data, eval_features, cur_results, - FLAGS.n_best_size, FLAGS.max_answer_length, - FLAGS) - return dict(ret) - -if __name__ == "__main__": - pp = pprint.PrettyPrinter(indent=4) diff --git a/build/lib/caire-covid/mrqa/prepro_utils.py b/build/lib/caire-covid/mrqa/prepro_utils.py deleted file mode 100644 index 1d8ac83..0000000 --- a/build/lib/caire-covid/mrqa/prepro_utils.py +++ /dev/null @@ -1,138 +0,0 @@ -# coding=utf-8 -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import unicodedata -import six -from functools import partial - - -SPIECE_UNDERLINE = '▁' - - -def printable_text(text): - """Returns text encoded in a way suitable for print or `tf.logging`.""" - - # These functions want `str` for both Python2 and Python3, but in one case - # it's a Unicode string and in the other it's a byte string. 
- if six.PY3: - if isinstance(text, str): - return text - elif isinstance(text, bytes): - return text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - elif six.PY2: - if isinstance(text, str): - return text - elif isinstance(text, unicode): - return text.encode("utf-8") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - else: - raise ValueError("Not running on Python2 or Python 3?") - - -def print_(*args): - new_args = [] - for arg in args: - if isinstance(arg, list): - s = [printable_text(i) for i in arg] - s = ' '.join(s) - new_args.append(s) - else: - new_args.append(printable_text(arg)) - print(*new_args) - - -def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False): - if remove_space: - outputs = ' '.join(inputs.strip().split()) - else: - outputs = inputs - outputs = outputs.replace("``", '"').replace("''", '"') - - if six.PY2 and isinstance(outputs, str): - outputs = outputs.decode('utf-8') - - if not keep_accents: - outputs = unicodedata.normalize('NFKD', outputs) - outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) - if lower: - outputs = outputs.lower() - - return outputs - - -def encode_pieces(sp_model, text, return_unicode=True, sample=False): - # return_unicode is used only for py2 - - # note(zhiliny): in some systems, sentencepiece only accepts str for py2 - if six.PY2 and isinstance(text, unicode): - text = text.encode('utf-8') - - if not sample: - pieces = sp_model.EncodeAsPieces(text) - else: - pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1) - new_pieces = [] - for piece in pieces: - if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit(): - cur_pieces = sp_model.EncodeAsPieces( - piece[:-1].replace(SPIECE_UNDERLINE, '')) - if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: - if len(cur_pieces[0]) == 1: - cur_pieces = cur_pieces[1:] - else: - cur_pieces[0] = cur_pieces[0][1:] - cur_pieces.append(piece[-1]) - new_pieces.extend(cur_pieces) - else: - new_pieces.append(piece) - - # note(zhiliny): convert back to unicode for py2 - if six.PY2 and return_unicode: - ret_pieces = [] - for piece in new_pieces: - if isinstance(piece, str): - piece = piece.decode('utf-8') - ret_pieces.append(piece) - new_pieces = ret_pieces - - return new_pieces - - -def encode_ids(sp_model, text, sample=False): - pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample) - ids = [sp_model.PieceToId(piece) for piece in pieces] - return ids - - -if __name__ == '__main__': - import sentencepiece as spm - - sp = spm.SentencePieceProcessor() - sp.load('sp10m.uncased.v3.model') - - print_(u'I was born in 2000, and this is falsé.') - print_(u'ORIGINAL', sp.EncodeAsPieces(u'I was born in 2000, and this is falsé.')) - print_(u'OURS', encode_pieces(sp, u'I was born in 2000, and this is falsé.')) - print(encode_ids(sp, u'I was born in 2000, and this is falsé.')) - print_('') - prepro_func = partial(preprocess_text, lower=True) - print_(prepro_func('I was born in 2000, and this is falsé.')) - print_('ORIGINAL', sp.EncodeAsPieces(prepro_func('I was born in 2000, and this is falsé.'))) - print_('OURS', encode_pieces(sp, prepro_func('I was born in 2000, and this is falsé.'))) - print(encode_ids(sp, prepro_func('I was born in 2000, and this is falsé.'))) - print_('') - print_('I was born in 2000, and this is falsé.') - print_('ORIGINAL', sp.EncodeAsPieces('I was born in 2000, and this is falsé.')) - print_('OURS', encode_pieces(sp, 'I was born in 
2000, and this is falsé.')) - print(encode_ids(sp, 'I was born in 2000, and this is falsé.')) - print_('') - print_('I was born in 92000, and this is falsé.') - print_('ORIGINAL', sp.EncodeAsPieces('I was born in 92000, and this is falsé.')) - print_('OURS', encode_pieces(sp, 'I was born in 92000, and this is falsé.')) - print(encode_ids(sp, 'I was born in 92000, and this is falsé.')) - diff --git a/build/lib/caire-covid/mrqa/tpu_estimator.py b/build/lib/caire-covid/mrqa/tpu_estimator.py deleted file mode 100644 index cc0f801..0000000 --- a/build/lib/caire-covid/mrqa/tpu_estimator.py +++ /dev/null @@ -1,3522 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =================================================================== -"""TPUEstimator class.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import copy -import os -import signal -import sys -import threading -import time - -import numpy as np -import six -from six.moves import queue as Queue # pylint: disable=redefined-builtin -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result -from tensorflow.contrib.tpu.python.tpu import tensor_tracer -from tensorflow.contrib.tpu.python.ops import tpu_ops -from tensorflow.contrib.tpu.python.tpu import error_handling -from tensorflow.contrib.tpu.python.tpu import session_support -from tensorflow.contrib.tpu.python.tpu import tpu -from tensorflow.contrib.tpu.python.tpu import tpu_config -from tensorflow.contrib.tpu.python.tpu import tpu_context -from tensorflow.contrib.tpu.python.tpu import tpu_feed -from tensorflow.contrib.tpu.python.tpu import training_loop -from tensorflow.contrib.tpu.python.tpu import util as util_lib -from tensorflow.contrib.training.python.training import hparam -from tensorflow.core.framework import variable_pb2 -from tensorflow.core.framework.summary_pb2 import Summary -from tensorflow.core.protobuf import config_pb2 -from tensorflow.python.client import session as tf_session -from tensorflow.python.data.ops import dataset_ops -from tensorflow.python.data.util import nest as data_nest -from tensorflow.python.estimator import estimator as estimator_lib -from tensorflow.python.estimator import model_fn as model_fn_lib -from tensorflow.python.estimator.export import export_output as export_output_lib -from tensorflow.python.framework import constant_op -from tensorflow.python.framework import dtypes -from tensorflow.python.framework import errors -from tensorflow.python.framework import ops -from tensorflow.python.ops import array_ops -from tensorflow.python.ops import check_ops -from tensorflow.python.ops import control_flow_ops -from tensorflow.python.ops import init_ops -from tensorflow.python.ops import math_ops -from tensorflow.python.ops import resource_variable_ops -from tensorflow.python.ops import state_ops 
-from tensorflow.python.ops import summary_ops_v2 as contrib_summary -from tensorflow.python.ops import variable_scope -from tensorflow.python.ops import variables -from tensorflow.python.platform import tf_logging as logging -from tensorflow.python.saved_model import tag_constants -from tensorflow.python.summary import summary -from tensorflow.python.training import basic_session_run_hooks -from tensorflow.python.training import evaluation -from tensorflow.python.training import session_run_hook -from tensorflow.python.training import training -from tensorflow.python.training import training_util -from tensorflow.python.util import function_utils -from tensorflow.python.util import nest -from tensorflow.python.util import tf_inspect - -_INITIAL_LOSS = 1e7 -_ZERO_LOSS = 0. -_TPU_ESTIMATOR = 'custom_tpu_estimator' -_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop' -_BATCH_SIZE_KEY = 'batch_size' -_CTX_KEY = 'context' -_USE_TPU_KEY = 'use_tpu' -_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum' -_ONE_GIGABYTE = 1024 * 1024 * 1024 -_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops' -_TPU_TRAIN_OP = '_tpu_train_op' -_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference' - -# Ideally _USE_TPU_KEY should be reserved as well. However there are already -# models that make use of this key, thus it can not be reserved now to prevent -# breakage. In the long run, we would like to mitigate this by migrating models -# off of using _USE_TPU_KEY. -_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY] - -# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is -# only used for per-core based deployments. For per-host based pipelines, if a -# user returns a Dataset instance it will be automatically wrapped in a -# tf.while_loop (This can be disabled by returning features and labels -# explicitly). -_WRAP_INPUT_FN_INTO_WHILE_LOOP = False - -ops.register_proto_function( - '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR), - proto_type=variable_pb2.VariableDef, - to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access - from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access - - -def _is_iterable(obj): - """A Python 2 and 3 compatible util to check whether `obj` is iterable.""" - try: - iter(obj) - return True - except TypeError: - return False - - -def _create_global_step(graph): - graph = graph or ops.get_default_graph() - if training.get_global_step(graph) is not None: - raise ValueError('"global_step" already exists.') - # Create in proper graph and base name_scope. - with graph.as_default() as g, g.name_scope(None): - return variable_scope.get_variable( - ops.GraphKeys.GLOBAL_STEP, - shape=[], - dtype=dtypes.int64, - initializer=init_ops.zeros_initializer(), - trainable=False, - use_resource=True, - collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) - - -def _create_or_get_iterations_per_loop(): - """Creates or gets the iterations_per_loop variable. - - In TPUEstimator, the user provided computation, the model_fn, is wrapped - inside a tf.while_loop for peak performance. The iterations of the loop are - specified by this variable, which adjusts its value on the CPU after each TPU - program execution and before the next TPU execution. - - The purpose of using a variable, rather then a constant, is to allow - TPUEstimator adapt the TPU training iterations according to the final steps - specified by users. 
For example, if the user sets the iterations_per_loop as 4 - in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop - variable will have the following value before each TPU training. - - - 1-th TPU execution: iterations_per_loop = 4 - - 2-th TPU execution: iterations_per_loop = 4 - - 3-th TPU execution: iterations_per_loop = 2 - - As model_fn increases the global step once per train_op invocation, the global - step is 10 after all TPU executions, matching the steps=10 inputs passed in by - users. - - Returns: - A TF non-trainable resource variable. - - Raises: - RuntimeError: If multi iterations_per_loop variables were found. - """ - graph = ops.get_default_graph() - collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR) - iter_vars = graph.get_collection(collection_name) - if len(iter_vars) == 1: - return iter_vars[0] - elif len(iter_vars) > 1: - raise RuntimeError('Multiple iterations_per_loop_var in collection.') - - with ops.colocate_with(training_util.get_global_step()): - with variable_scope.variable_scope( - _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE): - return variable_scope.get_variable( - _ITERATIONS_PER_LOOP_VAR, - initializer=init_ops.zeros_initializer(), - shape=[], - dtype=dtypes.int32, - trainable=False, - collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], - use_resource=True) - - -def _sync_variables_ops(ctx): - """Create varriables synchronization ops. - - Gets the variables back from TPU nodes. This means the variables updated - by TPU will now be *synced* to host memory. - In BROADCAST mode, we skip this sync since the variables are ususally too - big to transmit via RPC. - - Args: - ctx: A `_InternalTPUContext` instance with mode. - - Returns: - A list of sync ops. - """ - - if not ctx.is_input_broadcast_with_iterators(): - return [ - array_ops.check_numerics(v.read_value(), - 'Gradient for %s is NaN' % v.name).op - for v in variables.trainable_variables() - ] - else: - return [control_flow_ops.no_op()] - - -def _increase_eval_step_op(iterations_per_loop): - """Returns an op to increase the eval step for TPU evaluation. - - Args: - iterations_per_loop: Tensor. The number of eval steps running in TPU system - before returning to CPU host for each `Session.run`. - - Returns: - An operation - """ - eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access - # Estimator evaluate increases 1 by default. So, we increase the difference. - return state_ops.assign_add( - eval_step, - math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype), - use_locking=True) - - -def _extract_key_names(tensor_or_dict): - if isinstance(tensor_or_dict, dict): - return sorted(tensor_or_dict.keys()) - return [] - - -class _SIGNAL(object): - """Signal used to control the thread of infeed/outfeed. - - All preserved signals must be negative numbers. Positive numbers are used to - indicate the number of iterations for next training/evaluation loop. - """ - NEXT_BATCH = -1 - STOP = -2 - - -class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`. - - See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and - `export_outputs`. - - For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where - `metric_fn` runs on CPU to generate metrics and `tensors` represents the - `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`. 
- To be precise, TPU evaluation expects a slightly different signature from the - `tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a - dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`. - The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The - `tensors` usually specify the model logits, which are transferred back from - TPU system to CPU host. All tensors must have be batch-major, i.e., the batch - size is the first dimension. Once all tensors are available at CPU host from - all shards, they are concatenated (on CPU) and passed as positional arguments - to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is - a dict. `metric_fn` takes the `tensors` and returns a dict from metric string - name to the result of calling a metric function, namely a `(metric_tensor, - update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the - `eval_metrics`. - - `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This - function should not capture any Tensors in `model_fn`. - - `host_call` is a tuple of a `function` and a list or dictionary of `tensors` - to pass to that function and returns a list of Tensors. `host_call` currently - works for train() and evaluate(). The Tensors returned by the function is - executed on the CPU on every step, so there is communication overhead when - sending tensors from TPU to CPU. To reduce the overhead, try reducing the - size of the tensors. The `tensors` are concatenated along their major (batch) - dimension, and so must be >= rank 1. The `host_call` is useful for writing - summaries with `tf.contrib.summary.create_file_writer`. - """ - - def __new__(cls, - mode, - predictions=None, - loss=None, - train_op=None, - eval_metrics=None, - export_outputs=None, - scaffold_fn=None, - host_call=None, - training_hooks=None, - evaluation_hooks=None, - prediction_hooks=None): - """Creates a validated `TPUEstimatorSpec` instance.""" - host_calls = {} - if eval_metrics is not None: - host_calls['eval_metrics'] = eval_metrics - if host_call is not None: - host_calls['host_call'] = host_call - _OutfeedHostCall.validate(host_calls) - - training_hooks = tuple(training_hooks or []) - evaluation_hooks = tuple(evaluation_hooks or []) - prediction_hooks = tuple(prediction_hooks or []) - - for hook in training_hooks + evaluation_hooks + prediction_hooks: - if not isinstance(hook, session_run_hook.SessionRunHook): - raise TypeError('All hooks must be SessionRunHook instances, given: {}' - .format(hook)) - - return super(TPUEstimatorSpec, cls).__new__( - cls, - mode=mode, - predictions=predictions, - loss=loss, - train_op=train_op, - eval_metrics=eval_metrics, - export_outputs=export_outputs, - scaffold_fn=scaffold_fn, - host_call=host_call, - training_hooks=training_hooks, - evaluation_hooks=evaluation_hooks, - prediction_hooks=prediction_hooks) - - def as_estimator_spec(self): - """Creates an equivalent `EstimatorSpec` used by CPU train/eval.""" - host_calls = {} - if self.eval_metrics is not None: - host_calls['eval_metrics'] = self.eval_metrics - if self.host_call is not None: - host_calls['host_call'] = self.host_call - host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls) - eval_metric_ops = None - if self.eval_metrics is not None: - eval_metric_ops = host_call_ret['eval_metrics'] - hooks = None - if self.host_call is not None: - hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])] - if tensor_tracer.TensorTracer.is_enabled(): - tt = 
tensor_tracer.TensorTracer() - tracing_calls = tt.trace_cpu(ops.get_default_graph()) - tracing_call_ret = _OutfeedHostCall.create_cpu_hostcall(tracing_calls) - tracing_functions = tracing_call_ret.values() - if tracing_functions: - if hooks: - hooks.extend([_OutfeedHostCallHook(tracing_functions)]) - else: - hooks = [_OutfeedHostCallHook(tracing_functions)] - hooks = tuple(hooks or []) - scaffold = self.scaffold_fn() if self.scaffold_fn else None - return model_fn_lib.EstimatorSpec( - mode=self.mode, - predictions=self.predictions, - loss=self.loss, - train_op=self.train_op, - eval_metric_ops=eval_metric_ops, - export_outputs=self.export_outputs, - scaffold=scaffold, - training_hooks=self.training_hooks + hooks, - evaluation_hooks=self.evaluation_hooks + hooks, - prediction_hooks=self.prediction_hooks + hooks) - - -class _OpQueueContext(object): - """Manages work queue and thread for a infeed/outfeed thread.""" - - def __init__(self, name, target, args): - self._name = name - self._queue = Queue.Queue() - args = (self,) + args - self._thread = threading.Thread(name=name, target=target, args=args) - self._thread.daemon = True - self._thread.start() - - def stop(self): - self._queue.put(_SIGNAL.STOP) - - def send_next_batch_signal(self, iterations): - self._queue.put(iterations) - - def read_iteration_counts(self): - while True: - iterations = self._queue.get(block=True) - logging.debug('%s read iterations %s', self._name, iterations) - if iterations == _SIGNAL.STOP: - logging.info('%s received shutdown signal, stopping.', self._name) - return - yield iterations - - def join(self): - logging.info('Shutting down %s thread.', self._name) - self.stop() - self._thread.join() - - -class _OpSignalOnceQueueContext(_OpQueueContext): - """Manages work queue and thread for a infeed/outfeed thread. - - This subclass only signals once. - """ - - def __init__(self, name, target, args): - super(_OpSignalOnceQueueContext, self).__init__(name, target, args) - self._has_signaled = False - - def send_next_batch_signal(self, iterations): - if not self._has_signaled: - self._queue.put(iterations) - self._has_signaled = True - - -class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook): - """A Session hook setting up the TPU initialization, infeed, and outfeed. - - This hook does two major things: - 1. initialize and shutdown TPU system. - 2. launch and join the threads for infeed enqueue and (optional) outfeed - dequeue. 
- """ - - def __init__(self, - ctx, - enqueue_ops, - dequeue_ops, - tpu_compile_op, - run_infeed_loop_on_coordinator=True, - rendezvous=None, - master=None, - session_config=None): - self._master_job = ctx.master_job - self._enqueue_ops = enqueue_ops - self._dequeue_ops = dequeue_ops - self._rendezvous = rendezvous - self._master = master - self._session_config = session_config - self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator - self._initial_infeed_sleep_secs = ( - ctx.config.tpu_config.initial_infeed_sleep_secs) - - self._feed_error = None - self._finished = False - self._should_initialize_tpu = True - self._tpu_compile_op = tpu_compile_op - - def begin(self): - logging.info('TPU job name %s', self._master_job) - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - self._init_ops = [] - if self._should_initialize_tpu: - self._finalize_ops = [tpu.shutdown_system(job=self._master_job)] - else: - self._finalize_ops = [] - - summary_writer_init_ops = contrib_summary.summary_writer_initializer_op() - self._init_ops.extend(summary_writer_init_ops) - # Get all the writer resources from the initializer, so we know what to - # flush. - for op in summary_writer_init_ops: - self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) - - def _run_infeed(self, queue_ctx, session): - logging.info('Starting infeed thread controller.') - if self._initial_infeed_sleep_secs: - logging.info('Infeed thread sleeping for %d seconds.', - self._initial_infeed_sleep_secs) - time.sleep(self._initial_infeed_sleep_secs) - logging.info('Infeed thread starting after sleep') - - with self._rendezvous.catch_errors(source='infeed', session=session): - if self._run_infeed_loop_on_coordinator: - for count, steps in enumerate(queue_ctx.read_iteration_counts()): - for i in xrange(steps): - logging.debug('Infeed enqueue for iteration (%d, %d)', count, i) - session.run(self._enqueue_ops) - else: - for _ in queue_ctx.read_iteration_counts(): - session.run(self._enqueue_ops) - logging.info('Infeed thread finished, shutting down.') - - def _run_outfeed(self, queue_ctx, session): - logging.info('Starting outfeed thread controller.') - with self._rendezvous.catch_errors(source='outfeed', session=session): - for count, steps in enumerate(queue_ctx.read_iteration_counts()): - for i in xrange(steps): - logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i) - session.run(self._dequeue_ops) - logging.info('Outfeed thread finished, shutting down.') - - def _create_infeed_controller(self, name, target, args): - return _OpQueueContext(name=name, target=target, args=args) - - def _assertCompilationSucceeded(self, result, coord): - proto = tpu_compilation_result.CompilationResultProto() - proto.ParseFromString(result) - if proto.status_error_message: - logging.error('Compilation failed: {}'.format(proto.status_error_message)) - coord.request_stop() - else: - logging.info('Compilation succeeded') - - def after_create_session(self, session, coord): - if self._should_initialize_tpu: - logging.info('Init TPU system') - start = time.time() - with ops.Graph().as_default(): - with tf_session.Session( - self._master, config=self._session_config) as sess: - sess.run(tpu.initialize_system(job=self._master_job)) - logging.info('Initialized TPU in %d seconds', time.time() - start) - - session.run(self._init_ops, - options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000)) - - if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1': - logging.info('Compiling user program: this may take 
a while...') - self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord) - - self._infeed_controller = self._create_infeed_controller( - name='InfeedController', target=self._run_infeed, args=(session,)) - - self._outfeed_controller = _OpQueueContext( - name='OutfeedController', target=self._run_outfeed, args=(session,)) - - # Enable the worker watchdog to terminate workers on coordinator exit. - watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0')) - if watchdog_timeout > 0: - session_support.start_worker_watchdog(session, - shutdown_timeout=watchdog_timeout) - - def before_run(self, run_context): - self._feed_error = None - - iterations = run_context.session.run(self._iterations_per_loop_var) - - logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations) - self._infeed_controller.send_next_batch_signal(iterations) - - logging.info('Dequeue next (%d) batch(es) of data from outfeed.', - iterations) - self._outfeed_controller.send_next_batch_signal(iterations) - - def end(self, session): - self._finished = True - logging.info('Stop infeed thread controller') - self._infeed_controller.join() - self._rendezvous.record_done('infeed') - - logging.info('Stop output thread controller') - self._outfeed_controller.join() - self._rendezvous.record_done('outfeed') - - logging.info('Shutdown TPU system.') - session.run(self._finalize_ops) - - -class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook): - - def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, - rendezvous=None, master=None, session_config=None): - super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__( - ctx, - enqueue_ops, - dequeue_ops, - tpu_compile_op=tpu_compile_op, - run_infeed_loop_on_coordinator=False, - rendezvous=rendezvous, - master=master, - session_config=session_config) - - def _create_infeed_controller(self, name, target, args): - return _OpSignalOnceQueueContext(name=name, target=target, args=args) - - -class _TPUStopAtStepHook(session_run_hook.SessionRunHook): - """Hook that requests stop at a specified step. - - This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with - following differences for TPU training: - - 1. This hook sets the variable for iterations_per_loop, which is used by - `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed. - As the hook execution order is not guaranteed, the variable update is - handled in `after_create_session` and `after_run` as - `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`. - - 2. For each training loop (session.run), the global step could be increased - multiple times on TPU. The global step tensor value will be explicitly read - again in `after_run` to ensure the latest value is retrieved to avoid race - condition. - """ - - def __init__(self, iterations, num_steps=None, last_step=None): - """Initializes a `StopAtStepHook`. - - Args: - iterations: The number of iterations to run optimizer per training loop. - num_steps: Number of steps to execute. - last_step: Step after which to stop. - - Raises: - ValueError: If one of the arguments is invalid. 
- """ - if num_steps is None and last_step is None: - raise ValueError('One of num_steps or last_step must be specified.') - if num_steps is not None and last_step is not None: - raise ValueError('Only one of num_steps or last_step can be specified.') - self._num_steps = num_steps - self._last_step = last_step - self._iterations = iterations - - def _next_iterations(self, global_step, last_step): - gap = last_step - global_step - return min(gap, self._iterations) - - def begin(self): - self._global_step_tensor = training_util.get_global_step() - if self._global_step_tensor is None: - raise RuntimeError('Global step should be created.') - - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - - def after_create_session(self, session, coord): - global_step = session.run(self._global_step_tensor) - if self._last_step is None: - self._last_step = global_step + self._num_steps - - iterations = self._next_iterations(global_step, self._last_step) - - self._iterations_per_loop_var.load(iterations, session=session) - - def after_run(self, run_context, run_values): - # Global step cannot be retrieved via SessionRunArgs and before_run due to - # race condition. - global_step = run_context.session.run(self._global_step_tensor) - if global_step >= self._last_step: - run_context.request_stop() - else: - iterations = self._next_iterations(global_step, self._last_step) - self._iterations_per_loop_var.load( - iterations, session=run_context.session) - - -class _SetEvalIterationsHook(session_run_hook.SessionRunHook): - """Hook that requests stop at a specified step.""" - - def __init__(self, num_steps): - """Initializes a `_SetEvalIterationsHook`. - - Args: - num_steps: Number of steps to execute. - """ - self._num_steps = num_steps - - def begin(self): - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - - def after_create_session(self, session, coord): - self._iterations_per_loop_var.load(self._num_steps, session=session) - - -class _StoppingPredictHook(session_run_hook.SessionRunHook): - """Hook that requests stop according to the stopping signal in prediction.""" - - def __init__(self, scalar_stopping_signal): - self._scalar_stopping_signal = scalar_stopping_signal - - def begin(self): - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - - def after_create_session(self, session, coord): - # This is not necessary as we do not run infeed enqueue and outfeed dequeue - # in side threads for prediction model. But it makes the - # TPUInfeedOutfeedSessionHook prints nice message. - self._iterations_per_loop_var.load(1, session=session) - - def before_run(self, run_context): - return session_run_hook.SessionRunArgs(self._scalar_stopping_signal) - - def after_run(self, run_context, run_values): - _ = run_context - scalar_stopping_signal = run_values.results - if _StopSignals.should_stop(scalar_stopping_signal): - # NOTE(xiejw): In prediction, stopping signals are inserted for each - # batch. And we append one more batch to signal the system it should stop. - # The data flow might look like - # - # batch 0: images, labels, stop = 0 (user provided) - # batch 1: images, labels, stop = 0 (user provided) - # ... - # batch 99: images, labels, stop = 0 (user provided) - # batch 100: images, labels, stop = 1 (TPUEstimator appended) - # - # where the final batch (id = 100) is appended by TPUEstimator, so we - # should drop it before returning the predictions to user. - # To achieve that, we throw the OutOfRangeError in after_run. 
Once - # Monitored Session sees this error in SessionRunHook.after_run, the - # "current" prediction, i.e., batch with id=100, will be discarded - # immediately - raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.') - - -def generate_per_core_enqueue_ops_fn_for_host( - ctx, input_fn, inputs_structure_recorder, host_device, host_id): - """Generates infeed enqueue ops for per-core input_fn on a single host.""" - captured_infeed_queue = _CapturedObject() - tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) - - def enqueue_ops_fn(): - """A fn returns enqueue_ops.""" - num_cores_per_host = ctx.num_of_cores_per_host - per_host_sharded_inputs = [] - for core_ordinal in range(num_cores_per_host): - with ops.name_scope('ordinal_%d' % (core_ordinal)): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, - input_device=host_device, - invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - if inputs.is_dataset: - raise TypeError( - '`input_fn` returning `Dataset` is not yet supported in ' - 'per-Core input pipeline deployment yet. Please set ' - 'TPUConfig.per_host_input_for_training to True or return ' - '`features` and `labels` from `input_fn`') - features, labels = inputs.features_and_labels() - - inputs_structure_recorder.validate_and_record_structure( - features, labels) - flattened_inputs = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels)) - per_host_sharded_inputs.append(flattened_inputs) - - infeed_queue = tpu_feed.InfeedQueue( - number_of_tuple_elements=len(per_host_sharded_inputs[0])) - captured_infeed_queue.capture(infeed_queue) - - per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( - per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) - return per_host_enqueue_ops - - return enqueue_ops_fn, captured_infeed_queue - - -def generate_per_host_enqueue_ops_fn_for_host( - ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id): - """Generates infeed enqueue ops for per-host input_fn on a single host.""" - captured_infeed_queue = _CapturedObject() - - dataset_initializer = None - - with ops.device(device): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, input_device=device, invocation_index=host_id) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - - is_dataset = inputs.is_dataset - if ctx.mode == model_fn_lib.ModeKeys.PREDICT: - if not is_dataset: - raise TypeError( - 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' - '`features` and `labels`.') - if batch_axis is not None: - raise TypeError('For mode PREDICT, batch_axis is not supported yet.') - inputs = _InputsWithStoppingSignals( - dataset=inputs.dataset, - batch_size=ctx.batch_size_for_input_fn, - add_padding=True) - - if is_dataset: - dataset_initializer = inputs.dataset_initializer() - - tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) - - def enqueue_ops_fn(): - """A Fn returning the TPU infeed enqueue ops. - - By providing as a Fn, it can be invoked inside the tf.while_loop such that - the input pipeline for multiple iterations can be executed by one - Session.run call. - - Returns: - list of dict of ops. - """ - with ops.device(device): - num_of_replicas_per_host = ctx.num_of_replicas_per_host - # Convert user input to features and labels. 
If the user returns a - # dataset, it is initialized and the features and labels extracted via - # `dataset.iterator.get_next()` - features, labels = inputs.features_and_labels() - signals = inputs.signals() - - inputs_structure_recorder.validate_and_record_structure(features, labels) - unsharded_tensor_list = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels, signals)) - - infeed_queue = tpu_feed.InfeedQueue( - tuple_types=[t.dtype for t in unsharded_tensor_list], - tuple_shapes=[t.shape for t in unsharded_tensor_list], - shard_dimensions=batch_axis) - captured_infeed_queue.capture(infeed_queue) - infeed_queue.set_number_of_shards(num_of_replicas_per_host) - per_host_enqueue_ops = ( - infeed_queue.split_inputs_and_generate_enqueue_ops( - unsharded_tensor_list, - placement_function=lambda x: device, - tpu_ordinal_function=tpu_ordinal_function_impl)) - if signals is None: - return per_host_enqueue_ops - else: - return { - 'ops': per_host_enqueue_ops, - 'signals': signals, - } - - return enqueue_ops_fn, captured_infeed_queue, dataset_initializer - - -def generate_per_host_v2_enqueue_ops_fn_for_host( - ctx, input_fn, inputs_structure_recorder, device, host_id): - """Generates infeed enqueue ops for per-host input_fn on a single host.""" - captured_infeed_queue = _CapturedObject() - dataset_initializer = None - - with ops.device(device): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, input_device=device, invocation_index=host_id) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - - is_dataset = inputs.is_dataset - if not is_dataset: - raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 ' - 'input pipeline configuration.') - - if ctx.mode == model_fn_lib.ModeKeys.PREDICT: - inputs = _InputsWithStoppingSignals( - dataset=inputs.dataset, - batch_size=ctx.batch_size_for_input_fn, - add_padding=True, - num_invocations_per_step=ctx.num_of_replicas_per_host) - - dataset_initializer = inputs.dataset_initializer() - tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) - - def enqueue_ops_fn(): - """Generates the per_host enqueue ops.""" - control_deps = [] - per_host_sharded_inputs = [] - num_replicas_per_host = ctx.num_of_replicas_per_host - cached_signals = None - with ops.device(device): - if not inputs.is_dataset: - raise TypeError('`input_fn` must return a `Dataset` for this mode.') - for _ in range(num_replicas_per_host): - # Use control dependencies to ensure a deterministic ordering. - with ops.control_dependencies(control_deps): - features, labels = inputs.features_and_labels() # Calls get_next() - signals = inputs.signals() - - # All the replicas share the replica 0's stopping singal. - # This avoids inconsistent state among different model replcias. 
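# Concretely, the first replica's `signals` dict is cached and its 'stopping'
# tensor is re-used by every later replica on this host, so all replicas observe
# the same stop flag on the same step. A plain-Python sketch of the caching
# pattern (plain dicts stand in for the real signal tensors):
#
#     cached = None
#     for sig in replica_signals:        # one dict per replica, illustrative name
#         if cached:
#             sig['stopping'] = cached['stopping']
#         else:
#             cached = sig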
- if cached_signals: - signals['stopping'] = cached_signals['stopping'] - else: - cached_signals = signals - - inputs_structure_recorder.validate_and_record_structure( - features, labels) - flattened_inputs = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels, signals)) - control_deps.extend(flattened_inputs) - per_host_sharded_inputs.append(flattened_inputs) - - if inputs_structure_recorder.flattened_input_dims: - input_partition_dims = inputs_structure_recorder.flattened_input_dims - if signals: - input_partition_dims += [None] * len(signals) - # pylint: disable=protected-access - infeed_queue = tpu_feed._PartitionedInfeedQueue( - number_of_tuple_elements=len(per_host_sharded_inputs[0]), - host_id=host_id, - input_partition_dims=input_partition_dims, - device_assignment=ctx.device_assignment) - per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( - per_host_sharded_inputs) - else: - infeed_queue = tpu_feed.InfeedQueue( - number_of_tuple_elements=len(per_host_sharded_inputs[0])) - per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( - per_host_sharded_inputs, - tpu_ordinal_function=tpu_ordinal_function_impl) - captured_infeed_queue.capture(infeed_queue) - - if signals is None: - return per_host_enqueue_ops - else: - return { - 'ops': per_host_enqueue_ops, - 'signals': signals, - } - - return enqueue_ops_fn, captured_infeed_queue, dataset_initializer - - -def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder, - num_hosts): - """Generates infeed enqueue ops for one input_fn on all the hosts.""" - captured_infeed_queue = _CapturedObject() - dataset_initializer = None - device_0 = ctx.tpu_host_placement_function(host_id=0) - with ops.device(device_0): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, input_device=device_0, invocation_index=0) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - - is_dataset = inputs.is_dataset - if ctx.mode == model_fn_lib.ModeKeys.PREDICT: - if not is_dataset: - raise TypeError( - 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' - '`features` and `labels`.') - - inputs = _InputsWithStoppingSignals( - dataset=inputs.dataset, - batch_size=ctx.batch_size_for_input_fn, - add_padding=True) - - if is_dataset: - dataset_initializer = inputs.dataset_initializer() - num_replicas_per_host = ctx.num_of_replicas_per_host - - def tpu_ordinal_function_impl(replica_id): - if ctx.device_assignment: - return ctx.device_assignment.tpu_ordinal(replica=replica_id) - else: - return replica_id % num_replicas_per_host - - def device_function_impl(replica_id): - return ctx.tpu_host_placement_function(replica_id=replica_id) - - def enqueue_ops_fn(): - """Generates enqueue ops for all the hosts.""" - broadcasted_inputs = [] - flattened_inputs = None # Cache result from input_fn. - signals = None - for host_id in xrange(num_hosts): - with ops.device(ctx.tpu_host_placement_function(host_id=host_id)): - for _ in xrange(ctx.num_of_replicas_per_host): - # Note: input_fn is only called once at host 0 for the first replica. - # The features and labels returned from that invocation are - # broadcasted to other replicas(including the replicas on other - # hosts). 
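# In other words, broadcast mode materialises one batch of flattened input
# tensors and appends that same list once per (host, replica) slot, so the
# infeed later carries identical data to every replica. A rough sketch of the
# reuse (names and counts are illustrative):
#
#     flattened = None
#     broadcasted = []
#     for _ in range(num_hosts * replicas_per_host):
#         if flattened is None:
#             flattened = build_inputs_once()
#         broadcasted.append(flattened)
#     # every entry of `broadcasted` is the same object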
- if flattened_inputs is None: - features, labels = inputs.features_and_labels() # Calls get_next() - signals = inputs.signals() - - inputs_structure_recorder.validate_and_record_structure( - features, labels) - flattened_inputs = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels, signals)) - broadcasted_inputs.append(flattened_inputs) - - infeed_queue = tpu_feed.InfeedQueue( - number_of_tuple_elements=len(broadcasted_inputs[0])) - captured_infeed_queue.capture(infeed_queue) - enqueue_ops = infeed_queue.generate_enqueue_ops( - broadcasted_inputs, - tpu_ordinal_function=tpu_ordinal_function_impl, - placement_function=device_function_impl) - - if signals is None: - return enqueue_ops - else: - return { - 'ops': enqueue_ops, - 'signals': signals, - } - - return enqueue_ops_fn, captured_infeed_queue, dataset_initializer - - -class _InputPipeline(object): - """`_InputPipeline` handles invoking `input_fn` and piping to infeed queue. - - `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from - call site. To be precise, based on the configuration in - `_InternalTPUContext`, it invokes `input_fn` for all cores (usually - multi-host TPU training) or for one host (usually for single-host TPU - evaluation), and sends all `features` and `labels` returned by `input_fn` to - TPU infeed. For per-core invocation, `features` and `labels` are piped to - infeed directly, one tuple for each core. For per-host invocation, `features` - and `labels` are split at host (with respect to `batch_axis`) and piped to all - cores accordingly. - - In addition, flatten/unflatten are handled by `_InputPipeline` also. Model - inputs returned by the `input_fn` can have one of the following forms: - 1. features - 2. (features, labels) - 3. ((arbitrarily nested structure of features), labels) - - Internally, form 1 is reformed to `(features, None)` as features and labels - are passed separately to underlying methods. For TPU training, TPUEstimator - may expect multiple `features` and `labels` tuples one for each core. - - TPUEstimator allows various different structures for inputs (namely `features` - and `labels`). Both `features` and `labels` can be any nested sturcture - supported by TF nest (namely, dict, tuples, namedtuples or any nested - structure of such of Tensors). `labels` could be `None` as well. - - These are flattened before they are passed to the infeed/outfeed library - as that expectes flattend lists. - """ - - class InputsStructureRecorder(object): - """The recorder to record inputs structure.""" - - def __init__(self, input_partition_dims=None): - # Holds the structure of inputs - self._feature_structure = {} - self._flattened_input_dims = None - - if input_partition_dims: - # This should have been validated in TPUConfig. - assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.' - if len(input_partition_dims) == 2: - self._feature_dims, self._label_dims = input_partition_dims - else: - self._feature_dims = input_partition_dims[0] - self._label_dims = None - - assert self._feature_dims is not None, ('input_partition_dims[0] must ' - 'not be None') - else: - self._feature_dims = None - self._label_dims = None - - # Internal state. - self._initialized = False - - @property - def flattened_input_dims(self): - assert self._initialized, 'InputsStructureRecorder is not initialized.' 
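# (The flattened dims only become available after `validate_and_record_structure`
# has seen the first (features, labels) pair; until then the recorder has
# nothing to flatten, which is what the assertion above guards against.)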
- return self._flattened_input_dims - - def has_labels(self): - return 'labels' in self._feature_structure - - def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims, - label_dims_names, label_names, has_labels): - """Flatten input dims with the same order as flattened input tensors.""" - flattened_input_dims = [] - if feature_dims_names: - # We need a fixed ordering for matching the tensors in features. - flattened_input_dims.extend( - [feature_dims[name] for name in feature_dims_names]) - else: - flattened_input_dims.append(feature_dims) - - if label_dims_names: - # We need a fixed ordering for matching the tensors in labels. - flattened_input_dims.extend( - [label_dims[name] for name in label_dims_names]) - else: - if label_names: - num_tensors_in_label = len(label_names) - else: - num_tensors_in_label = int(has_labels) - # Setting `None` in input_partition_dims[1] will apply `None` to - # all the tensors in labels, regardless of internal structure. - flattened_input_dims.extend([label_dims] * num_tensors_in_label) - - return flattened_input_dims - - def validate_and_record_structure(self, features, labels): - """Validates and records the structure of `features` and `labels`.""" - # Extract structure. - has_labels = labels is not None - feature_names = _extract_key_names(features) - label_names = _extract_key_names(labels) - - if not self._initialized: - # Record structure. - self._initialized = True - if self._feature_dims is not None: - feature_dims_names = _extract_key_names(self._feature_dims) - if feature_dims_names != feature_names: - raise ValueError( - 'TPUConfig.input_partition_dims[0] mismatched feature' - ' keys. Expected {}, got {}'.format(feature_names, - feature_dims_names)) - - label_dims_names = _extract_key_names(self._label_dims) - if self._label_dims is not None and label_dims_names != label_names: - raise ValueError( - 'TPUConfig.input_partition_dims[1] mismatched label' - ' keys. Expected {}, got {}'.format(label_names, - label_dims_names)) - - self._flattened_input_dims = self._flatten_input_dims( - self._feature_dims, feature_dims_names, self._label_dims, - label_dims_names, label_names, has_labels) - - def flatten_features_and_labels(self, features, labels, signals=None): - """Flattens the `features` and `labels` to a single tensor list.""" - self._feature_structure['features'] = features - if labels is not None: - self._feature_structure['labels'] = labels - if signals is not None: - self._feature_structure['signals'] = signals - return data_nest.flatten(self._feature_structure) - - def unflatten_features_and_labels(self, flattened_inputs): - """Restores the flattened inputs to original features and labels form. - - Args: - flattened_inputs: Flattened inputs for each shard. - - Returns: - A tuple of (`features`, `labels`), where `labels` could be None. - Each one, if present, should have identical structure (single tensor vs - dict) as the one returned by input_fn. - - Raises: - ValueError: If the number of expected tensors from `flattened_inputs` - mismatches the recorded structure. - """ - - unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure, - flattened_inputs) - return _Inputs( - unflattened_inputs['features'], - unflattened_inputs.get('labels'), - signals=unflattened_inputs.get('signals')) - - def __init__(self, input_fn, batch_axis, ctx): - """Constructor. - - Args: - input_fn: input fn for train or eval. 
- batch_axis: A python tuple of int values describing how each tensor - produced by the Estimator `input_fn` should be split across the TPU - compute shards. - ctx: A `_InternalTPUContext` instance with mode. - - Raises: - ValueError: If both `sharded_features` and `num_cores` are `None`. - """ - self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder( - ctx.input_partition_dims) - - self._sharded_per_core = ctx.is_input_sharded_per_core() - self._input_fn = input_fn - self._infeed_queue = None - self._ctx = ctx - self._batch_axis = batch_axis - - def generate_infeed_enqueue_ops_and_dequeue_fn(self): - """Generates infeed enqueue ops and dequeue_fn.""" - # While tf.while_loop is called, the body function, which invokes - # `enqueue_fn` passed in, is called to construct the graph. So, input_fn - # structure is recorded. - enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = ( - self._invoke_input_fn_and_record_structure()) - - self._validate_input_pipeline() - - def dequeue_fn(): - """dequeue_fn is used by TPU to retrieve the tensors.""" - # In the model-parallel case, both the host-side and device-side - # computations must agree on the core on which infeed takes place. We - # choose to perform infeed on logical core 0 of each replica. - values = self._infeed_queue.generate_dequeue_op(tpu_device=0) - # The unflatten process uses the structure information recorded above. - return self._inputs_structure_recorder.unflatten_features_and_labels( - values) - - return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator) - - def _invoke_input_fn_and_record_structure(self): - """Deploys the input pipeline and record input structure.""" - enqueue_ops = [] - infeed_queues = [] - all_dataset_initializers = [] - num_hosts = self._ctx.num_hosts - tpu_host_placement_fn = self._ctx.tpu_host_placement_function - - run_infeed_loop_on_coordinator = True - - if self._sharded_per_core: - # Per-Core input pipeline deployment. - # Invoke input pipeline for each core and placed on the corresponding - # host. - for host_id in range(num_hosts): - host_device = tpu_host_placement_fn(host_id=host_id) - with ops.device(host_device): - with ops.name_scope('input_pipeline_task%d' % (host_id)): - enqueue_ops_fn, captured_infeed_queue = ( - generate_per_core_enqueue_ops_fn_for_host( - self._ctx, self._input_fn, self._inputs_structure_recorder, - host_device, host_id)) - - if _WRAP_INPUT_FN_INTO_WHILE_LOOP: - run_infeed_loop_on_coordinator = False - enqueue_ops.append( - _wrap_computation_in_while_loop( - device=host_device, op_fn=enqueue_ops_fn)) - else: - enqueue_ops.append(enqueue_ops_fn()) - # Infeed_queue_getter must be called after enqueue_ops_fn is called. - infeed_queues.append(captured_infeed_queue.get()) - - elif self._ctx.is_input_broadcast_with_iterators(): - # Only calls input_fn in host 0. 
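# The dataset therefore lives on host 0 only; the broadcast enqueue ops built
# below fan its tensors out to every replica on every host, so the remaining
# hosts never construct their own input pipeline.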
- host_device = tpu_host_placement_fn(host_id=0) - enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( - generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn, - self._inputs_structure_recorder, - num_hosts)) - if dataset_initializer: - all_dataset_initializers.append(dataset_initializer) - run_infeed_loop_on_coordinator = False - wrap_fn = ( - _wrap_computation_in_while_loop - if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else - _wrap_computation_in_while_loop_with_stopping_signals) - enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) - else: - enqueue_ops.append(enqueue_ops_fn()) - infeed_queues.append(captured_infeed_queue.get()) - else: - for host_id in range(num_hosts): - host_device = tpu_host_placement_fn(host_id=host_id) - with ops.device(host_device): - with ops.name_scope('input_pipeline_task%d' % (host_id)): - if self._ctx.is_input_per_host_with_iterators(): - enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( - generate_per_host_v2_enqueue_ops_fn_for_host( - self._ctx, self._input_fn, - self._inputs_structure_recorder, host_device, host_id)) - else: - enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( - generate_per_host_enqueue_ops_fn_for_host( - self._ctx, self._input_fn, - self._inputs_structure_recorder, self._batch_axis, - host_device, host_id)) - - # NOTE(xiejw): We dispatch here based on the return type of the - # users `input_fn`. - # - # 1. If input_fn returns a Dataset instance, we initialize the - # iterator outside of tf.while_loop, and call the iterator.get_next - # inside tf.while_loop. This should be always safe. - # - # 2. If input_fn returns (features, labels), it is too late to wrap - # them inside tf.while_loop, as resource initialization cannot be - # handled in TF control flow properly. In this case, we will use - # python loop to enqueue the data into TPU system. This may be - # slow compared to the previous case. - if dataset_initializer: - all_dataset_initializers.append(dataset_initializer) - run_infeed_loop_on_coordinator = False - wrap_fn = ( - _wrap_computation_in_while_loop - if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else - _wrap_computation_in_while_loop_with_stopping_signals) - enqueue_ops.append( - wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) - else: - enqueue_ops.append(enqueue_ops_fn()) - infeed_queues.append(captured_infeed_queue.get()) - # infeed_queue is used to generate dequeue ops. The only thing it uses for - # dequeue is dtypes and types. So, any one can be used. Here, grab the - # first one. - self._infeed_queue = infeed_queues[0] - return enqueue_ops, [ - util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers) - ], run_infeed_loop_on_coordinator - - def _validate_input_pipeline(self): - """Validates the input pipeline. - - Perform some sanity checks to log user friendly information. We should - error out to give users better error message. But, if - _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break - user code, so, log a warning. - - Raises: - RuntimeError: If the validation failed. - """ - if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS): - err_msg = ('Input pipeline contains one or more QueueRunners. ' - 'It could be slow and not scalable. 
Please consider ' - 'converting your input pipeline to use `tf.data` instead (see ' - 'https://www.tensorflow.org/guide/datasets for ' - 'instructions.') - if _WRAP_INPUT_FN_INTO_WHILE_LOOP: - raise RuntimeError(err_msg) - else: - logging.warn(err_msg) - - -class _ModelFnWrapper(object): - """A `model_fn` wrapper. - - This makes calling model_fn on CPU and TPU easier and more consistent and - performs necessary check and mutation required by TPU training and evaluation. - - In addition, this wrapper manages converting the `model_fn` to a single TPU - train and eval step. - """ - - def __init__(self, model_fn, train_cache_fn, eval_cache_fn, config, params, ctx): - self._model_fn = model_fn - self._train_cache_fn = train_cache_fn - self._eval_cache_fn = eval_cache_fn - self._config = config - self._params = params - self._ctx = ctx - - def call_without_tpu(self, features, labels, is_export_mode): - return self._call_model_fn(features, labels, is_export_mode=is_export_mode) - - def convert_to_single_tpu_train_step(self, dequeue_fn): - """Converts user provided model_fn` as a single train step on TPU. - - The user provided `model_fn` takes input tuple - (features, labels) and produces the EstimatorSpec with train_op and loss for - train `mode`. This usually represents a single train computation on CPU. - - For TPU training, a train (computation) step is first wrapped in a - tf.while_loop control flow to repeat for many times and then replicated to - all TPU shards. Besides the input should be taken from TPU infeed rather - than input pipeline (input_fn) directly. To fit TPU loop and replicate - pattern, the original train computation should be reformed, which is the - returned `train_step`. - - Args: - dequeue_fn: The function to retrieve inputs, features and labels, from TPU - infeed dequeue channel. - - Returns: - A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn - representing the train step for TPU. - """ - - host_call = _OutfeedHostCall(self._ctx) - captured_scaffold_fn = _CapturedObject() - captured_training_hooks = _CapturedObject() - - def train_step(loss, *cache): - """Training step function for use inside a while loop.""" - del loss # unused; required in function signature. - inputs = dequeue_fn() - features, labels = inputs.features_and_labels() - - # Consume the current cache - estimator_spec = self._verify_estimator_spec( - self._call_model_fn(features, labels, cache=cache)) - - # Retrieve the new returned cache - """ - `cache` consists of a list of tensors, potentially empty (of length 0) - """ - cache = estimator_spec.cache - loss, train_op = estimator_spec.loss, estimator_spec.train_op - - if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - captured_scaffold_fn.capture(estimator_spec.scaffold_fn) - else: - captured_scaffold_fn.capture(None) - - captured_training_hooks.capture(estimator_spec.training_hooks) - - tracing_ops = [] - if tensor_tracer.TensorTracer.is_enabled(): - tt = tensor_tracer.TensorTracer() - loss, tracing_ops = tt.trace_tpu(ops.get_default_graph(), loss, - self._ctx.num_replicas) - - # We must run train_op to update the variables prior to running the - # outfeed. 
- with ops.control_dependencies([train_op]+tracing_ops): - host_call_outfeed_ops = [] - if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access - and estimator_spec.host_call is not None): - host_call.record({'host_call': estimator_spec.host_call}) - host_call_outfeed_ops = host_call.create_enqueue_op() - with ops.control_dependencies(host_call_outfeed_ops): - return [array_ops.identity(loss)] + cache - - return (train_step, host_call, captured_scaffold_fn, - captured_training_hooks) - - def convert_to_single_tpu_eval_step(self, dequeue_fn): - """Converts user provided model_fn` as a single eval step on TPU. - - Similar to training, the user provided `model_fn` takes input tuple - (features, labels) and produces the TPUEstimatorSpec with eval_metrics for - eval `mode`. This usually represents a single evaluation computation on CPU. - - For TPU evaluation, a eval (computation) step is first wrapped in a - tf.while_loop control flow to repeat for many times and then replicated to - all TPU shards. Besides the input and output are slightly different. Input, - features and labels, should be taken from TPU infeed rather than input - pipeline (input_fn) directly. Output is managed in two stages. First, the - model outputs as the result of evaluation computation, usually model logits, - should be transferred from TPU system to CPU. Then, all model outputs are - concatenated first on CPU and sent to the metric_fn for metrics computation. - To fit TPU evaluation pattern, the original eval computation should be - reformed, which is the returned `eval_step`. - - Args: - dequeue_fn: The function to retrieve inputs, features and labels, from TPU - infeed dequeue channel. - - Returns: - A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn - representing the eval step for TPU. - """ - host_calls = _OutfeedHostCall(self._ctx) - captured_scaffold_fn = _CapturedObject() - captured_eval_hooks = _CapturedObject() - - def eval_step(total_loss, *cache): - """Evaluation step function for use inside a while loop.""" - inputs = dequeue_fn() - features, labels = inputs.features_and_labels() - - # Consume the current cache - tpu_estimator_spec = self._call_model_fn(features, labels, cache=cache) - if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - raise RuntimeError( - 'estimator_spec used by TPU evaluation must have type' - '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec))) - - # Retrieve the new returned cache - cache = tpu_estimator_spec.cache - loss = tpu_estimator_spec.loss - - captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) - captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks) - - to_record = {} - if tpu_estimator_spec.eval_metrics: - to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics - if tpu_estimator_spec.host_call is not None: - # We assume that evaluate won't update global step, so we don't wrap - # this host_call. - to_record['host_call'] = tpu_estimator_spec.host_call - host_calls.record(to_record) - - with ops.control_dependencies(host_calls.create_enqueue_op()): - return [math_ops.add(total_loss, loss)] + cache - - return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks - - def convert_to_single_tpu_predict_step(self, dequeue_fn): - """Converts user provided model_fn` as a single predict step on TPU. - - Args: - dequeue_fn: The function to retrieve inputs, features and labels, from TPU - infeed dequeue channel. 
- - Returns: - A tuple of predict_fn, host_calls, and captured scaffold_fn. The - predict_fn representing the predict step for TPU. - """ - host_calls = _OutfeedHostCall(self._ctx) - captured_scaffold_fn = _CapturedObject() - captured_predict_hooks = _CapturedObject() - - def predict_step(unused_scalar_stopping_signal): - """Evaluation step function for use inside a while loop.""" - inputs = dequeue_fn() - features, labels = inputs.features_and_labels() - stopping_signals = inputs.signals() - - assert stopping_signals is not None, ( - 'Internal Error: `signals` is missing.') - - tpu_estimator_spec = self._call_model_fn( - features, labels, is_export_mode=False) - if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - raise RuntimeError( - 'estimator_spec used by TPU prediction must have type' - '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec))) - - self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions) - - captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) - captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks) - to_record = {} - identity_fn = lambda **kwargs: kwargs - to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions] - to_record['signals'] = [identity_fn, stopping_signals] - if tpu_estimator_spec.host_call is not None: - to_record['host_call'] = tpu_estimator_spec.host_call - host_calls.record(to_record) - - with ops.control_dependencies(host_calls.create_enqueue_op()): - return _StopSignals.as_scalar_stopping_signal(stopping_signals) - - return (predict_step, host_calls, captured_scaffold_fn, - captured_predict_hooks) - - def _verify_tpu_spec_predictions(self, predictions): - """Validates TPUEstimatorSpec.predictions dict.""" - # TODO(xiejw): Adds validation for prediction dictionrary. - # TODO(xiejw): Adds support for single tensor as predictions. - if not isinstance(predictions, dict): - raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.') - - for (key, tensor) in predictions.items(): - if tensor.shape.dims[0].value is None: - raise ValueError( - 'The tensor with key ({}) in TPUEstimatorSpec.predictions has ' - 'dynamic shape (should be static). Tensor: {}'.format(key, tensor)) - return predictions - - def _validate_model_features_and_labels(self, features, labels, - is_export_mode): - """Validates that the features and labels for the model function are valid. - - A valid features/labels object is the one with: - - Type: A tensor or any nested structure of tensors supported by TF nest, - namely nested dictionary, tuple, namedtuple, or sequence of tensors. - - Static shape if is_export_mode is False. - - Args: - features: the features that would be input to the model function. - labels: the labels that would be input to the model function. - is_export_mode: boolean value specifying if in export mode. - - Raises: - TypeError: If features/labels are not of the correct type. - ValueError: If features/labels have dynamic shape. - """ - - def validate(obj, obj_name): - """Helper validate function.""" - if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode): - return - if isinstance(obj, ops.Tensor): - if not obj.get_shape().is_fully_defined(): - raise ValueError( - 'The {} to the model returned by input_fn must have static shape.' 
- ' Tensor: {}'.format(obj_name, obj)) - else: - for tensor in data_nest.flatten(obj): - if not tensor.get_shape().is_fully_defined(): - raise ValueError( - ('The {} to the model returned by input_fn must have static ' - 'shape. Tensor: {}').format(obj_name, tensor)) - - validate(features, 'features') - if labels is not None: - validate(labels, 'labels') - - def _call_model_fn(self, features, labels, cache=None, is_export_mode=False): - """Calls the model_fn with required parameters.""" - self._validate_model_features_and_labels(features, labels, is_export_mode) - model_fn_args = function_utils.fn_args(self._model_fn) - kwargs = {} - - # Makes deep copy with `config` and params` in case user mutates them. - config = copy.deepcopy(self._config) - params = copy.deepcopy(self._params) - - if 'labels' in model_fn_args: - kwargs['labels'] = labels - elif labels is not None: - raise ValueError( - 'model_fn does not take labels, but input_fn returns labels.') - if 'mode' in model_fn_args: - kwargs['mode'] = self._ctx.mode - if 'config' in model_fn_args: - kwargs['config'] = config - if 'params' in model_fn_args: - kwargs['params'] = params - - if cache is not None: - params['cache'] = cache - - if 'params' not in model_fn_args: - raise ValueError('model_fn ({}) does not include params argument, ' - 'required by TPUEstimator to pass batch size as ' - 'params[\'batch_size\']'.format(self._model_fn)) - - if is_export_mode: - batch_size_for_model_fn = None - else: - batch_size_for_model_fn = self._ctx.batch_size_for_model_fn - - if batch_size_for_model_fn is not None: - _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn) - - running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode) - _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu) - - if not running_on_cpu: - user_context = tpu_context.TPUContext( - internal_ctx=self._ctx, call_from_input_fn=False) - _add_item_to_params(params, _CTX_KEY, user_context) - - estimator_spec = self._model_fn(features=features, **kwargs) - if (running_on_cpu and - isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access - # The estimator_spec will be passed to `Estimator` directly, which expects - # type `EstimatorSpec`. - return estimator_spec.as_estimator_spec() - else: - return estimator_spec - - def _verify_estimator_spec(self, estimator_spec): - """Validates the estimator_spec.""" - if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - return estimator_spec - - err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.' - if estimator_spec.training_chief_hooks: - raise ValueError( - err_msg.format('training_chief_hooks') + 'If you want' + - ' to pass training hooks, please pass via training_hooks.') - - if estimator_spec.scaffold: - logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. ' - 'Please use TPUEstimatorSpec.') - return estimator_spec - - -class _OutfeedHostCall(object): - """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec.""" - - def __init__(self, ctx): - self._ctx = ctx - self._names = [] - # All of these are dictionaries of lists keyed on the name. 
- self._host_fns = {} - self._tensor_keys = collections.defaultdict(list) - self._tensors = collections.defaultdict(list) - self._tensor_dtypes = collections.defaultdict(list) - self._tensor_shapes = collections.defaultdict(list) - - @staticmethod - def validate(host_calls): - """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.""" - - for name, host_call in host_calls.items(): - if not isinstance(host_call, (tuple, list)): - raise ValueError('{} should be tuple or list'.format(name)) - if len(host_call) != 2: - raise ValueError('{} should have two elements.'.format(name)) - if not callable(host_call[0]): - raise TypeError('{}[0] should be callable.'.format(name)) - if not isinstance(host_call[1], (tuple, list, dict)): - raise ValueError('{}[1] should be tuple or list, or dict.'.format(name)) - - if isinstance(host_call[1], (tuple, list)): - fullargspec = tf_inspect.getfullargspec(host_call[0]) - fn_args = function_utils.fn_args(host_call[0]) - # wrapped_hostcall_with_global_step uses varargs, so we allow that. - if fullargspec.varargs is None and len(host_call[1]) != len(fn_args): - raise RuntimeError( - 'In TPUEstimatorSpec.{}, length of tensors {} does not match ' - 'method args of the function, which takes {}.'.format( - name, len(host_call[1]), len(fn_args))) - - @staticmethod - def create_cpu_hostcall(host_calls): - """Runs on the host_call on CPU instead of TPU when use_tpu=False.""" - - _OutfeedHostCall.validate(host_calls) - ret = {} - for name, host_call in host_calls.items(): - host_fn, tensors = host_call - if isinstance(tensors, (tuple, list)): - ret[name] = host_fn(*tensors) - else: - # Must be dict. - try: - ret[name] = host_fn(**tensors) - except TypeError as e: - logging.warning( - 'Exception while calling %s: %s. It is likely the tensors ' - '(%s[1]) do not match the ' - 'function\'s arguments', name, e, name) - raise e - return ret - - def record(self, host_calls): - """Records the host_call structure.""" - - for name, host_call in host_calls.items(): - host_fn, tensor_list_or_dict = host_call - self._names.append(name) - self._host_fns[name] = host_fn - - if isinstance(tensor_list_or_dict, dict): - for (key, tensor) in six.iteritems(tensor_list_or_dict): - self._tensor_keys[name].append(key) - self._tensors[name].append(tensor) - self._tensor_dtypes[name].append(tensor.dtype) - self._tensor_shapes[name].append(tensor.shape) - else: - # List or tuple. - self._tensor_keys[name] = None - for tensor in tensor_list_or_dict: - self._tensors[name].append(tensor) - self._tensor_dtypes[name].append(tensor.dtype) - self._tensor_shapes[name].append(tensor.shape) - - def create_enqueue_op(self): - """Create the op to enqueue the recorded host_calls. - - Returns: - A list of enqueue ops, which is empty if there are no host calls. - """ - if not self._names: - return [] - - tensors = [] - # TODO(jhseu): Consider deduping tensors. - for name in self._names: - tensors.extend(self._tensors[name]) - - with ops.device(tpu.core(0)): - return [tpu_ops.outfeed_enqueue_tuple(tensors)] - - def create_tpu_hostcall(self): - """Sends the tensors through outfeed and runs the host_fn on CPU. - - The tensors are concatenated along dimension 0 to form a global tensor - across all shards. The concatenated function is passed to the host_fn and - executed on the first host. - - Returns: - A dictionary mapping name to the return type of the host_call by that - name. - - Raises: - RuntimeError: If outfeed tensor is scalar. 
- """ - if not self._names: - return {} - - ret = {} - # For each i, dequeue_ops[i] is a list containing the tensors from all - # shards. This list is concatenated later. - dequeue_ops = [] - tensor_dtypes = [] - tensor_shapes = [] - for name in self._names: - for _ in self._tensors[name]: - dequeue_ops.append([]) - for dtype in self._tensor_dtypes[name]: - tensor_dtypes.append(dtype) - for shape in self._tensor_shapes[name]: - tensor_shapes.append(shape) - - # Outfeed ops execute on each replica's first logical core. Note: we must - # constraint it such that we have at most one outfeed dequeue and enqueue - # per replica. - for i in xrange(self._ctx.num_replicas): - host_device, ordinal_id = self._ctx.device_for_replica(i) - with ops.device(host_device): - outfeed_tensors = tpu_ops.outfeed_dequeue_tuple( - dtypes=tensor_dtypes, - shapes=tensor_shapes, - device_ordinal=ordinal_id) - for j, item in enumerate(outfeed_tensors): - dequeue_ops[j].append(item) - - # Deconstruct dequeue ops. - dequeue_ops_by_name = {} - pos = 0 - for name in self._names: - dequeue_ops_by_name[name] = dequeue_ops[pos:pos + - len(self._tensors[name])] - pos += len(self._tensors[name]) - - # It is assumed evaluation always happens on single host TPU system. So, - # place all ops on tpu host if possible. - # - # TODO(jhseu): Evaluate whether this is right for summaries. - with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)): - for name in self._names: - dequeue_ops = dequeue_ops_by_name[name] - for i, item in enumerate(dequeue_ops): - if dequeue_ops[i][0].shape.ndims == 0: - raise RuntimeError( - 'All tensors outfed from TPU should preserve batch size ' - 'dimension, but got scalar {}'.format(dequeue_ops[i][0])) - # TODO(xiejw): Allow users to specify the axis for batch size - # dimension. - dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0) - - if self._tensor_keys[name] is not None: - # The user-provided eval_metrics[1] is a dict. - dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops)) - try: - ret[name] = self._host_fns[name](**dequeue_ops) - except TypeError as e: - logging.warning( - 'Exception while calling %s: %s. It is likely the tensors ' - '(%s[1]) do not match the ' - 'function\'s arguments', name, e, name) - raise e - else: - ret[name] = self._host_fns[name](*dequeue_ops) - - return ret - - -class _OutfeedHostCallHook(session_run_hook.SessionRunHook): - """Hook to run host calls when use_tpu=False.""" - - def __init__(self, tensors): - self._tensors = tensors - - def begin(self): - # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than - # create a separate hook to guarantee execution order, because summaries - # need to be initialized before the outfeed thread starts. - # TODO(jhseu): Make a wrapper hook instead? - self._init_ops = contrib_summary.summary_writer_initializer_op() - # Get all the writer resources from the initializer, so we know what to - # flush. 
- self._finalize_ops = [] - for op in self._init_ops: - self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) - - def after_create_session(self, session, coord): - session.run(self._init_ops) - - def before_run(self, run_context): - return basic_session_run_hooks.SessionRunArgs(self._tensors) - - def end(self, session): - session.run(self._finalize_ops) - - -class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook): - """Calculate and report global_step/sec and examples/sec during runtime.""" - - def __init__(self, - batch_size, - every_n_steps=100, - every_n_secs=None, - output_dir=None, - summary_writer=None): - self._batch_size = batch_size - super(ExamplesPerSecondHook, self).__init__( - every_n_steps=every_n_steps, - every_n_secs=every_n_secs, - output_dir=output_dir, - summary_writer=summary_writer) - - def _log_and_record(self, elapsed_steps, elapsed_time, global_step): - global_step_per_sec = elapsed_steps / elapsed_time - examples_per_sec = self._batch_size * global_step_per_sec - if self._summary_writer is not None: - global_step_summary = Summary(value=[ - Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec) - ]) - example_summary = Summary(value=[ - Summary.Value(tag='examples/sec', simple_value=examples_per_sec) - ]) - self._summary_writer.add_summary(global_step_summary, global_step) - self._summary_writer.add_summary(example_summary, global_step) - logging.info('global_step/sec: %g', global_step_per_sec) - logging.info('examples/sec: %g', examples_per_sec) - - -class InstallSignalHandlerHook(session_run_hook.SessionRunHook): - """Change SIGINT (CTRL^C) handler to force quit the process. - - The default behavior often results in hanging processes. - The original handler is restored after training/evaluation. - """ - - def __init__(self): - self._signal_fn = signal.getsignal(signal.SIGINT) - - def before_run(self, run_context): - signal.signal(signal.SIGINT, signal.SIG_DFL) - - def end(self, session): - signal.signal(signal.SIGINT, self._signal_fn) - - -class TPUEstimator(estimator_lib.Estimator): - """Estimator with TPU support. - - TPUEstimator also supports training on CPU and GPU. You don't need to define - a separate `tf.estimator.Estimator`. - - TPUEstimator handles many of the details of running on TPU devices, such as - replicating inputs and models for each core, and returning to host - periodically to run hooks. - - TPUEstimator transforms a global batch size in params to a per-shard batch - size when calling the `input_fn` and `model_fn`. Users should specify - global batch size in constructor, and then get the batch size for each shard - in `input_fn` and `model_fn` by `params['batch_size']`. - - - For training, `model_fn` gets per-core batch size; `input_fn` may get - per-core or per-host batch size depending on `per_host_input_for_training` - in `TPUConfig` (See docstring for TPUConfig for details). - - - For evaluation and prediction, `model_fn` gets per-core batch size and - `input_fn` get per-host batch size. - - Evaluation - ========== - - `model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics` - for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return - `EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case - the following discussion on TPU evaluation does not apply. - - `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where - `tensors` could be a list of any nested structure of `Tensor`s (See - `TPUEstimatorSpec` for details). 
`metric_fn` takes the `tensors` and returns - a dict from metric string name to the result of calling a metric function, - namely a `(metric_tensor, update_op)` tuple. - - One can set `use_tpu` to `False` for testing. All training, evaluation, and - predict will be executed on CPU. `input_fn` and `model_fn` will receive - `train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`. - - Current limitations: - -------------------- - - 1. TPU evaluation only works on a single host (one TPU worker) except - BROADCAST mode. - - 2. `input_fn` for evaluation should **NOT** raise an end-of-input exception - (`OutOfRangeError` or `StopIteration`). And all evaluation steps and all - batches should have the same size. - - Example (MNIST): - ---------------- - - ``` - # The metric Fn which runs on CPU. - def metric_fn(labels, logits): - predictions = tf.argmax(logits, 1) - return { - 'accuracy': tf.metrics.precision( - labels=labels, predictions=predictions), - } - - # Your model Fn which runs on TPU (eval_metrics is list in this example) - def model_fn(features, labels, mode, config, params): - ... - logits = ... - - if mode = tf.estimator.ModeKeys.EVAL: - return tpu_estimator.TPUEstimatorSpec( - mode=mode, - loss=loss, - eval_metrics=(metric_fn, [labels, logits])) - - # or specify the eval_metrics tensors as dict. - def model_fn(features, labels, mode, config, params): - ... - final_layer_output = ... - - if mode = tf.estimator.ModeKeys.EVAL: - return tpu_estimator.TPUEstimatorSpec( - mode=mode, - loss=loss, - eval_metrics=(metric_fn, { - 'labels': labels, - 'logits': final_layer_output, - })) - ``` - - Prediction - ========== - - Prediction on TPU is an experimental feature to support large batch inference. - It is not designed for latency-critical system. In addition, due to some - usability issues, for prediction with small dataset, CPU `.predict`, i.e., - creating a new `TPUEstimator` instance with `use_tpu=False`, might be more - convenient. - - Note: In contrast to TPU training/evaluation, the `input_fn` for prediction - *should* raise an end-of-input exception (`OutOfRangeError` or - `StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be - precise, the ops created by `input_fn` produce one batch of the data. - The `predict()` API processes one batch at a time. When reaching the end of - the data source, an end-of-input exception should be raised by one of these - operations. The user usually does not need to do this manually. As long as the - dataset is not repeated forever, the `tf.data` API will raise an end-of-input - exception automatically after the last batch has been produced. - - Note: Estimator.predict returns a Python generator. Please consume all the - data from the generator so that TPUEstimator can shutdown the TPU system - properly for user. - - Current limitations: - -------------------- - 1. TPU prediction only works on a single host (one TPU worker). - - 2. `input_fn` must return a `Dataset` instance rather than `features`. In - fact, .train() and .evaluate() also support Dataset as return value. 
- - Example (MNIST): - ---------------- - ``` - height = 32 - width = 32 - total_examples = 100 - - def predict_input_fn(params): - batch_size = params['batch_size'] - - images = tf.random_uniform( - [total_examples, height, width, 3], minval=-1, maxval=1) - - dataset = tf.data.Dataset.from_tensor_slices(images) - dataset = dataset.map(lambda images: {'image': images}) - - dataset = dataset.batch(batch_size) - return dataset - - def model_fn(features, labels, params, mode): - # Generate predictions, called 'output', from features['image'] - - if mode == tf.estimator.ModeKeys.PREDICT: - return tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - predictions={ - 'predictions': output, - 'is_padding': features['is_padding'] - }) - - tpu_est = TPUEstimator( - model_fn=model_fn, - ..., - predict_batch_size=16) - - # Fully consume the generator so that TPUEstimator can shutdown the TPU - # system. - for item in tpu_est.predict(input_fn=input_fn): - # Filter out item if the `is_padding` is 1. - # Process the 'predictions' - ``` - - Exporting - ========= - - `export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`, - and another with `tag_constants.SERVING` and `tag_constants.TPU`. - At serving time, these tags are used to select metagraph to load. - - Before running the graph on TPU, TPU system needs to be initialized. If - TensorFlow Serving model-server is used, this is done automatically. If - not, please call `session.run(tpu.initialize_system())`. - - `tpu.outside_compilation` can be used to wrap TPU incompatible ops in - `model_fn`. - - Example: - ---------------- - - ``` - def model_fn(features, labels, mode, config, params): - ... - logits = ... - export_outputs = { - 'logits': export_output_lib.PredictOutput( - {'logits': logits}) - } - - def host_call(logits): - class_ids = math_ops.argmax(logits) - classes = string_ops.as_string(class_ids) - export_outputs['classes'] = - export_output_lib.ClassificationOutput(classes=classes) - - tpu.outside_compilation(host_call, logits) - - ... - ``` - - """ - - def __init__(self, - model_fn=None, - train_cache_fn=None, - eval_cache_fn=None, - model_dir=None, - config=None, - params=None, - use_tpu=True, - train_batch_size=None, - eval_batch_size=None, - predict_batch_size=None, - batch_axis=None, - eval_on_tpu=True, - export_to_tpu=True, - warm_start_from=None): - """Constructs an `TPUEstimator` instance. - - Args: - model_fn: Model function as required by `Estimator` which returns - EstimatorSpec or TPUEstimatorSpec. `training_hooks`, 'evaluation_hooks', - and `prediction_hooks` must not capure any TPU Tensor inside the - model_fn. - model_dir: Directory to save model parameters, graph and etc. This can - also be used to load checkpoints from the directory into a estimator to - continue training a previously saved model. If `None`, the model_dir in - `config` will be used if set. If both are set, they must be same. If - both are `None`, a temporary directory will be used. - config: An `tpu_config.RunConfig` configuration object. Cannot be `None`. - params: An optional `dict` of hyper parameters that will be passed into - `input_fn` and `model_fn`. Keys are names of parameters, values are - basic python types. There are reserved keys for `TPUEstimator`, - including 'batch_size'. - use_tpu: A bool indicating whether TPU support is enabled. Currently, - - TPU training and evaluation respect this bit, but eval_on_tpu can - override execution of eval. See below. - Predict still happens on CPU. 
- train_batch_size: An int representing the global training batch size. - TPUEstimator transforms this global batch size to a per-shard batch - size, as params['batch_size'], when calling `input_fn` and `model_fn`. - Cannot be `None` if `use_tpu` is `True`. Must be divisible by total - number of replicas. - eval_batch_size: An int representing evaluation batch size. Must be - divisible by total number of replicas. - predict_batch_size: An int representing the prediction batch size. Must be - divisible by total number of replicas. - batch_axis: A python tuple of int values describing how each tensor - produced by the Estimator `input_fn` should be split across the TPU - compute shards. For example, if your input_fn produced (images, labels) - where the images tensor is in `HWCN` format, your shard dimensions would - be [3, 0], where 3 corresponds to the `N` dimension of your images - Tensor, and 0 corresponds to the dimension along which to split the - labels to match up with the corresponding images. If None is supplied, - and per_host_input_for_training is True, batches will be sharded based - on the major dimension. If tpu_config.per_host_input_for_training is - False or `PER_HOST_V2`, batch_axis is ignored. - eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the - model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`. - export_to_tpu: If True, `export_savedmodel()` exports a metagraph for - serving on TPU besides the one on CPU. - warm_start_from: Optional string filepath to a checkpoint or SavedModel to - warm-start from, or a `tf.estimator.WarmStartSettings` object to fully - configure warm-starting. If the string filepath is provided instead of - a `WarmStartSettings`, then all variables are warm-started, and it is - assumed that vocabularies and Tensor names are unchanged. - - Raises: - ValueError: `params` has reserved keys already. - """ - if config is None or not isinstance(config, tpu_config.RunConfig): - raise ValueError( - '`config` must be provided with type `tpu_config.RunConfig`') - - if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS): - raise ValueError('{} are reserved keys but existed in params {}.'.format( - _RESERVED_PARAMS_KEYS, params)) - - if use_tpu: - # Perform some very basic validations. More validations will be found in - # _InternalTPUContext. - if train_batch_size is None: - raise ValueError('`train_batch_size` cannot be `None`') - util_lib.check_positive_integer(train_batch_size, 'train_batch_size') - - if (config.tpu_config.per_host_input_for_training is - tpu_config.InputPipelineConfig.PER_SHARD_V1 and - config.tpu_config.num_cores_per_replica): - raise ValueError( - 'Model parallelism only supports per host input for training. ' - 'Please adjust TPURunconfig.per_host_input_for_training.') - - if eval_batch_size is not None: - util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size') - - if predict_batch_size is not None: - util_lib.check_positive_integer(predict_batch_size, - 'predict_batch_size') - - # Verifies the model_fn signature according to Estimator framework. - estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access - # We cannot store config and params in this constructor as parent - # constructor might change them, such as assigning a temp dir for - # config.model_dir. 
- model_function = self._augment_model_fn( - model_fn, - train_cache_fn, - eval_cache_fn, - batch_axis) - - # Overwrite log_step_count_steps to disable TensorLoggingHook and - # StepCounterHook from being created in Estimator. TPUEstimator already - # added equivalent hooks in _augment_model_fn above. - self._log_every_n_steps = config.log_step_count_steps - config = config.replace(log_step_count_steps=None) - - # Passing non-None params as wrapped model_fn has it. - params = params or {} - super(TPUEstimator, self).__init__( - model_fn=model_function, - model_dir=model_dir, - config=config, - params=params, - warm_start_from=warm_start_from) - self._iterations_per_training_loop = ( - self._config.tpu_config.iterations_per_loop) - - # All properties passed to _InternalTPUContext are immutable. - # pylint: disable=protected-access - self._ctx = tpu_context._get_tpu_context( - self._config, train_batch_size, eval_batch_size, predict_batch_size, - use_tpu, eval_on_tpu) - - self._export_to_tpu = export_to_tpu - - self._is_input_fn_invoked = None - self._rendezvous = {} - - def _add_meta_graph_for_mode(self, - builder, - input_receiver_fn_map, - checkpoint_path, - save_variables=True, - mode=model_fn_lib.ModeKeys.PREDICT, - export_tags=None, - check_variables=True): - if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT: - raise NotImplementedError( - 'TPUEstimator only handles mode PREDICT for exporting ' - 'when `export_to_tpu` is `True`; ' - 'got {}.'.format(mode)) - - (super(TPUEstimator, self)._add_meta_graph_for_mode( - builder, - input_receiver_fn_map, - checkpoint_path, - save_variables, - mode=mode, - export_tags=export_tags, - check_variables=check_variables)) - - if self._export_to_tpu: - input_receiver_fn_map = { - _REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode] - } - export_tags = [tag_constants.SERVING, tag_constants.TPU] - mode = _REWRITE_FOR_INFERENCE_MODE - # See b/110052256 for why `check_variables` is `False`. - (super(TPUEstimator, self)._add_meta_graph_for_mode( - builder, - input_receiver_fn_map, - checkpoint_path, - save_variables=False, - mode=mode, - export_tags=export_tags, - check_variables=False)) - - def _call_model_fn(self, features, labels, mode, config): - if mode == _REWRITE_FOR_INFERENCE_MODE: - return self._call_model_fn_for_inference(features, labels, mode, config) - else: - return super(TPUEstimator, self)._call_model_fn(features, labels, mode, - config) - - def _call_model_fn_for_inference(self, features, labels, mode, config): - """Wraps `_call_model_fn` for `export_savedmodel`.""" - if mode != _REWRITE_FOR_INFERENCE_MODE: - raise ValueError('mode must be {}; ' - 'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode)) - - capture = _CapturedObject() - - def computation(): - """Compute tpu tensors used in export_outputs. - - Passed to rewrite_for_inference so that model_fn will be called under - the rewriting contexts. Only tpu tensors are returned, but export_outputs - and scaffold are captured. - - Returns: - A list of Tensors used in export_outputs and not marked for - outside_compilation. - """ - # We should only call model fn once and it should be inside `computation` - # so that building the graph will happen under `rewrite_for_inference`. - mode = model_fn_lib.ModeKeys.PREDICT - estimator_spec = self._call_model_fn(features, labels, mode, config) - - # We pick the TPU tensors out from `export_output` and later return them - # from `computation` for rewriting. 
- tensors_dict = collections.OrderedDict( - (k, _export_output_to_tensors(v)) - for k, v in six.iteritems(estimator_spec.export_outputs)) - tensors = nest.flatten(tensors_dict) - tpu_tensors = [t for t in tensors if t is not None] - - # We cannot return anything other than `tpu_tensors` here so we capture - # the rest for later use. - capture.capture((estimator_spec, tensors_dict, tensors)) - return tpu_tensors - - tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation) - estimator_spec, tensors_dict, tensors = capture.get() - - # Reconstruct `tensors`, but with `tpu_tensors` replaced with - # `tpu_tensors_on_cpu`. - new_tensors = [] - for t in tensors: - if t is None: - new_tensors.append(None) - else: - new_tensors.append(tpu_tensors_on_cpu.pop(0)) - - # Reconstruct `tensors_dict`. - new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors) - # Reconstruct `export_outputs`. - export_outputs = estimator_spec.export_outputs - new_export_outputs = collections.OrderedDict( - (k, _clone_export_output_with_tensors(export_outputs[k], v)) - for k, v in six.iteritems(new_tensors_dict)) - - return estimator_spec._replace(export_outputs=new_export_outputs) - - def _create_global_step(self, graph): - """Creates a global step suitable for TPUs. - - Args: - graph: The graph in which to create the global step. - - Returns: - A global step `Tensor`. - - Raises: - ValueError: if the global step tensor is already defined. - """ - return _create_global_step(graph) - - def _convert_train_steps_to_hooks(self, steps, max_steps): - with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx: - if ctx.is_running_on_cpu(): - return super(TPUEstimator, self)._convert_train_steps_to_hooks( - steps, max_steps) - - # On TPU. - if steps is None and max_steps is None: - raise ValueError( - 'For TPU training, one of `steps` or `max_steps` must be set. ' - 'Cannot be both `None`.') - - # Estimator.train has explicit positiveness check. - if steps is not None: - util_lib.check_positive_integer(steps, 'Train steps') - if max_steps is not None: - util_lib.check_positive_integer(max_steps, 'Train max_steps') - - return [ - _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps) - ] - - def _convert_eval_steps_to_hooks(self, steps): - with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx: - if ctx.is_running_on_cpu(): - return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps) - - if steps is None: - raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.') - - util_lib.check_positive_integer(steps, 'Eval steps') - - return [ - evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access - num_evals=steps), - _SetEvalIterationsHook(steps) - ] - - def _call_input_fn(self, input_fn, mode): - """Calls the input function. - - Args: - input_fn: The input function. - mode: ModeKeys - - Returns: - In TPU mode, returns an input_fn to be called later in model_fn. - Otherwise, calls the input_fn and returns either fatures or - (features, labels). - - Raises: - ValueError: if input_fn takes invalid arguments or does not have `params`. - """ - input_fn_args = function_utils.fn_args(input_fn) - config = self.config # a deep copy. - kwargs = {} - if 'params' in input_fn_args: - kwargs['params'] = self.params # a deep copy. 
- else: - raise ValueError('input_fn ({}) does not include params argument, ' - 'required by TPUEstimator to pass batch size as ' - 'params["batch_size"]'.format(input_fn)) - if 'config' in input_fn_args: - kwargs['config'] = config - - if 'mode' in input_fn_args: - kwargs['mode'] = mode - - # Records the fact input_fn has been invoked. - self._is_input_fn_invoked = True - - with self._ctx.with_mode(mode) as ctx: - # Setting the batch size in params first. This helps user to have same - # input_fn for use_tpu=True/False. - batch_size_for_input_fn = ctx.batch_size_for_input_fn - if batch_size_for_input_fn is not None: - _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY, - batch_size_for_input_fn) - - # For export_savedmodel, input_fn is never passed to Estimator. So, - # `is_export_mode` must be False. - if ctx.is_running_on_cpu(is_export_mode=False): - with ops.device('/device:CPU:0'): - return input_fn(**kwargs) - - # For TPU computation, input_fn should be invoked in a tf.while_loop for - # performance. While constructing the tf.while_loop, the structure of - # inputs returned by the `input_fn` needs to be recorded. The structure - # includes whether features or labels is dict or single Tensor, dict keys, - # tensor shapes, and dtypes. The recorded structure is used to create the - # infeed dequeue ops, which must be wrapped and passed as a Fn, called - # inside the TPU computation, as the TPU computation is wrapped inside a - # tf.while_loop also. So, we either pass input_fn to model_fn or pass - # dequeue_fn to model_fn. Here, `input_fn` is passed directly as - # `features` in `model_fn` signature. - def _input_fn(ctx): - _add_item_to_params(kwargs['params'], _CTX_KEY, ctx) - return input_fn(**kwargs) - - return _input_fn - - def _validate_features_in_predict_input(self, result): - """Skip the validation. - - For TPUEstimator, we do not need to check the result type. `_InputPipeline` - has stronger check. Parent class's check generates confusing warning msg. - - Args: - result: `features` returned by input_fn. 
- """ - pass - - def train(self, - input_fn, - hooks=None, - steps=None, - max_steps=None, - saving_listeners=None): - rendezvous = error_handling.ErrorRendezvous(num_sources=3) - self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous - try: - return super(TPUEstimator, self).train( - input_fn=input_fn, - hooks=hooks, - steps=steps, - max_steps=max_steps, - saving_listeners=saving_listeners) - except Exception: # pylint: disable=broad-except - rendezvous.record_error('training_loop', sys.exc_info()) - finally: - rendezvous.record_done('training_loop') - rendezvous.raise_errors() - - def evaluate(self, - input_fn, - steps=None, - hooks=None, - checkpoint_path=None, - name=None): - rendezvous = error_handling.ErrorRendezvous(num_sources=3) - self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous - try: - return super(TPUEstimator, self).evaluate( - input_fn, - steps=steps, - hooks=hooks, - checkpoint_path=checkpoint_path, - name=name) - except Exception: # pylint: disable=broad-except - rendezvous.record_error('evaluation_loop', sys.exc_info()) - finally: - rendezvous.record_done('evaluation_loop') - rendezvous.raise_errors() - - def predict(self, - input_fn, - predict_keys=None, - hooks=None, - checkpoint_path=None, - yield_single_examples=True): - rendezvous = error_handling.ErrorRendezvous(num_sources=3) - self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous - try: - for result in super(TPUEstimator, self).predict( - input_fn=input_fn, - predict_keys=predict_keys, - hooks=hooks, - checkpoint_path=checkpoint_path, - yield_single_examples=yield_single_examples): - yield result - except Exception: # pylint: disable=broad-except - rendezvous.record_error('prediction_loop', sys.exc_info()) - finally: - rendezvous.record_done('prediction_loop') - rendezvous.raise_errors() - - rendezvous.record_done('prediction_loop') - rendezvous.raise_errors() - - def _augment_model_fn(self, model_fn, train_cache_fn, eval_cache_fn, batch_axis): - """Returns a new model_fn, which wraps the TPU support.""" - - def _model_fn(features, labels, mode, config, params): - """A Estimator `model_fn` for TPUEstimator.""" - with self._ctx.with_mode(mode) as ctx: - model_fn_wrapper = _ModelFnWrapper(model_fn, train_cache_fn, - eval_cache_fn, config, params, ctx) - - # `input_fn` is called in `train()`, `evaluate()`, and `predict()`, - # but not in `export_savedmodel()`. - if self._is_input_fn_invoked: - is_export_mode = False - else: - is_export_mode = True - - # Clear the bit. - self._is_input_fn_invoked = None - - # examples_hook is added to training_hooks for both CPU and TPU - # execution. - if self._log_every_n_steps is not None: - examples_hook = ExamplesPerSecondHook( - ctx.global_batch_size, - output_dir=self.model_dir, - every_n_steps=self._log_every_n_steps) - - if ctx.is_running_on_cpu(is_export_mode=is_export_mode): - logging.info('Running %s on CPU', mode) - estimator_spec = model_fn_wrapper.call_without_tpu( - features, labels, is_export_mode=is_export_mode) - if self._log_every_n_steps is not None: - estimator_spec = estimator_spec._replace( - training_hooks=estimator_spec.training_hooks + (examples_hook,)) - return estimator_spec - - assert labels is None, '`labels` passed to `model_fn` must be `None`.' - # TPUEstimator._call_input_fn passes `input_fn` as features to here. - assert callable(features), '`input_fn` is not callable.' 
- input_fn = features - - input_holders = _InputPipeline(input_fn, batch_axis, ctx) - enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = ( - input_holders.generate_infeed_enqueue_ops_and_dequeue_fn()) - - graph = ops.get_default_graph() - for enqueue_op in enqueue_ops: - if isinstance(enqueue_op, list): - graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op) - else: - graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op) - - if mode == model_fn_lib.ModeKeys.TRAIN: - compile_op, loss, host_call, scaffold, training_hooks = ( - _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) - host_ops = host_call.create_tpu_hostcall() - if host_ops is None: - host_ops = [] - - shutdown_hooks = [] - shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE', - 'shutdown_worker') - if shutdown_mode: - if shutdown_mode == 'shutdown_worker': - finalizer_hooks = [ - session_support.ShutdownLameWorkers(timeout_ms=60 * 1000), - ] - elif shutdown_mode == 'shutdown_computation': - finalizer_hooks = [ - session_support.RestartComputation(timeout_ms=60 * 1000), - ] - else: - raise ValueError( - 'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode) - - shutdown_hooks.append( - session_support.GracefulShutdownHook( - checkpoint_prefix=self.model_dir + '/model.ckpt', - on_shutdown_hooks=finalizer_hooks)) - - with ops.control_dependencies([loss]): - global_step = array_ops.identity(training.get_global_step()) - hooks = input_hooks + shutdown_hooks - hooks.extend([ - TPUInfeedOutfeedSessionHook( - ctx, - enqueue_ops, - host_ops, - tpu_compile_op=compile_op, - run_infeed_loop_on_coordinator=( - run_infeed_loop_on_coordinator), - rendezvous=self._rendezvous[mode], - master=self._config.master, - session_config=self._session_config, - ), - InstallSignalHandlerHook() - ]) - if self._log_every_n_steps is not None: - logging_hook_frequency = ( # Divide and round up - (self._log_every_n_steps + - self._config.tpu_config.iterations_per_loop - 1) // - self._config.tpu_config.iterations_per_loop) - hooks.append( - training.LoggingTensorHook({ - 'loss': array_ops.identity(loss), - 'step': global_step, - }, - every_n_iter=logging_hook_frequency)) - examples_hook._set_steps_per_run( # pylint: disable=protected-access - self._config.tpu_config.iterations_per_loop) - hooks.append(examples_hook) - - if training_hooks: - hooks.extend(training_hooks) - - chief_hooks = [] - if (self._config.save_checkpoints_secs or - self._config.save_checkpoints_steps): - checkpoint_hook = training.CheckpointSaverHook( - self.model_dir, - save_secs=self._config.save_checkpoints_secs, - save_steps=self._config.save_checkpoints_steps, - scaffold=scaffold) - checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access - self._config.tpu_config.iterations_per_loop) - chief_hooks.append(checkpoint_hook) - - summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss) - with ops.control_dependencies([loss]): - update_ops = _sync_variables_ops(ctx) - - # Validate the TPU training graph to catch basic errors - _validate_tpu_training_graph() - - train_op = control_flow_ops.group(*update_ops) - graph.add_to_collection(_TPU_TRAIN_OP, train_op) - - return model_fn_lib.EstimatorSpec( - mode, - loss=loss, - training_chief_hooks=chief_hooks, - training_hooks=hooks, - train_op=train_op, - scaffold=scaffold) - - if mode == model_fn_lib.ModeKeys.EVAL: - compile_op, total_loss, host_calls, scaffold, eval_hooks = ( - _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) - iterations_per_loop_var = 
_create_or_get_iterations_per_loop() - mean_loss = math_ops.div( - total_loss, - math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype)) - - with ops.control_dependencies([mean_loss]): - # After TPU evaluation computation is done (the mean_loss tensor), - # reads all variables back from TPU and updates the eval step - # counter properly - internal_ops_to_run = _sync_variables_ops(ctx) - internal_ops_to_run.append( - _increase_eval_step_op(iterations_per_loop_var)) - - host_call_ret = host_calls.create_tpu_hostcall() - eval_metric_ops = {} - eval_update_ops = [] - - eval_metrics = host_call_ret.get('eval_metrics', {}) - if eval_metrics: - # Creates a dummy metric update_op for all metrics. Estimator - # expects all metrics in `eval_metric_ops` have update_op and calls - # them one by one. The real metric update_ops are invoked in a - # separated thread. So, here give Estimator the dummy op for all - # metrics. - with ops.control_dependencies(internal_ops_to_run): - dummy_update_op = control_flow_ops.no_op() - - for k, v in eval_metrics.items(): - eval_metric_ops[k] = (v[0], dummy_update_op) - eval_update_ops.append(v[1]) - else: - # If no eval metrics are passed, create an identity node for the - # loss and add `internal_ops_to_run` to its dependencies. So - # `internal_ops_to_run` can be executed. - with ops.control_dependencies(internal_ops_to_run): - mean_loss = array_ops.identity(mean_loss) - - if 'host_call' not in host_call_ret: - host_ops = [] - else: - host_ops = host_call_ret['host_call'] - hooks = [ - TPUInfeedOutfeedSessionHook( - ctx, - enqueue_ops, - eval_update_ops + host_ops, - tpu_compile_op=compile_op, - run_infeed_loop_on_coordinator=( - run_infeed_loop_on_coordinator), - rendezvous=self._rendezvous[mode], - master=self._config.evaluation_master, - session_config=self._session_config, - )] + input_hooks - - if eval_hooks: - hooks.extend(eval_hooks) - - return model_fn_lib.EstimatorSpec( - mode, - loss=mean_loss, - evaluation_hooks=hooks, - eval_metric_ops=eval_metric_ops, - scaffold=scaffold) - - # Predict - assert mode == model_fn_lib.ModeKeys.PREDICT - - (compile_op, dummy_predict_op, host_calls, - scaffold, prediction_hooks) = _predict_on_tpu_system( - ctx, model_fn_wrapper, dequeue_fn) - with ops.control_dependencies([dummy_predict_op]): - internal_ops_to_run = _sync_variables_ops(ctx) - with ops.control_dependencies(internal_ops_to_run): - dummy_predict_op = control_flow_ops.no_op() - - # In train and evaluation, the main TPU program is passed to monitored - # training session to run. Infeed enqueue and outfeed dequeue are - # executed in side threads. This is not the configuration for - # prediction mode. - # - # For prediction, the Estimator executes the EstimatorSpec.predictions - # directly and yield the element (via generator) to call site. So, the - # outfeed based prediction must be passed to MonitoredSession directly. - # Other parts of the TPU execution are organized as follows. - # - # 1. All outfeed based Tensors must be grouped with predictions Tensors - # to form a single invocation. This avoid the issue we might trigger - # multiple outfeeds incorrectly. To achieve this, `host_call` is - # placed in control_dependencies of `stopping_signals`, and - # `stopping_signals` is passed into _StoppingPredictHook, which sets - # the `stopping_signals` as SessionRunArgs. MonitoredSession merges - # all SessionRunArgs with the fetch in session.run together. - # - # 2. 
The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue) - # are grouped together. They will be launched once and only once in - # side threads and they quit naturally according to the SAME stopping - # condition. - enqueue_ops.append(dummy_predict_op) - - host_call_ret = host_calls.create_tpu_hostcall() - if 'host_call' not in host_call_ret: - host_ops = [] - else: - host_ops = host_call_ret['host_call'] - - predictions = host_call_ret['predictions'] - _verify_cross_hosts_transfer_size( - predictions, - message=( - 'The estimated size for TPUEstimatorSpec.predictions is too ' - 'large.')) - signals = host_call_ret['signals'] - - with ops.control_dependencies(host_ops): - host_ops = [] # Empty, we do do not need it anymore. - scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal( - signals) - predictions = _PaddingSignals.slice_tensor_or_dict( - predictions, signals) - - hooks = [ - _StoppingPredictHook(scalar_stopping_signal), - TPUInfeedOutfeedSessionHookForPrediction( - ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode], - tpu_compile_op=compile_op, - master=self._config.master, - session_config=self._session_config), - ] + input_hooks - - if prediction_hooks: - hooks.extend(prediction_hooks) - - return model_fn_lib.EstimatorSpec( - mode, - prediction_hooks=hooks, - predictions=predictions, - scaffold=scaffold) - - return _model_fn - - -def _export_output_to_tensors(export_output): - """Get a list of `Tensors` used in `export_output`. - - Args: - export_output: an `ExportOutput` object such as `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. - - Returns: - a list of tensors used in export_output. - - Raises: - ValueError: if `export_output` is not one of `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. - """ - if isinstance(export_output, export_output_lib.ClassificationOutput): - return [export_output.scores, export_output.classes] - elif isinstance(export_output, export_output_lib.RegressionOutput): - return [export_output.value] - elif isinstance(export_output, export_output_lib.PredictOutput): - return list(export_output.outputs.values()) - else: - raise ValueError( - '`export_output` must be have type `ClassificationOutput`, ' - '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) - - -def _clone_export_output_with_tensors(export_output, tensors): - """Clones `export_output` but with new `tensors`. - - Args: - export_output: an `ExportOutput` object such as `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. - tensors: a list of `Tensors` used to construct a new `export_output`. - - Returns: - A dict similar to `export_output` but with `tensors`. - - Raises: - ValueError: if `export_output` is not one of `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. 
- """ - if isinstance(export_output, export_output_lib.ClassificationOutput): - if len(tensors) != 2: - raise ValueError('tensors must be of length 2; ' - 'got {}.'.format(len(tensors))) - return export_output_lib.ClassificationOutput(*tensors) - elif isinstance(export_output, export_output_lib.RegressionOutput): - if len(tensors) != 1: - raise ValueError('tensors must be of length 1; ' - 'got {}'.format(len(tensors))) - return export_output_lib.RegressionOutput(*tensors) - elif isinstance(export_output, export_output_lib.PredictOutput): - return export_output_lib.PredictOutput( - dict(zip(export_output.outputs.keys(), tensors))) - else: - raise ValueError( - '`export_output` must be have type `ClassificationOutput`, ' - '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) - - -def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): - """Executes `model_fn_wrapper` multiple times on all TPU shards.""" - iterations_per_loop_var = _create_or_get_iterations_per_loop() - - (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks - ) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn) - - def multi_tpu_eval_steps_on_single_shard(): - loop_vars = [_ZERO_LOSS] - if model_fn_wrapper._eval_cache_fn is not None: - batch_size = ctx.global_batch_size - num_shards = ctx._config._tpu_config.num_shards - loop_vars += model_fn_wrapper._eval_cache_fn(batch_size // num_shards) - - return training_loop.repeat( - iterations_per_loop_var, - single_tpu_eval_step, - loop_vars) - - compile_op, ret = tpu.split_compile_and_shard( - multi_tpu_eval_steps_on_single_shard, - inputs=[], - num_shards=ctx.num_replicas, - outputs_from_all_shards=False, - device_assignment=ctx.device_assignment) - - loss = ret[0] - scaffold = _get_scaffold(captured_scaffold_fn) - return compile_op, loss, host_calls, scaffold, captured_eval_hooks.get() - - -def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): - """Executes `model_fn_wrapper` multiple times on all TPU shards.""" - iterations_per_loop_var = _create_or_get_iterations_per_loop() - - (single_tpu_train_step, host_call, captured_scaffold_fn, - captured_training_hooks) = ( - model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn)) - - def multi_tpu_train_steps_on_single_shard(): - loop_vars = [_INITIAL_LOSS] - if model_fn_wrapper._train_cache_fn is not None: - batch_size = ctx.global_batch_size - num_shards = ctx._config._tpu_config.num_shards - loop_vars += model_fn_wrapper._train_cache_fn(batch_size // num_shards) - - return training_loop.repeat( - iterations_per_loop_var, - single_tpu_train_step, - loop_vars) - - compile_op, ret = tpu.split_compile_and_shard( - multi_tpu_train_steps_on_single_shard, - inputs=[], - num_shards=ctx.num_replicas, - outputs_from_all_shards=False, - device_assignment=ctx.device_assignment) - - loss = ret[0] - scaffold = _get_scaffold(captured_scaffold_fn) - return compile_op, loss, host_call, scaffold, captured_training_hooks.get() - - -def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): - """Executes `model_fn_wrapper` multiple times on all TPU shards.""" - (single_tpu_predict_step, host_calls, captured_scaffold_fn, - captured_predict_hooks - ) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn) - - def multi_tpu_predict_steps_on_single_shard(): - - def cond(scalar_stopping_signal): - return math_ops.logical_not( - _StopSignals.should_stop(scalar_stopping_signal)) - - inputs = [_StopSignals.NON_STOPPING_SIGNAL] - outputs = training_loop.while_loop( - cond, 
single_tpu_predict_step, inputs=inputs, name=b'loop') - return outputs - - (compile_op, dummy_predict_op,) = tpu.split_compile_and_shard( - multi_tpu_predict_steps_on_single_shard, - inputs=[], - num_shards=ctx.num_replicas, - outputs_from_all_shards=False, - device_assignment=ctx.device_assignment) - - dummy_predict_op = dummy_predict_op[0] - scaffold = _get_scaffold(captured_scaffold_fn) - return (compile_op, dummy_predict_op, host_calls, scaffold, - captured_predict_hooks.get()) - - -def _wrap_computation_in_while_loop(device, op_fn): - """Wraps the ops generated by `op_fn` in tf.while_loop.""" - - def computation(i): - with ops.control_dependencies(op_fn()): - return i + 1 - - iterations_per_loop_var = _create_or_get_iterations_per_loop() - # By setting parallel_iterations=1, the parallel execution in while_loop is - # basically turned off. - with ops.device(device): - iterations = array_ops.identity(iterations_per_loop_var) - return control_flow_ops.while_loop( - lambda i: i < iterations, - computation, [constant_op.constant(0)], - parallel_iterations=1) - - -def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn): - """Wraps the ops generated by `op_fn` in tf.while_loop.""" - - def cond(scalar_stopping_signal): - return math_ops.logical_not( - _StopSignals.should_stop(scalar_stopping_signal)) - - def computation(unused_scalar_stopping_signal): - return_value = op_fn() - execute_ops = return_value['ops'] - signals = return_value['signals'] - with ops.control_dependencies(execute_ops): - return _StopSignals.as_scalar_stopping_signal(signals) - - # By setting parallel_iterations=1, the parallel execution in while_loop is - # basically turned off. - with ops.device(device): - return control_flow_ops.while_loop( - cond, - computation, [_StopSignals.NON_STOPPING_SIGNAL], - parallel_iterations=1) - - -def _validate_tpu_training_graph(): - """Validate graph before running distributed training. - - Raises: - ValueError: If the graph seems invalid for running on device - """ - operations = ops.get_default_graph().get_operations() - - # Check if there is atleast one CrossReplicaSum operation in the graph - # This should be introduced by using the CrossShardOptimizer wrapper - cross_replica_sum_ops = [ - o for o in operations if o.type == _CROSS_REPLICA_SUM_OP - ] - if not cross_replica_sum_ops: - raise ValueError( - 'CrossShardOptimizer must be used for model training on TPUs.') - - -class _CapturedObject(object): - """A placeholder to capture an object. - - This is useful when we need to capture a Python object in the Tensorflow - control flow body function and use it outside the control flow. - """ - - def __init__(self): - self._object = None - self._captured = False - - def capture(self, o): - if self._captured: - raise RuntimeError( - 'InternalError: Object can capture only once. Please file bug.') - - self._captured = True - self._object = o - - def get(self): - if not self._captured: - raise RuntimeError( - 'InternalError: Object is not captured properly before `get`. 
' - 'Please file bug.') - return self._object - - -def _get_scaffold(captured_scaffold_fn): - """Retrieves the Scaffold from `captured_scaffold_fn`.""" - with _CapturingContext(message='Inside scaffold_fn'): - scaffold_fn = captured_scaffold_fn.get() - if scaffold_fn: - scaffold = scaffold_fn() - if scaffold is None: - raise ValueError( - 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed') - else: - scaffold = None - - if scaffold: - wrapped_finalize = scaffold.finalize - - def _finalize(): - with _CapturingContext('Inside Scaffold.finalize'): - wrapped_finalize() - - scaffold.finalize = _finalize - return scaffold - - -class _CapturingContext(control_flow_ops.ControlFlowContext): - """Tracks references to Tensors defined in TPU replication.""" - - def __init__(self, message): - control_flow_ops.ControlFlowContext.__init__(self) - self._message = message - - def to_control_flow_context_def(self, context_def, export_scope=None): - # pylint: disable=useless-super-delegation - # NOTE(slebedev): the method is required by `ControlFlowContext`. - super(_CapturingContext, self).to_control_flow_context_def( - context_def, export_scope) - - def AddOp(self, op): # pylint: disable=invalid-name - for c in op.inputs: - if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access - raise ValueError('{}: Op {} depends on TPU computation {}, ' - 'which is not allowed.'.format(self._message, op, c)) - - def __enter__(self): - # pylint: disable=protected-access - self._g = ops.get_default_graph() - self._old = self._g._get_control_flow_context() - self._g._set_control_flow_context(self) - # pylint: enable=protected-access - - def __exit__(self, _, __, ___): # pylint: disable=invalid-name - self._g._set_control_flow_context(self._old) # pylint: disable=protected-access - - -class _Inputs(object): - """A data structure representing the input_fn returned values. - - This also supports the returned value from input_fn as `Dataset`. - """ - - def __init__(self, features=None, labels=None, dataset=None, signals=None): - if dataset is not None and (features is not None or labels is not None or - signals is not None): - raise RuntimeError('Internal Error: Either (features and labels) or ' - 'dataset should be provided, not both. Please file ' - 'bug') - - self._features = features - self._labels = labels - self._signals = signals - - self._dataset = dataset - self._iterator = None - - @staticmethod - def from_input_fn(return_values): - """Returns an `_Inputs` instance according to `input_fn` return value.""" - if isinstance(return_values, dataset_ops.DatasetV2): - dataset = return_values - return _Inputs(dataset=dataset) - - features, labels = _Inputs._parse_inputs(return_values) - return _Inputs(features, labels) - - @staticmethod - def _parse_inputs(return_values): - if isinstance(return_values, tuple): - features, labels = return_values - else: - features, labels = return_values, None - return features, labels - - @property - def is_dataset(self): - """Returns True if the return value from input_fn is Dataset.""" - return self._dataset is not None - - def dataset_initializer(self): - """Returns the dataset's initializer. - - The initializer must be run before calling `features_and_labels`. 
- """ - self._iterator = dataset_ops.make_initializable_iterator(self._dataset) - return self._iterator.initializer - - def features_and_labels(self): - """Gets `features` and `labels`.""" - if self.is_dataset: - if self._iterator is None: - raise RuntimeError('Internal error: Must run dataset_initializer ' - 'before calling features_and_labels(). Please file ' - 'a bug!') - return _Inputs._parse_inputs(self._iterator.get_next()) - - return (self._features, self._labels) - - def signals(self): - return self._signals - - @property - def dataset(self): - return self._dataset - - -class _InputsWithStoppingSignals(_Inputs): - """Inputs with `_StopSignals` inserted into the dataset.""" - - def __init__(self, - dataset, - batch_size, - add_padding=False, - num_invocations_per_step=1): - - assert dataset is not None - user_provided_dataset = dataset.map( - _InputsWithStoppingSignals.insert_stopping_signal( - stop=False, batch_size=batch_size, add_padding=add_padding)) - if num_invocations_per_step == 1: - final_batch_dataset = dataset.take(1).map( - _InputsWithStoppingSignals.insert_stopping_signal( - stop=True, batch_size=batch_size, add_padding=add_padding)) - else: - # We append (2 * num_invocations_per_step - 1) batches for exhausting the - # user_provided_dataset and stop properly. - # For example, if num_invocations_per_step is 2, we append 3 additional - # padding batches: b1, b2, b3. - # If user_provided_dataset contains two batches: a1, a2 - # Step 1: [a1, a2] - # Step 2: [b1, b2] -> STOP - # If user_provided_dataset contains three batches: a1, a2, a3. - # The training loops: - # Step 1: [a1, a2] - # Step 2: [a3, b1] - # Step 3: [b2, b3] -> STOP. - final_batch_dataset = dataset.take(1).map( - _InputsWithStoppingSignals.insert_stopping_signal( - stop=True, batch_size=batch_size, add_padding=add_padding)) - final_batch_dataset = final_batch_dataset.repeat( - 2 * num_invocations_per_step - 1) - - def _set_mask(data_dict): - signals = data_dict['signals'] - signals['padding_mask'] = array_ops.ones_like(signals['padding_mask']) - data_dict['signals'] = signals - return data_dict - - # Mask out the extra batch. - final_batch_dataset = final_batch_dataset.map(_set_mask) - - dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2) - - super(_InputsWithStoppingSignals, self).__init__(dataset=dataset) - self._current_inputs = None - - def features_and_labels(self): - if self._current_inputs is not None: - raise RuntimeError( - 'Internal Error: The previous inputs have not been properly ' - 'consumed. First call features_and_labels, then call signals.') - - inputs_with_signals = self._iterator.get_next() - features = inputs_with_signals['features'] - labels = inputs_with_signals.get('labels') - - self._current_inputs = inputs_with_signals - return features, labels - - def signals(self): - """Returns the `Signals` from `_Inputs`.""" - if self._current_inputs is None: - raise RuntimeError( - 'Internal Error: The current inputs have not been properly ' - 'generated. First call features_and_labels, then call signals.') - signals = self._current_inputs['signals'] - self._current_inputs = None - return signals - - @staticmethod - def insert_stopping_signal(stop, batch_size, add_padding=False): - """Inserts stopping_signal into dataset via _map_fn. - - Here we change the data structure in the dataset, such that the return value - is a dictionary now and `features`, `labels`, and `signals` are three - distinguished keys in that dict. 
This provides a better structure, which - eases the process to decompose the inputs (see `features_and_labels`). - - Args: - stop: bool, state of current stopping signals. - batch_size: int, batch size. - add_padding: bool, whether to pad the tensor to full batch size. - - Returns: - A map_fn passed to dataset.map API. - """ - - def _map_fn(*args): - """The map fn to insert signals.""" - if len(args) == 1: - # Unpack the single Tensor/dict argument as features. This is required - # for the input_fn returns no labels. - args = args[0] - features, labels = _Inputs._parse_inputs(args) - new_input_dict = {} - - if add_padding: - padding_mask, features, labels = ( - _PaddingSignals.pad_features_and_labels(features, labels, - batch_size)) - - new_input_dict['features'] = features - if labels is not None: - new_input_dict['labels'] = labels - - else: - new_input_dict['features'] = features - if labels is not None: - new_input_dict['labels'] = labels - padding_mask = None - - new_input_dict['signals'] = _StopSignals( - stop=stop, batch_size=batch_size, - padding_mask=padding_mask).as_dict() - - return new_input_dict - - return _map_fn - - -class _StopSignals(object): - """Signals class holding all logic to handle TPU stopping condition.""" - - NON_STOPPING_SIGNAL = False - STOPPING_SIGNAL = True - - def __init__(self, stop, batch_size, padding_mask=None): - self._stop = stop - self._batch_size = batch_size - self._padding_mask = padding_mask - - def as_dict(self): - """Returns the signals as Python dict.""" - shape = [self._batch_size, 1] - dtype = dtypes.bool - - if self._stop: - stopping = array_ops.ones(shape=shape, dtype=dtype) - else: - stopping = array_ops.zeros(shape=shape, dtype=dtype) - - signals = {'stopping': stopping} - if self._padding_mask is not None: - signals['padding_mask'] = self._padding_mask - return signals - - @staticmethod - def as_scalar_stopping_signal(signals): - return array_ops.identity(signals['stopping'][0][0]) - - @staticmethod - def should_stop(scalar_stopping_signal): - """Detects whether scalar_stopping_signal indicates stopping.""" - if isinstance(scalar_stopping_signal, ops.Tensor): - # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF - # way to express the bool check whether scalar_stopping_signal is True. - return math_ops.logical_and(scalar_stopping_signal, - _StopSignals.STOPPING_SIGNAL) - else: - # For non Tensor case, it is used in SessionRunHook. So, we cannot modify - # the graph anymore. Here, we use pure Python. 
- return bool(scalar_stopping_signal) - - -class _PaddingSignals(object): - """Signals class holding all logic to handle padding.""" - - @staticmethod - def pad_features_and_labels(features, labels, batch_size): - """Pads out the batch dimension of features and labels.""" - real_batch_size = array_ops.shape( - _PaddingSignals._find_any_tensor(features))[0] - - batch_size_tensor = constant_op.constant(batch_size, dtypes.int32) - - check_greater = check_ops.assert_greater_equal( - batch_size_tensor, - real_batch_size, - data=(batch_size_tensor, real_batch_size), - message='The real batch size should not be greater than batch_size.') - - with ops.control_dependencies([check_greater]): - missing_count = batch_size_tensor - real_batch_size - - def pad_single_tensor(tensor): - """Pads out the batch dimension of a tensor to the complete batch_size.""" - rank = len(tensor.shape) - assert rank > 0 - padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) - padded_shape = (batch_size,) + tuple(tensor.shape[1:]) - padded_tensor = array_ops.pad(tensor, padding) - padded_tensor.set_shape(padded_shape) - return padded_tensor - - def nest_pad(tensor_or_dict): - return nest.map_structure(pad_single_tensor, tensor_or_dict) - - features = nest_pad(features) - if labels is not None: - labels = nest_pad(labels) - - padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count, - batch_size) - - return padding_mask, features, labels - - @staticmethod - def slice_tensor_or_dict(tensor_or_dict, signals): - """Slice the real Tensors according to padding mask in signals.""" - - padding_mask = signals['padding_mask'] - batch_size = array_ops.shape(padding_mask)[0] - - def verify_batch_size(tensor): - check_batch_size = math_ops.equal(batch_size, tensor.shape[0]) - with ops.control_dependencies([check_batch_size]): - return array_ops.identity(tensor) - - def slice_single_tensor(tensor): - rank = len(tensor.shape) - assert rank > 0 - real_batch_size = batch_size - math_ops.reduce_sum(padding_mask) - return verify_batch_size(tensor)[0:real_batch_size] - - # As we split the Tensors to all TPU cores and concat them back, it is - # important to ensure the real data is placed before padded ones, i.e., - # order is preserved. By that, the sliced padding mask should have all 0's. - # If this assertion failed, # the slice logic here would not hold. - sliced_padding_mask = slice_single_tensor(padding_mask) - assert_padding_mask = math_ops.equal( - math_ops.reduce_sum(sliced_padding_mask), 0) - - with ops.control_dependencies([assert_padding_mask]): - should_stop = _StopSignals.should_stop( - _StopSignals.as_scalar_stopping_signal(signals)) - - is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0) - - def slice_fn(tensor): - # If the current batch is full batch or part of stopping signals, we do - # not need to slice to save performance. 
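The `_PaddingSignals` helpers pad a partial final batch up to the full batch size, record which rows are padding in an int32 mask (0 for real rows, 1 for padded rows), and later slice predictions back down to the real rows. A minimal NumPy sketch of that contract, with hypothetical helper names and none of the TPU machinery:

# Standalone NumPy sketch (assumed names, not the deleted TF code): pad a
# partial batch to the full batch size, build the 0/1 padding mask, and
# slice the real rows back out, mirroring _PaddingSignals.
import numpy as np

def pad_batch(features, batch_size):
    real = features.shape[0]
    missing = batch_size - real
    padded = np.pad(features, [(0, missing)] + [(0, 0)] * (features.ndim - 1))
    # 0 marks a real row, 1 marks a padded row, as in _padding_mask.
    mask = np.concatenate([np.zeros(real, np.int32), np.ones(missing, np.int32)])
    return padded, mask

def slice_real_rows(tensor, mask):
    real = mask.shape[0] - mask.sum()
    return tensor[:real]

features = np.arange(12, dtype=np.float32).reshape(3, 4)   # real batch of 3
padded, mask = pad_batch(features, batch_size=8)
assert padded.shape == (8, 4) and mask.tolist() == [0, 0, 0, 1, 1, 1, 1, 1]
assert np.array_equal(slice_real_rows(padded, mask), features)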
- return control_flow_ops.cond( - math_ops.logical_or(should_stop, is_full_batch), - (lambda: verify_batch_size(tensor)), - (lambda: slice_single_tensor(tensor))) - - return nest.map_structure(slice_fn, tensor_or_dict) - - @staticmethod - def _find_any_tensor(batch_features): - tensors = [ - x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor) - ] - if not tensors: - raise ValueError('Cannot find any Tensor in features dict.') - return tensors[0] - - @staticmethod - def _padding_mask(real_batch_size, missing_count, batch_size): - padding_mask = array_ops.concat([ - array_ops.zeros((real_batch_size,), dtype=dtypes.int32), - array_ops.ones((missing_count,), dtype=dtypes.int32) - ], - axis=0) - padding_mask.set_shape((batch_size,)) - return padding_mask - - -def _verify_cross_hosts_transfer_size(tensor_dict, message): - total_size = 0 - tensor_structure = {} - for key, tensor in tensor_dict.items(): - shape = tensor.shape - size = np.product(shape) * tensor.dtype.size - tensor_structure[key] = shape - total_size += size - if total_size >= _ONE_GIGABYTE: - raise ValueError( - '{} The transfer size is larger than the protobuf limit. Please ' - 'consider to use Tensors with smaller shapes or reduce batch ' - 'size. Given:\n' - '{}'.format( - message, '\n'.join([ - ' -- Key: {}, Shape: {}'.format(k, v) - for k, v in tensor_structure.items() - ]))) - - -def _add_item_to_params(params, key, value): - """Adds a new item into `params`.""" - if isinstance(params, hparam.HParams): - # For HParams, we need to use special API. - if key in params: - params.set_hparam(key, value) - else: - params.add_hparam(key, value) - else: - # Now params is Python dict. - params[key] = value - - -def export_estimator_savedmodel(estimator, - export_dir_base, - serving_input_receiver_fn, - assets_extra=None, - as_text=False, - checkpoint_path=None, - strip_default_attrs=False): - """Export `Estimator` trained model for TPU inference. - - Args: - estimator: `Estimator` with which model has been trained. - export_dir_base: A string containing a directory in which to create - timestamped subdirectories containing exported SavedModels. - serving_input_receiver_fn: A function that takes no argument and returns a - `ServingInputReceiver` or `TensorServingInputReceiver`. - assets_extra: A dict specifying how to populate the assets.extra directory - within the exported SavedModel, or `None` if no extra assets are needed. - as_text: whether to write the SavedModel proto in text format. - checkpoint_path: The checkpoint path to export. If `None` (the default), - the most recent checkpoint found within the model directory is chosen. - strip_default_attrs: Boolean. If `True`, default-valued attributes will be - removed from the NodeDefs. - - Returns: - The string path to the exported directory. - """ - # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use - # `estimator.config`. - config = tpu_config.RunConfig(model_dir=estimator.model_dir) - est = TPUEstimator( - estimator._model_fn, # pylint: disable=protected-access - config=config, - params=estimator.params, - use_tpu=True, - train_batch_size=2048, # Does not matter. - eval_batch_size=2048, # Does not matter. 
- ) - return est.export_savedmodel(export_dir_base, serving_input_receiver_fn, - assets_extra, as_text, checkpoint_path, - strip_default_attrs) diff --git a/build/lib/caire-covid/mrqa/xlnet.py b/build/lib/caire-covid/mrqa/xlnet.py deleted file mode 100644 index 6d410cd..0000000 --- a/build/lib/caire-covid/mrqa/xlnet.py +++ /dev/null @@ -1,292 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import json -import os -import tensorflow as tf -import mrqa.modeling - - -def _get_initializer(FLAGS): - """Get variable intializer.""" - if FLAGS.init == "uniform": - initializer = tf.initializers.random_uniform( - minval=-FLAGS.init_range, - maxval=FLAGS.init_range, - seed=None) - elif FLAGS.init == "normal": - initializer = tf.initializers.random_normal( - stddev=FLAGS.init_std, - seed=None) - else: - raise ValueError("Initializer {} not supported".format(FLAGS.init)) - return initializer - - -class XLNetConfig(object): - """XLNetConfig contains hyperparameters that are specific to a model checkpoint; - i.e., these hyperparameters should be the same between - pretraining and finetuning. - - The following hyperparameters are defined: - n_layer: int, the number of layers. - d_model: int, the hidden size. - n_head: int, the number of attention heads. - d_head: int, the dimension size of each attention head. - d_inner: int, the hidden size in feed-forward layers. - ff_activation: str, "relu" or "gelu". - untie_r: bool, whether to untie the biases in attention. - n_token: int, the vocab size. - """ - - def __init__(self, FLAGS=None, json_path=None): - """Constructing an XLNetConfig. - One of FLAGS or json_path should be provided.""" - - assert FLAGS is not None or json_path is not None - - self.keys = ["n_layer", "d_model", "n_head", "d_head", "d_inner", - "ff_activation", "untie_r", "n_token"] - - if FLAGS is not None: - self.init_from_flags(FLAGS) - - if json_path is not None: - self.init_from_json(json_path) - - def init_from_flags(self, FLAGS): - for key in self.keys: - setattr(self, key, getattr(FLAGS, key)) - - def init_from_json(self, json_path): - with tf.gfile.Open(json_path) as f: - json_data = json.load(f) - for key in self.keys: - setattr(self, key, json_data[key]) - - def to_json(self, json_path): - """Save XLNetConfig to a json file.""" - json_data = {} - for key in self.keys: - json_data[key] = getattr(self, key) - - json_dir = os.path.dirname(json_path) - if not tf.gfile.Exists(json_dir): - tf.gfile.MakeDirs(json_dir) - with tf.gfile.Open(json_path, "w") as f: - json.dump(json_data, f, indent=4, sort_keys=True) - - -def create_run_config(is_training, is_finetune, FLAGS): - kwargs = dict( - is_training=is_training, - use_tpu=FLAGS.use_tpu, - use_bfloat16=FLAGS.use_bfloat16, - dropout=FLAGS.dropout, - dropatt=FLAGS.dropatt, - init=FLAGS.init, - init_range=FLAGS.init_range, - init_std=FLAGS.init_std, - clamp_len=FLAGS.clamp_len) - - if not is_finetune: - kwargs.update(dict( - mem_len=FLAGS.mem_len, - reuse_len=FLAGS.reuse_len, - bi_data=FLAGS.bi_data, - clamp_len=FLAGS.clamp_len, - same_length=FLAGS.same_length)) - - return RunConfig(**kwargs) - - -class RunConfig(object): - """RunConfig contains hyperparameters that could be different - between pretraining and finetuning. - These hyperparameters can also be changed from run to run. - We store them separately from XLNetConfig for flexibility. 
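`XLNetConfig` in this deleted xlnet.py carries only the checkpoint-specific hyperparameters (n_layer, d_model, n_head, d_head, d_inner, ff_activation, untie_r, n_token) and round-trips them through JSON. A minimal sketch of that round trip using only the standard library; the file name is hypothetical and plain `open()` stands in for `tf.gfile`:

# Minimal sketch of the XLNetConfig JSON round trip with the standard
# library only; "xlnet_config.json" is a hypothetical path.
import json

KEYS = ["n_layer", "d_model", "n_head", "d_head", "d_inner",
        "ff_activation", "untie_r", "n_token"]

def save_config(cfg, path):
    # Keep only the checkpoint-specific keys, as XLNetConfig.to_json does.
    with open(path, "w") as f:
        json.dump({k: cfg[k] for k in KEYS}, f, indent=4, sort_keys=True)

def load_config(path):
    with open(path) as f:
        data = json.load(f)
    return {k: data[k] for k in KEYS}

cfg = {"n_layer": 12, "d_model": 768, "n_head": 12, "d_head": 64,
       "d_inner": 3072, "ff_activation": "gelu", "untie_r": True,
       "n_token": 32000}
save_config(cfg, "xlnet_config.json")
assert load_config("xlnet_config.json") == cfg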
- """ - - def __init__(self, is_training, use_tpu, use_bfloat16, dropout, dropatt, - init="normal", init_range=0.1, init_std=0.02, mem_len=None, - reuse_len=None, bi_data=False, clamp_len=-1, same_length=False): - """ - Args: - is_training: bool, whether in training mode. - use_tpu: bool, whether TPUs are used. - use_bfloat16: bool, use bfloat16 instead of float32. - dropout: float, dropout rate. - dropatt: float, dropout rate on attention probabilities. - init: str, the initialization scheme, either "normal" or "uniform". - init_range: float, initialize the parameters with a uniform distribution - in [-init_range, init_range]. Only effective when init="uniform". - init_std: float, initialize the parameters with a normal distribution - with mean 0 and stddev init_std. Only effective when init="normal". - mem_len: int, the number of tokens to cache. - reuse_len: int, the number of tokens in the currect batch to be cached - and reused in the future. - bi_data: bool, whether to use bidirectional input pipeline. - Usually set to True during pretraining and False during finetuning. - clamp_len: int, clamp all relative distances larger than clamp_len. - -1 means no clamping. - same_length: bool, whether to use the same attention length for each token. - """ - - self.init = init - self.init_range = init_range - self.init_std = init_std - self.is_training = is_training - self.dropout = dropout - self.dropatt = dropatt - self.use_tpu = use_tpu - self.use_bfloat16 = use_bfloat16 - self.mem_len = mem_len - self.reuse_len = reuse_len - self.bi_data = bi_data - self.clamp_len = clamp_len - self.same_length = same_length - - -class XLNetModel(object): - """A wrapper of the XLNet model used during both pretraining and finetuning.""" - - def __init__(self, xlnet_config, run_config, input_ids, seg_ids, input_mask, - mems=None, perm_mask=None, target_mapping=None, inp_q=None, - **kwargs): - """ - Args: - xlnet_config: XLNetConfig, - run_config: RunConfig, - input_ids: int32 Tensor in shape [len, bsz], the input token IDs. - seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs. - input_mask: float32 Tensor in shape [len, bsz], the input mask. - 0 for real tokens and 1 for padding. - mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory - from previous batches. The length of the list equals n_layer. - If None, no memory is used. - perm_mask: float32 Tensor in shape [len, len, bsz]. - If perm_mask[i, j, k] = 0, i attend to j in batch k; - if perm_mask[i, j, k] = 1, i does not attend to j in batch k. - If None, each position attends to all the others. - target_mapping: float32 Tensor in shape [num_predict, len, bsz]. - If target_mapping[i, j, k] = 1, the i-th predict in batch k is - on the j-th token. - Only used during pretraining for partial prediction. - Set to None during finetuning. - inp_q: float32 Tensor in shape [len, bsz]. - 1 for tokens with losses and 0 for tokens without losses. - Only used during pretraining for two-stream attention. - Set to None during finetuning. 
- """ - - initializer = _get_initializer(run_config) - - tfm_args = dict( - n_token=xlnet_config.n_token, - initializer=initializer, - attn_type="bi", - n_layer=xlnet_config.n_layer, - d_model=xlnet_config.d_model, - n_head=xlnet_config.n_head, - d_head=xlnet_config.d_head, - d_inner=xlnet_config.d_inner, - ff_activation=xlnet_config.ff_activation, - untie_r=xlnet_config.untie_r, - - is_training=run_config.is_training, - use_bfloat16=run_config.use_bfloat16, - use_tpu=run_config.use_tpu, - dropout=run_config.dropout, - dropatt=run_config.dropatt, - - mem_len=run_config.mem_len, - reuse_len=run_config.reuse_len, - bi_data=run_config.bi_data, - clamp_len=run_config.clamp_len, - same_length=run_config.same_length - ) - - input_args = dict( - inp_k=input_ids, - seg_id=seg_ids, - input_mask=input_mask, - mems=mems, - perm_mask=perm_mask, - target_mapping=target_mapping, - inp_q=inp_q) - tfm_args.update(input_args) - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - (self.output, self.new_mems, self.lookup_table - ) = modeling.transformer_xl(**tfm_args) - - self.input_mask = input_mask - self.initializer = initializer - self.xlnet_config = xlnet_config - self.run_config = run_config - - def get_pooled_out(self, summary_type, use_summ_proj=True): - """ - Args: - summary_type: str, "last", "first", "mean", or "attn". The method - to pool the input to get a vector representation. - use_summ_proj: bool, whether to use a linear projection during pooling. - - Returns: - float32 Tensor in shape [bsz, d_model], the pooled representation. - """ - - xlnet_config = self.xlnet_config - run_config = self.run_config - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - summary = modeling.summarize_sequence( - summary_type=summary_type, - hidden=self.output, - d_model=xlnet_config.d_model, - n_head=xlnet_config.n_head, - d_head=xlnet_config.d_head, - dropout=run_config.dropout, - dropatt=run_config.dropatt, - is_training=run_config.is_training, - input_mask=self.input_mask, - initializer=self.initializer, - use_proj=use_summ_proj) - - return summary - - def get_sequence_output(self): - """ - Returns: - float32 Tensor in shape [len, bsz, d_model]. The last layer hidden - representation of XLNet. - """ - - return self.output - - def get_new_memory(self): - """ - Returns: - list of float32 Tensors in shape [mem_len, bsz, d_model], the new - memory that concatenates the previous memory with the current input - representations. - The length of the list equals n_layer. - """ - return self.new_mems - - def get_embedding_table(self): - """ - Returns: - float32 Tensor in shape [n_token, d_model]. The embedding lookup table. - Used for tying embeddings between input and output layers. - """ - return self.lookup_table - - def get_initializer(self): - """ - Returns: - A tf initializer. Used to initialize variables in layers on top of XLNet. 
- """ - return self.initializer - diff --git a/build/lib/caire-covid/qa.py b/build/lib/caire-covid/qa.py deleted file mode 100644 index f95838f..0000000 --- a/build/lib/caire-covid/qa.py +++ /dev/null @@ -1,252 +0,0 @@ -import os -import sys -from collections import namedtuple -import tensorflow as tf -from nltk.tokenize import sent_tokenize - -from mrqa.predictor_kaggle import mrqa_predictor -from biobert.predictor_biobert import biobert_predictor - - -class QaModule(): - def __init__(self, model_name): - # init QA models - self.model_name = model_name - self.predict_fn = self.getPredictors() - - def readIR(self, data): - synthetic = [] - - idx = 0 - for data_item in data: - question = data_item["question"] - answer = data_item["data"]["answer"] - contexts = data_item["data"]["context"] - dois = data_item["data"]["doi"] - titles = data_item["data"]["titles"] - - for (context, doi, title) in zip(contexts, dois, titles): - data_sample = { - "context": context, - "qas": [] - } - - qas_item = { - "id": idx, - "question": question, - "answer": answer, - "doi": doi, - "title": title, - } - data_sample["qas"].append(qas_item) - synthetic.append(data_sample) - - idx += 1 - return synthetic - - def mrqaPredictor(self, data): - return mrqa_predictor(self.mrqaFLAGS, self.mrqa_predict_fn, data) - - def biobertPredictor(self, data): - return biobert_predictor(self.bioFLAGS, self.bio_predict_fn, data) - - def getPredictors(self): - if "mrqa" in self.model_name: - self.mrqa_predict_fn = self.getPredictor("mrqa") - if "biobert" in self.model_name: - self.bio_predict_fn = self.getPredictor("biobert") - - def getPredictor(self, model_name): - if model_name == 'mrqa': - d = { - "uncased": False, - "start_n_top": 5, - "end_n_top": 5, - "use_tpu": False, - "train_batch_size": 1, - "predict_batch_size": 1, - "shuffle_buffer": 2048, - "spiece_model_file": "./mrqa/model/spiece.model", - "max_seq_length": 512, - "doc_stride": 128, - "max_query_length": 64, - "n_best_size": 5, - "max_answer_length": 64, - } - self.mrqaFLAGS = namedtuple("FLAGS", d.keys())(*d.values()) - return tf.contrib.predictor.from_saved_model("/kaggle/input/pretrained-qa-models/mrqa/1564469515") - elif model_name == 'biobert': - d = { - "version_2_with_negative": False, - "null_score_diff_threshold": 0.0, - "verbose_logging": False, - "init_checkpoint": None, - "do_lower_case": False, - "bert_config_file": "./biobert/model/bert_config.json", - "vocab_file": "./biobert/model/vocab.txt", - "train_batch_size": 1, - "predict_batch_size": 1, - "max_seq_length": 384, - "doc_stride": 128, - "max_query_length": 64, - "n_best_size": 5, - "max_answer_length": 30, - } - self.bioFLAGS = namedtuple("FLAGS", d.keys())(*d.values()) - return tf.contrib.predictor.from_saved_model("/kaggle/input/pretrained-qa-models/biobert/1585470591") - else: - raise ValueError("invalid model name") - - def getAnswers(self, data): - """ - Output: - List [{ - "question": "xxxx", - "data": - { - "answer": ["answer1", "answer2", ...], - "confidence": [1,2, ...], - "context": ["paragraph1", "paragraph2", ...], - } - }] - """ - answers = [] - qas = self.readIR(data) - for qa in qas: - question = qa["qas"][0]["question"] - if len(answers)==0 or answers[-1]["question"]!=question: - answer_sample = {} - answer_sample["question"] = question - answer_sample["data"] = { - "answer": [], - "context": [], - "title": [], - "doi": [], - } - answers.append(answer_sample) - - context = qa["context"] - doi = qa["qas"][0]["doi"] - title = qa["qas"][0]["title"] - - 
answers[-1]["data"]["context"].append(context) - - sents = sent_tokenize(context) - spans = self.convert_idx(context, sents) - - if "mrqa" in self.model_name: - raw_mrqa = self.mrqaPredictor([qa]) - # get sentence from MRQA - raw = raw_mrqa[qa["qas"][0]["id"]] - # question answering one by one - answer_start = context.find(raw, 0) - answer_end = answer_start + len(raw) - answer_span = [] - for idx, span in enumerate(spans): - if not (answer_end <= span[0] or answer_start >= span[1]): - answer_span.append(idx) - - y1, y2 = answer_span[0], answer_span[-1] - if not y1 == y2: - # context tokens in index y1 and y2 should be merged together - # print("Merge knowledge sentence") - answer_sent_mrqa = " ".join(sents[y1:y2+1]) - else: - answer_sent_mrqa = sents[y1] - assert raw in answer_sent_mrqa - else: - answer_sent_mrqa = "" - - - if "biobert" in self.model_name: - raw_bio = self.biobertPredictor([qa]) - # get sentence from BioBERT - raw = raw_bio[qa["qas"][0]["id"]] - # question answering one by one - answer_start = context.find(raw, 0) - answer_end = answer_start + len(raw) - answer_span = [] - for idx, span in enumerate(spans): - if not (answer_end <= span[0] or answer_start >= span[1]): - answer_span.append(idx) - - y1, y2 = answer_span[0], answer_span[-1] - if not y1 == y2: - # context tokens in index y1 and y2 should be merged together - # print("Merge knowledge sentence") - answer_sent_bio = " ".join(sents[y1:y2+1]) - else: - answer_sent_bio = sents[y1] - - # if raw not in answer_sent_bio: - # print("RAW", raw) - # print("BIO", answer_sent_bio) - # assert raw in answer_sent_bio - else: - answer_sent_bio = "" - - if answer_sent_mrqa == answer_sent_bio or answer_sent_mrqa in answer_sent_bio: - # print("SAME OR QA < BIO") - answer_sent = answer_sent_bio - elif answer_sent_bio in answer_sent_mrqa: - # print("BIO < QA") - answer_sent = answer_sent_mrqa - else: - # print("DIFFERENT ANSWERS") - answer_sent= " ".join([answer_sent_mrqa, answer_sent_bio]) - - answers[-1]["data"]["answer"].append(answer_sent) - - - # print("context:", context) - # print("-"*80) - # print("query:", question) - # print("-"*80) - # print("answer:", answer_sent) - # input() - # break - return answers - - def convert_idx(self, text, tokens): - current = 0 - spans = [] - for token in tokens: - current = text.find(token, current) - if current < 0: - print("Token {} cannot be found".format(token)) - raise Exception() - spans.append((current, current + len(token))) - current += len(token) - return spans - -def print_answers_in_file(answers, filepath="./answers.txt"): - """ - Input: - List [{ - "question": "xxxx", - "data": - { - "answer": ["answer1", "answer2", ...], - "confidence": [1,2, ...], - "context": ["paragraph1", "paragraph2", ...], - } - }] - """ - with open(filepath, "w") as f: - print("WRITE ANSWERS IN FILES ...") - for item in answers: - question = item["question"] - cas = item["data"] - for (answer, context) in zip(cas["answer"], cas["context"]): - f.write("-"*80+"\n") - f.write("context: "+context+"\n") - f.write("-"*80+"\n") - f.write("question: "+question+"\n") - f.write("-"*80+"\n") - f.write("answer: "+answer+"\n") - f.write("="*80+"\n") - - - - - diff --git a/build/lib/caire-covid/retrieval.py b/build/lib/caire-covid/retrieval.py deleted file mode 100644 index 6750ef1..0000000 --- a/build/lib/caire-covid/retrieval.py +++ /dev/null @@ -1,83 +0,0 @@ - -import json -import requests - -def retrieve_paragraph(query): - url = "http://hlt027.ece.ust.hk:5000/query_paragraph" - - payload = "{\n\t\"text\": 
\""+query+"\"\n}" - headers = { - 'Content-Type': "application/json", - 'cache-control': "no-cache", - 'Postman-Token': "696fa512-5fed-45ca-bbe7-b7a1b4d19fe4" - } - response = requests.request("POST", url, data=payload, headers=headers) - - response = response.json() - return response - - -def information_retrieval(file_name): - """ - Inputs: - file_name: file name - Outputs: - all_results: - List [{ - "question": "xxxx", - "data": retri_result - }] - data_for_qa: - List [{ - "question": "xxxx", - "data": - { - "answer": "", - "context": [paragraph1, paragraph2, ...], - } - }] - """ - with open(file_name) as f: - json_file = json.load(f) - subtasks = json_file["sub_task"] - - all_results = [] - data_for_qa = [] - for item in subtasks: - questions = item["questions"] - for query in questions: - result_item = {"question" : query} - retri_result = retrieve_paragraph(query) - result_item["data"] = retri_result - - qa_item = {"question": query} - context = [] - titles = [] - doi = [] - count = 1 - for item in retri_result: - #context.append(item["paragraph"] if "paragraph" in item and len(item["paragraph"]) > 0 else item["abstract"]) - if count>20: - break - if 'abstract' in item and len(item['abstract']) > 0: - context.append(item['abstract']) - doi.append(item["doi"]) - titles.append(item["title"]) - count+=1 - if 'paragraphs' in item: - # for para in item['paragraphs']: - # context.append(para['text']) - # count+=1 - # if count>20: - # break - context.append(item['paragraphs'][0]['text']) - doi.append(item["doi"]) - titles.append(item["title"]) - count+=1 - - qa_item["data"] = {"answer": "", "context": context, "doi": doi, "titles": titles} - - all_results.append(result_item) - data_for_qa.append(qa_item) - - return all_results, data_for_qa \ No newline at end of file diff --git a/build/lib/caire-covid/test_api.py b/build/lib/caire-covid/test_api.py deleted file mode 100644 index b4b6d86..0000000 --- a/build/lib/caire-covid/test_api.py +++ /dev/null @@ -1,51 +0,0 @@ -from flask import Flask, request, jsonify -import json -from retrieval import retrieve_paragraph -from qa import QaModule - -def get_qa_result(query): - temp_json = retrieve_paragraph(query) - qa_item = {'question': query} - contexts = [] - titles = [] - doi = [] - count = 1 - for item in temp_json: - if count>10: - break - if 'abstract' in item and len(item['abstract']) > 0: - contexts.append(item['abstract']) - if 'paragraphs' in item: - contexts.append(item['paragraphs'][0]['text']) - doi.append(item["doi"]) - titles.append(item["title"]) - count+=1 - #print(len(doi), len(titles)) - qa_item['data'] = {'answer': '', 'context':contexts, 'doi': doi, 'titles': titles} - data_for_qa = [qa_item] - qa_model = QaModule(['mrqa', 'biobert']) - answers = qa_model.getAnswers(data_for_qa) - output_list = [] - for i in range(len(answers[0]['data']['answer'])): - outJson = {} - outJson['question'] = answers[0]['question'] - outJson['answer'] = answers[0]['data']['answer'][i] - outJson['context'] = answers[0]['data']['context'][i] - outJson['doi'] = doi[i] - outJson['title'] = titles[i] - output_list.append(outJson) - #print(len(output_list)) - return output_list - -#print(json.dumps(get_qa_result('incubation period of covid-19 in humans'), indent=4)) - -app = Flask(__name__) - -@app.route('/query_qa', methods=['POST']) -def return_matches(): - content = request.json - out = get_qa_result(content['text']) - return jsonify(out) - -if __name__ == '__main__': - app.run(host= '0.0.0.0',debug=True) diff --git a/build/lib/caireCovid/__init__.py 
b/build/lib/caireCovid/__init__.py deleted file mode 100644 index 142e75a..0000000 --- a/build/lib/caireCovid/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .retrieval import information_retrieval -from .qa import QaModule, print_answers_in_file diff --git a/build/lib/caireCovid/biobert/__init__.py b/build/lib/caireCovid/biobert/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/build/lib/caireCovid/biobert/modeling.py b/build/lib/caireCovid/biobert/modeling.py deleted file mode 100644 index 88443f4..0000000 --- a/build/lib/caireCovid/biobert/modeling.py +++ /dev/null @@ -1,988 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""The main BERT model and related functions.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import copy -import json -import math -import re -import six -import tensorflow as tf - - -class BertConfig(object): - """Configuration for `BertModel`.""" - - def __init__(self, - vocab_size, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02): - """Constructs BertConfig. - - Args: - vocab_size: Vocabulary size of `inputs_ids` in `BertModel`. - hidden_size: Size of the encoder layers and the pooler layer. - num_hidden_layers: Number of hidden layers in the Transformer encoder. - num_attention_heads: Number of attention heads for each attention layer in - the Transformer encoder. - intermediate_size: The size of the "intermediate" (i.e., feed-forward) - layer in the Transformer encoder. - hidden_act: The non-linear activation function (function or string) in the - encoder and pooler. - hidden_dropout_prob: The dropout probability for all fully connected - layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob: The dropout ratio for the attention - probabilities. - max_position_embeddings: The maximum sequence length that this model might - ever be used with. Typically set this to something large just in case - (e.g., 512 or 1024 or 2048). - type_vocab_size: The vocabulary size of the `token_type_ids` passed into - `BertModel`. - initializer_range: The stdev of the truncated_normal_initializer for - initializing all weight matrices. 
- """ - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - - @classmethod - def from_dict(cls, json_object): - """Constructs a `BertConfig` from a Python dictionary of parameters.""" - config = BertConfig(vocab_size=None) - for (key, value) in six.iteritems(json_object): - config.__dict__[key] = value - return config - - @classmethod - def from_json_file(cls, json_file): - """Constructs a `BertConfig` from a json file of parameters.""" - with tf.gfile.GFile(json_file, "r") as reader: - text = reader.read() - return cls.from_dict(json.loads(text)) - - def to_dict(self): - """Serializes this instance to a Python dictionary.""" - output = copy.deepcopy(self.__dict__) - return output - - def to_json_string(self): - """Serializes this instance to a JSON string.""" - return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" - - -class BertModel(object): - """BERT model ("Bidirectional Encoder Representations from Transformers"). - - Example usage: - - ```python - # Already been converted into WordPiece token ids - input_ids = tf.constant([[31, 51, 99], [15, 5, 0]]) - input_mask = tf.constant([[1, 1, 1], [1, 1, 0]]) - token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]]) - - config = modeling.BertConfig(vocab_size=32000, hidden_size=512, - num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024) - - model = modeling.BertModel(config=config, is_training=True, - input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids) - - label_embeddings = tf.get_variable(...) - pooled_output = model.get_pooled_output() - logits = tf.matmul(pooled_output, label_embeddings) - ... - ``` - """ - - def __init__(self, - config, - is_training, - input_ids, - input_mask=None, - token_type_ids=None, - use_one_hot_embeddings=True, - scope=None): - """Constructor for BertModel. - - Args: - config: `BertConfig` instance. - is_training: bool. true for training model, false for eval model. Controls - whether dropout will be applied. - input_ids: int32 Tensor of shape [batch_size, seq_length]. - input_mask: (optional) int32 Tensor of shape [batch_size, seq_length]. - token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. - use_one_hot_embeddings: (optional) bool. Whether to use one-hot word - embeddings or tf.embedding_lookup() for the word embeddings. On the TPU, - it is much faster if this is True, on the CPU or GPU, it is faster if - this is False. - scope: (optional) variable scope. Defaults to "bert". - - Raises: - ValueError: The config is invalid or one of the input tensor shapes - is invalid. 
- """ - config = copy.deepcopy(config) - if not is_training: - config.hidden_dropout_prob = 0.0 - config.attention_probs_dropout_prob = 0.0 - - input_shape = get_shape_list(input_ids, expected_rank=2) - batch_size = input_shape[0] - seq_length = input_shape[1] - - if input_mask is None: - input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32) - - if token_type_ids is None: - token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32) - - with tf.variable_scope(scope, default_name="bert"): - with tf.variable_scope("embeddings"): - # Perform embedding lookup on the word ids. - (self.embedding_output, self.embedding_table) = embedding_lookup( - input_ids=input_ids, - vocab_size=config.vocab_size, - embedding_size=config.hidden_size, - initializer_range=config.initializer_range, - word_embedding_name="word_embeddings", - use_one_hot_embeddings=use_one_hot_embeddings) - - # Add positional embeddings and token type embeddings, then layer - # normalize and perform dropout. - self.embedding_output = embedding_postprocessor( - input_tensor=self.embedding_output, - use_token_type=True, - token_type_ids=token_type_ids, - token_type_vocab_size=config.type_vocab_size, - token_type_embedding_name="token_type_embeddings", - use_position_embeddings=True, - position_embedding_name="position_embeddings", - initializer_range=config.initializer_range, - max_position_embeddings=config.max_position_embeddings, - dropout_prob=config.hidden_dropout_prob) - - with tf.variable_scope("encoder"): - # This converts a 2D mask of shape [batch_size, seq_length] to a 3D - # mask of shape [batch_size, seq_length, seq_length] which is used - # for the attention scores. - attention_mask = create_attention_mask_from_input_mask( - input_ids, input_mask) - - # Run the stacked transformer. - # `sequence_output` shape = [batch_size, seq_length, hidden_size]. - self.all_encoder_layers = transformer_model( - input_tensor=self.embedding_output, - attention_mask=attention_mask, - hidden_size=config.hidden_size, - num_hidden_layers=config.num_hidden_layers, - num_attention_heads=config.num_attention_heads, - intermediate_size=config.intermediate_size, - intermediate_act_fn=get_activation(config.hidden_act), - hidden_dropout_prob=config.hidden_dropout_prob, - attention_probs_dropout_prob=config.attention_probs_dropout_prob, - initializer_range=config.initializer_range, - do_return_all_layers=True) - - self.sequence_output = self.all_encoder_layers[-1] - # The "pooler" converts the encoded sequence tensor of shape - # [batch_size, seq_length, hidden_size] to a tensor of shape - # [batch_size, hidden_size]. This is necessary for segment-level - # (or segment-pair-level) classification tasks where we need a fixed - # dimensional representation of the segment. - with tf.variable_scope("pooler"): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. We assume that this has been pre-trained - first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1) - self.pooled_output = tf.layers.dense( - first_token_tensor, - config.hidden_size, - activation=tf.tanh, - kernel_initializer=create_initializer(config.initializer_range)) - - def get_pooled_output(self): - return self.pooled_output - - def get_sequence_output(self): - """Gets final hidden layer of encoder. - - Returns: - float Tensor of shape [batch_size, seq_length, hidden_size] corresponding - to the final hidden of the transformer encoder. 
- """ - return self.sequence_output - - def get_all_encoder_layers(self): - return self.all_encoder_layers - - def get_embedding_output(self): - """Gets output of the embedding lookup (i.e., input to the transformer). - - Returns: - float Tensor of shape [batch_size, seq_length, hidden_size] corresponding - to the output of the embedding layer, after summing the word - embeddings with the positional embeddings and the token type embeddings, - then performing layer normalization. This is the input to the transformer. - """ - return self.embedding_output - - def get_embedding_table(self): - return self.embedding_table - - -def gelu(input_tensor): - """Gaussian Error Linear Unit. - - This is a smoother version of the RELU. - Original paper: https://arxiv.org/abs/1606.08415 - - Args: - input_tensor: float Tensor to perform activation. - - Returns: - `input_tensor` with the GELU activation applied. - """ - cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0))) - return input_tensor * cdf - - -def get_activation(activation_string): - """Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`. - - Args: - activation_string: String name of the activation function. - - Returns: - A Python function corresponding to the activation function. If - `activation_string` is None, empty, or "linear", this will return None. - If `activation_string` is not a string, it will return `activation_string`. - - Raises: - ValueError: The `activation_string` does not correspond to a known - activation. - """ - - # We assume that anything that"s not a string is already an activation - # function, so we just return it. - if not isinstance(activation_string, six.string_types): - return activation_string - - if not activation_string: - return None - - act = activation_string.lower() - if act == "linear": - return None - elif act == "relu": - return tf.nn.relu - elif act == "gelu": - return gelu - elif act == "tanh": - return tf.tanh - else: - raise ValueError("Unsupported activation: %s" % act) - - -def get_assignment_map_from_checkpoint(tvars, init_checkpoint): - """Compute the union of the current variables and checkpoint variables.""" - assignment_map = {} - initialized_variable_names = {} - - name_to_variable = collections.OrderedDict() - for var in tvars: - name = var.name - m = re.match("^(.*):\\d+$", name) - if m is not None: - name = m.group(1) - name_to_variable[name] = var - - init_vars = tf.train.list_variables(init_checkpoint) - - assignment_map = collections.OrderedDict() - for x in init_vars: - (name, var) = (x[0], x[1]) - if name not in name_to_variable: - continue - assignment_map[name] = name - initialized_variable_names[name] = 1 - initialized_variable_names[name + ":0"] = 1 - - return (assignment_map, initialized_variable_names) - - -def dropout(input_tensor, dropout_prob): - """Perform dropout. - - Args: - input_tensor: float Tensor. - dropout_prob: Python float. The probability of dropping out a value (NOT of - *keeping* a dimension as in `tf.nn.dropout`). - - Returns: - A version of `input_tensor` with dropout applied. 
- """ - if dropout_prob is None or dropout_prob == 0.0: - return input_tensor - - output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob) - return output - - -def layer_norm(input_tensor, name=None): - """Run layer normalization on the last dimension of the tensor.""" - return tf.contrib.layers.layer_norm( - inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name) - - -def layer_norm_and_dropout(input_tensor, dropout_prob, name=None): - """Runs layer normalization followed by dropout.""" - output_tensor = layer_norm(input_tensor, name) - output_tensor = dropout(output_tensor, dropout_prob) - return output_tensor - - -def create_initializer(initializer_range=0.02): - """Creates a `truncated_normal_initializer` with the given range.""" - return tf.truncated_normal_initializer(stddev=initializer_range) - - -def embedding_lookup(input_ids, - vocab_size, - embedding_size=128, - initializer_range=0.02, - word_embedding_name="word_embeddings", - use_one_hot_embeddings=False): - """Looks up words embeddings for id tensor. - - Args: - input_ids: int32 Tensor of shape [batch_size, seq_length] containing word - ids. - vocab_size: int. Size of the embedding vocabulary. - embedding_size: int. Width of the word embeddings. - initializer_range: float. Embedding initialization range. - word_embedding_name: string. Name of the embedding table. - use_one_hot_embeddings: bool. If True, use one-hot method for word - embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better - for TPUs. - - Returns: - float Tensor of shape [batch_size, seq_length, embedding_size]. - """ - # This function assumes that the input is of shape [batch_size, seq_length, - # num_inputs]. - # - # If the input is a 2D tensor of shape [batch_size, seq_length], we - # reshape to [batch_size, seq_length, 1]. - if input_ids.shape.ndims == 2: - input_ids = tf.expand_dims(input_ids, axis=[-1]) - - embedding_table = tf.get_variable( - name=word_embedding_name, - shape=[vocab_size, embedding_size], - initializer=create_initializer(initializer_range)) - - if use_one_hot_embeddings: - flat_input_ids = tf.reshape(input_ids, [-1]) - one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size) - output = tf.matmul(one_hot_input_ids, embedding_table) - else: - output = tf.nn.embedding_lookup(embedding_table, input_ids) - - input_shape = get_shape_list(input_ids) - - output = tf.reshape(output, - input_shape[0:-1] + [input_shape[-1] * embedding_size]) - return (output, embedding_table) - - -def embedding_postprocessor(input_tensor, - use_token_type=False, - token_type_ids=None, - token_type_vocab_size=16, - token_type_embedding_name="token_type_embeddings", - use_position_embeddings=True, - position_embedding_name="position_embeddings", - initializer_range=0.02, - max_position_embeddings=512, - dropout_prob=0.1): - """Performs various post-processing on a word embedding tensor. - - Args: - input_tensor: float Tensor of shape [batch_size, seq_length, - embedding_size]. - use_token_type: bool. Whether to add embeddings for `token_type_ids`. - token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length]. - Must be specified if `use_token_type` is True. - token_type_vocab_size: int. The vocabulary size of `token_type_ids`. - token_type_embedding_name: string. The name of the embedding table variable - for token type ids. - use_position_embeddings: bool. Whether to add position embeddings for the - position of each token in the sequence. - position_embedding_name: string. 
The name of the embedding table variable - for positional embeddings. - initializer_range: float. Range of the weight initialization. - max_position_embeddings: int. Maximum sequence length that might ever be - used with this model. This can be longer than the sequence length of - input_tensor, but cannot be shorter. - dropout_prob: float. Dropout probability applied to the final output tensor. - - Returns: - float tensor with same shape as `input_tensor`. - - Raises: - ValueError: One of the tensor shapes or input values is invalid. - """ - input_shape = get_shape_list(input_tensor, expected_rank=3) - batch_size = input_shape[0] - seq_length = input_shape[1] - width = input_shape[2] - - output = input_tensor - - if use_token_type: - if token_type_ids is None: - raise ValueError("`token_type_ids` must be specified if" - "`use_token_type` is True.") - token_type_table = tf.get_variable( - name=token_type_embedding_name, - shape=[token_type_vocab_size, width], - initializer=create_initializer(initializer_range)) - # This vocab will be small so we always do one-hot here, since it is always - # faster for a small vocabulary. - flat_token_type_ids = tf.reshape(token_type_ids, [-1]) - one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size) - token_type_embeddings = tf.matmul(one_hot_ids, token_type_table) - token_type_embeddings = tf.reshape(token_type_embeddings, - [batch_size, seq_length, width]) - output += token_type_embeddings - - if use_position_embeddings: - assert_op = tf.assert_less_equal(seq_length, max_position_embeddings) - with tf.control_dependencies([assert_op]): - full_position_embeddings = tf.get_variable( - name=position_embedding_name, - shape=[max_position_embeddings, width], - initializer=create_initializer(initializer_range)) - # Since the position embedding table is a learned variable, we create it - # using a (long) sequence length `max_position_embeddings`. The actual - # sequence length might be shorter than this, for faster training of - # tasks that do not have long sequences. - # - # So `full_position_embeddings` is effectively an embedding table - # for position [0, 1, 2, ..., max_position_embeddings-1], and the current - # sequence has positions [0, 1, 2, ... seq_length-1], so we can just - # perform a slice. - position_embeddings = tf.slice(full_position_embeddings, [0, 0], - [seq_length, -1]) - num_dims = len(output.shape.as_list()) - - # Only the last two dimensions are relevant (`seq_length` and `width`), so - # we broadcast among the first dimensions, which is typically just - # the batch size. - position_broadcast_shape = [] - for _ in range(num_dims - 2): - position_broadcast_shape.append(1) - position_broadcast_shape.extend([seq_length, width]) - position_embeddings = tf.reshape(position_embeddings, - position_broadcast_shape) - output += position_embeddings - - output = layer_norm_and_dropout(output, dropout_prob) - return output - - -def create_attention_mask_from_input_mask(from_tensor, to_mask): - """Create 3D attention mask from a 2D tensor mask. - - Args: - from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. - to_mask: int32 Tensor of shape [batch_size, to_seq_length]. - - Returns: - float Tensor of shape [batch_size, from_seq_length, to_seq_length]. 
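`create_attention_mask_from_input_mask` turns a 2D [batch_size, to_seq_length] 0/1 mask into a 3D [batch_size, from_seq_length, to_seq_length] mask by multiplying a column of ones with the reshaped key-side mask. A NumPy sketch of that broadcast (the function name below is made up for the example):

# NumPy sketch of the 2D -> 3D attention-mask broadcast; shapes follow the
# deleted create_attention_mask_from_input_mask.
import numpy as np

def attention_mask_from_input_mask(batch_size, from_seq_length, to_mask):
    to_mask = to_mask.reshape(batch_size, 1, -1).astype(np.float32)
    broadcast_ones = np.ones((batch_size, from_seq_length, 1), np.float32)
    return broadcast_ones * to_mask      # [batch, from_len, to_len]

to_mask = np.array([[1, 1, 1], [1, 1, 0]])          # second example is padded
mask = attention_mask_from_input_mask(2, 3, to_mask)
assert mask.shape == (2, 3, 3)
assert mask[1, 0].tolist() == [1.0, 1.0, 0.0]       # every query row sees the
                                                    # same key-side padding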
- """ - from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) - batch_size = from_shape[0] - from_seq_length = from_shape[1] - - to_shape = get_shape_list(to_mask, expected_rank=2) - to_seq_length = to_shape[1] - - to_mask = tf.cast( - tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32) - - # We don't assume that `from_tensor` is a mask (although it could be). We - # don't actually care if we attend *from* padding tokens (only *to* padding) - # tokens so we create a tensor of all ones. - # - # `broadcast_ones` = [batch_size, from_seq_length, 1] - broadcast_ones = tf.ones( - shape=[batch_size, from_seq_length, 1], dtype=tf.float32) - - # Here we broadcast along two dimensions to create the mask. - mask = broadcast_ones * to_mask - - return mask - - -def attention_layer(from_tensor, - to_tensor, - attention_mask=None, - num_attention_heads=1, - size_per_head=512, - query_act=None, - key_act=None, - value_act=None, - attention_probs_dropout_prob=0.0, - initializer_range=0.02, - do_return_2d_tensor=False, - batch_size=None, - from_seq_length=None, - to_seq_length=None): - """Performs multi-headed attention from `from_tensor` to `to_tensor`. - - This is an implementation of multi-headed attention based on "Attention - is all you Need". If `from_tensor` and `to_tensor` are the same, then - this is self-attention. Each timestep in `from_tensor` attends to the - corresponding sequence in `to_tensor`, and returns a fixed-with vector. - - This function first projects `from_tensor` into a "query" tensor and - `to_tensor` into "key" and "value" tensors. These are (effectively) a list - of tensors of length `num_attention_heads`, where each tensor is of shape - [batch_size, seq_length, size_per_head]. - - Then, the query and key tensors are dot-producted and scaled. These are - softmaxed to obtain attention probabilities. The value tensors are then - interpolated by these probabilities, then concatenated back to a single - tensor and returned. - - In practice, the multi-headed attention are done with transposes and - reshapes rather than actual separate tensors. - - Args: - from_tensor: float Tensor of shape [batch_size, from_seq_length, - from_width]. - to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width]. - attention_mask: (optional) int32 Tensor of shape [batch_size, - from_seq_length, to_seq_length]. The values should be 1 or 0. The - attention scores will effectively be set to -infinity for any positions in - the mask that are 0, and will be unchanged for positions that are 1. - num_attention_heads: int. Number of attention heads. - size_per_head: int. Size of each attention head. - query_act: (optional) Activation function for the query transform. - key_act: (optional) Activation function for the key transform. - value_act: (optional) Activation function for the value transform. - attention_probs_dropout_prob: (optional) float. Dropout probability of the - attention probabilities. - initializer_range: float. Range of the weight initializer. - do_return_2d_tensor: bool. If True, the output will be of shape [batch_size - * from_seq_length, num_attention_heads * size_per_head]. If False, the - output will be of shape [batch_size, from_seq_length, num_attention_heads - * size_per_head]. - batch_size: (Optional) int. If the input is 2D, this might be the batch size - of the 3D version of the `from_tensor` and `to_tensor`. - from_seq_length: (Optional) If the input is 2D, this might be the seq length - of the 3D version of the `from_tensor`. 
- to_seq_length: (Optional) If the input is 2D, this might be the seq length - of the 3D version of the `to_tensor`. - - Returns: - float Tensor of shape [batch_size, from_seq_length, - num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is - true, this will be of shape [batch_size * from_seq_length, - num_attention_heads * size_per_head]). - - Raises: - ValueError: Any of the arguments or tensor shapes are invalid. - """ - - def transpose_for_scores(input_tensor, batch_size, num_attention_heads, - seq_length, width): - output_tensor = tf.reshape( - input_tensor, [batch_size, seq_length, num_attention_heads, width]) - - output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3]) - return output_tensor - - from_shape = get_shape_list(from_tensor, expected_rank=[2, 3]) - to_shape = get_shape_list(to_tensor, expected_rank=[2, 3]) - - if len(from_shape) != len(to_shape): - raise ValueError( - "The rank of `from_tensor` must match the rank of `to_tensor`.") - - if len(from_shape) == 3: - batch_size = from_shape[0] - from_seq_length = from_shape[1] - to_seq_length = to_shape[1] - elif len(from_shape) == 2: - if (batch_size is None or from_seq_length is None or to_seq_length is None): - raise ValueError( - "When passing in rank 2 tensors to attention_layer, the values " - "for `batch_size`, `from_seq_length`, and `to_seq_length` " - "must all be specified.") - - # Scalar dimensions referenced here: - # B = batch size (number of sequences) - # F = `from_tensor` sequence length - # T = `to_tensor` sequence length - # N = `num_attention_heads` - # H = `size_per_head` - - from_tensor_2d = reshape_to_matrix(from_tensor) - to_tensor_2d = reshape_to_matrix(to_tensor) - - # `query_layer` = [B*F, N*H] - query_layer = tf.layers.dense( - from_tensor_2d, - num_attention_heads * size_per_head, - activation=query_act, - name="query", - kernel_initializer=create_initializer(initializer_range)) - - # `key_layer` = [B*T, N*H] - key_layer = tf.layers.dense( - to_tensor_2d, - num_attention_heads * size_per_head, - activation=key_act, - name="key", - kernel_initializer=create_initializer(initializer_range)) - - # `value_layer` = [B*T, N*H] - value_layer = tf.layers.dense( - to_tensor_2d, - num_attention_heads * size_per_head, - activation=value_act, - name="value", - kernel_initializer=create_initializer(initializer_range)) - - # `query_layer` = [B, N, F, H] - query_layer = transpose_for_scores(query_layer, batch_size, - num_attention_heads, from_seq_length, - size_per_head) - - # `key_layer` = [B, N, T, H] - key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads, - to_seq_length, size_per_head) - - # Take the dot product between "query" and "key" to get the raw - # attention scores. - # `attention_scores` = [B, N, F, T] - attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) - attention_scores = tf.multiply(attention_scores, - 1.0 / math.sqrt(float(size_per_head))) - - if attention_mask is not None: - # `attention_mask` = [B, 1, F, T] - attention_mask = tf.expand_dims(attention_mask, axis=[1]) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0 - - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. 
- attention_scores += adder - - # Normalize the attention scores to probabilities. - # `attention_probs` = [B, N, F, T] - attention_probs = tf.nn.softmax(attention_scores) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_probs = dropout(attention_probs, attention_probs_dropout_prob) - - # `value_layer` = [B, T, N, H] - value_layer = tf.reshape( - value_layer, - [batch_size, to_seq_length, num_attention_heads, size_per_head]) - - # `value_layer` = [B, N, T, H] - value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) - - # `context_layer` = [B, N, F, H] - context_layer = tf.matmul(attention_probs, value_layer) - - # `context_layer` = [B, F, N, H] - context_layer = tf.transpose(context_layer, [0, 2, 1, 3]) - - if do_return_2d_tensor: - # `context_layer` = [B*F, N*H] - context_layer = tf.reshape( - context_layer, - [batch_size * from_seq_length, num_attention_heads * size_per_head]) - else: - # `context_layer` = [B, F, N*H] - context_layer = tf.reshape( - context_layer, - [batch_size, from_seq_length, num_attention_heads * size_per_head]) - - return context_layer - - -def transformer_model(input_tensor, - attention_mask=None, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - intermediate_act_fn=gelu, - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - initializer_range=0.02, - do_return_all_layers=False): - """Multi-headed, multi-layer Transformer from "Attention is All You Need". - - This is almost an exact implementation of the original Transformer encoder. - - See the original paper: - https://arxiv.org/abs/1706.03762 - - Also see: - https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py - - Args: - input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size]. - attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length, - seq_length], with 1 for positions that can be attended to and 0 in - positions that should not be. - hidden_size: int. Hidden size of the Transformer. - num_hidden_layers: int. Number of layers (blocks) in the Transformer. - num_attention_heads: int. Number of attention heads in the Transformer. - intermediate_size: int. The size of the "intermediate" (a.k.a., feed - forward) layer. - intermediate_act_fn: function. The non-linear activation function to apply - to the output of the intermediate/feed-forward layer. - hidden_dropout_prob: float. Dropout probability for the hidden layers. - attention_probs_dropout_prob: float. Dropout probability of the attention - probabilities. - initializer_range: float. Range of the initializer (stddev of truncated - normal). - do_return_all_layers: Whether to also return all layers or just the final - layer. - - Returns: - float Tensor of shape [batch_size, seq_length, hidden_size], the final - hidden layer of the Transformer. - - Raises: - ValueError: A Tensor shape or parameter is invalid. 
- """ - if hidden_size % num_attention_heads != 0: - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (hidden_size, num_attention_heads)) - - attention_head_size = int(hidden_size / num_attention_heads) - input_shape = get_shape_list(input_tensor, expected_rank=3) - batch_size = input_shape[0] - seq_length = input_shape[1] - input_width = input_shape[2] - - # The Transformer performs sum residuals on all layers so the input needs - # to be the same as the hidden size. - if input_width != hidden_size: - raise ValueError("The width of the input tensor (%d) != hidden size (%d)" % - (input_width, hidden_size)) - - # We keep the representation as a 2D tensor to avoid re-shaping it back and - # forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on - # the GPU/CPU but may not be free on the TPU, so we want to minimize them to - # help the optimizer. - prev_output = reshape_to_matrix(input_tensor) - - all_layer_outputs = [] - for layer_idx in range(num_hidden_layers): - with tf.variable_scope("layer_%d" % layer_idx): - layer_input = prev_output - - with tf.variable_scope("attention"): - attention_heads = [] - with tf.variable_scope("self"): - attention_head = attention_layer( - from_tensor=layer_input, - to_tensor=layer_input, - attention_mask=attention_mask, - num_attention_heads=num_attention_heads, - size_per_head=attention_head_size, - attention_probs_dropout_prob=attention_probs_dropout_prob, - initializer_range=initializer_range, - do_return_2d_tensor=True, - batch_size=batch_size, - from_seq_length=seq_length, - to_seq_length=seq_length) - attention_heads.append(attention_head) - - attention_output = None - if len(attention_heads) == 1: - attention_output = attention_heads[0] - else: - # In the case where we have other sequences, we just concatenate - # them to the self-attention head before the projection. - attention_output = tf.concat(attention_heads, axis=-1) - - # Run a linear projection of `hidden_size` then add a residual - # with `layer_input`. - with tf.variable_scope("output"): - attention_output = tf.layers.dense( - attention_output, - hidden_size, - kernel_initializer=create_initializer(initializer_range)) - attention_output = dropout(attention_output, hidden_dropout_prob) - attention_output = layer_norm(attention_output + layer_input) - - # The activation is only applied to the "intermediate" hidden layer. - with tf.variable_scope("intermediate"): - intermediate_output = tf.layers.dense( - attention_output, - intermediate_size, - activation=intermediate_act_fn, - kernel_initializer=create_initializer(initializer_range)) - - # Down-project back to `hidden_size` then add the residual. - with tf.variable_scope("output"): - layer_output = tf.layers.dense( - intermediate_output, - hidden_size, - kernel_initializer=create_initializer(initializer_range)) - layer_output = dropout(layer_output, hidden_dropout_prob) - layer_output = layer_norm(layer_output + attention_output) - prev_output = layer_output - all_layer_outputs.append(layer_output) - - if do_return_all_layers: - final_outputs = [] - for layer_output in all_layer_outputs: - final_output = reshape_from_matrix(layer_output, input_shape) - final_outputs.append(final_output) - return final_outputs - else: - final_output = reshape_from_matrix(prev_output, input_shape) - return final_output - - -def get_shape_list(tensor, expected_rank=None, name=None): - """Returns a list of the shape of tensor, preferring static dimensions. 
- - Args: - tensor: A tf.Tensor object to find the shape of. - expected_rank: (optional) int. The expected rank of `tensor`. If this is - specified and the `tensor` has a different rank, and exception will be - thrown. - name: Optional name of the tensor for the error message. - - Returns: - A list of dimensions of the shape of tensor. All static dimensions will - be returned as python integers, and dynamic dimensions will be returned - as tf.Tensor scalars. - """ - if name is None: - name = tensor.name - - if expected_rank is not None: - assert_rank(tensor, expected_rank, name) - - shape = tensor.shape.as_list() - - non_static_indexes = [] - for (index, dim) in enumerate(shape): - if dim is None: - non_static_indexes.append(index) - - if not non_static_indexes: - return shape - - dyn_shape = tf.shape(tensor) - for index in non_static_indexes: - shape[index] = dyn_shape[index] - return shape - - -def reshape_to_matrix(input_tensor): - """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix).""" - ndims = input_tensor.shape.ndims - if ndims < 2: - raise ValueError("Input tensor must have at least rank 2. Shape = %s" % - (input_tensor.shape)) - if ndims == 2: - return input_tensor - - width = input_tensor.shape[-1] - output_tensor = tf.reshape(input_tensor, [-1, width]) - return output_tensor - - -def reshape_from_matrix(output_tensor, orig_shape_list): - """Reshapes a rank 2 tensor back to its original rank >= 2 tensor.""" - if len(orig_shape_list) == 2: - return output_tensor - - output_shape = get_shape_list(output_tensor) - - orig_dims = orig_shape_list[0:-1] - width = output_shape[-1] - - return tf.reshape(output_tensor, orig_dims + [width]) - - -def assert_rank(tensor, expected_rank, name=None): - """Raises an exception if the tensor rank is not of the expected rank. - - Args: - tensor: A tf.Tensor to check the rank of. - expected_rank: Python integer or list of integers, expected rank. - name: Optional name of the tensor for the error message. - - Raises: - ValueError: If the expected shape doesn't match the actual shape. - """ - if name is None: - name = tensor.name - - expected_rank_dict = {} - if isinstance(expected_rank, six.integer_types): - expected_rank_dict[expected_rank] = True - else: - for x in expected_rank: - expected_rank_dict[x] = True - - actual_rank = tensor.shape.ndims - if actual_rank not in expected_rank_dict: - scope_name = tf.get_variable_scope().name - raise ValueError( - "For the tensor `%s` in scope `%s`, the actual rank " - "`%d` (shape = %s) is not equal to the expected rank `%s`" % - (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank))) diff --git a/build/lib/caireCovid/biobert/optimization.py b/build/lib/caireCovid/biobert/optimization.py deleted file mode 100644 index d33dabd..0000000 --- a/build/lib/caireCovid/biobert/optimization.py +++ /dev/null @@ -1,174 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Functions and classes related to optimization (weight updates).""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import re -import tensorflow as tf - - -def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu): - """Creates an optimizer training op.""" - global_step = tf.train.get_or_create_global_step() - - learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32) - - # Implements linear decay of the learning rate. - learning_rate = tf.train.polynomial_decay( - learning_rate, - global_step, - num_train_steps, - end_learning_rate=0.0, - power=1.0, - cycle=False) - - # Implements linear warmup. I.e., if global_step < num_warmup_steps, the - # learning rate will be `global_step/num_warmup_steps * init_lr`. - if num_warmup_steps: - global_steps_int = tf.cast(global_step, tf.int32) - warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32) - - global_steps_float = tf.cast(global_steps_int, tf.float32) - warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) - - warmup_percent_done = global_steps_float / warmup_steps_float - warmup_learning_rate = init_lr * warmup_percent_done - - is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) - learning_rate = ( - (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate) - - # It is recommended that you use this optimizer for fine tuning, since this - # is how the model was trained (note that the Adam m/v variables are NOT - # loaded from init_checkpoint.) - optimizer = AdamWeightDecayOptimizer( - learning_rate=learning_rate, - weight_decay_rate=0.01, - beta_1=0.9, - beta_2=0.999, - epsilon=1e-6, - exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"]) - - if use_tpu: - optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) - - tvars = tf.trainable_variables() - grads = tf.gradients(loss, tvars) - - # This is how the model was pre-trained. - (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0) - - train_op = optimizer.apply_gradients( - zip(grads, tvars), global_step=global_step) - - # Normally the global step update is done inside of `apply_gradients`. - # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use - # a different optimizer, you should probably take this line out. 
- new_global_step = global_step + 1 - train_op = tf.group(train_op, [global_step.assign(new_global_step)]) - return train_op - - -class AdamWeightDecayOptimizer(tf.train.Optimizer): - """A basic Adam optimizer that includes "correct" L2 weight decay.""" - - def __init__(self, - learning_rate, - weight_decay_rate=0.0, - beta_1=0.9, - beta_2=0.999, - epsilon=1e-6, - exclude_from_weight_decay=None, - name="AdamWeightDecayOptimizer"): - """Constructs a AdamWeightDecayOptimizer.""" - super(AdamWeightDecayOptimizer, self).__init__(False, name) - - self.learning_rate = learning_rate - self.weight_decay_rate = weight_decay_rate - self.beta_1 = beta_1 - self.beta_2 = beta_2 - self.epsilon = epsilon - self.exclude_from_weight_decay = exclude_from_weight_decay - - def apply_gradients(self, grads_and_vars, global_step=None, name=None): - """See base class.""" - assignments = [] - for (grad, param) in grads_and_vars: - if grad is None or param is None: - continue - - param_name = self._get_variable_name(param.name) - - m = tf.get_variable( - name=param_name + "/adam_m", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - v = tf.get_variable( - name=param_name + "/adam_v", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - - # Standard Adam update. - next_m = ( - tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) - next_v = ( - tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, - tf.square(grad))) - - update = next_m / (tf.sqrt(next_v) + self.epsilon) - - # Just adding the square of the weights to the loss function is *not* - # the correct way of using L2 regularization/weight decay with Adam, - # since that will interact with the m and v parameters in strange ways. - # - # Instead we want ot decay the weights in a manner that doesn't interact - # with the m/v parameters. This is equivalent to adding the square - # of the weights to the loss with plain (non-momentum) SGD. - if self._do_use_weight_decay(param_name): - update += self.weight_decay_rate * param - - update_with_lr = self.learning_rate * update - - next_param = param - update_with_lr - - assignments.extend( - [param.assign(next_param), - m.assign(next_m), - v.assign(next_v)]) - return tf.group(*assignments, name=name) - - def _do_use_weight_decay(self, param_name): - """Whether to use L2 weight decay for `param_name`.""" - if not self.weight_decay_rate: - return False - if self.exclude_from_weight_decay: - for r in self.exclude_from_weight_decay: - if re.search(r, param_name) is not None: - return False - return True - - def _get_variable_name(self, param_name): - """Get the variable name from the tensor name.""" - m = re.match("^(.*):\\d+$", param_name) - if m is not None: - param_name = m.group(1) - return param_name diff --git a/build/lib/caireCovid/biobert/predictor_biobert.py b/build/lib/caireCovid/biobert/predictor_biobert.py deleted file mode 100644 index cdc1b01..0000000 --- a/build/lib/caireCovid/biobert/predictor_biobert.py +++ /dev/null @@ -1,1074 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Run BERT on SQuAD 1.1 and SQuAD 2.0.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import json -import math -import os -import random -from .modeling import (BertConfig, BertModel, get_shape_list, get_assignment_map_from_checkpoint) -from .optimization import (create_optimizer) -from .tokenization import (printable_text, whitespace_tokenize, BasicTokenizer, FullTokenizer) -import six -import tensorflow as tf - -class SquadExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. - """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (printable_text(self.qas_id)) - s += ", question_text: %s" % (printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", end_position: %d" % (self.end_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training): - """Read a SQuAD json file into a list of SquadExample.""" - is_bioasq=True # for BioASQ - - with tf.gfile.Open(input_file, "r") as reader: - #if is_bioasq: - #input_data = [{u'paragraphs':json.load(reader)["questions"], u'title':'bioASQ'}] # to fit the shape of squad code - #else: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - if is_bioasq: - paragraph_text.replace('/',' ') # need review - for c in paragraph_text: - if 
is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - - if FLAGS.version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - - 1] - # Only add answers where the text can be exactly recovered from the - # document. If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join( - doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - tf.logging.warning("Could not find answer: '%s' vs. '%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - -## TODO -def arrange_kaggle_data(input_data, is_training): - """Read a QA data jsonl file into a list of Examples.""" - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - paragraph_text = entry["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in entry["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - return examples - -def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - 
orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. - _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. 
- doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, - orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. - # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. - tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. 
- best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - use_one_hot_embeddings): - """Creates a classification model.""" - model = BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - final_hidden = model.get_sequence_output() - - final_hidden_shape = get_shape_list(final_hidden, expected_rank=3) - batch_size = final_hidden_shape[0] - seq_length = final_hidden_shape[1] - hidden_size = final_hidden_shape[2] - - output_weights = tf.get_variable( - "cls/squad/output_weights", [2, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) - - final_hidden_matrix = tf.reshape(final_hidden, - [batch_size * seq_length, hidden_size]) - logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - - logits = tf.reshape(logits, [batch_size, seq_length, 2]) - logits = tf.transpose(logits, [2, 0, 1]) - - unstacked_logits = tf.unstack(logits, axis=0) - - (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) - - return (start_logits, end_logits) - - -def model_fn_builder(bert_config, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) - - unique_ids = features["unique_ids"] - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (start_logits, end_logits) = create_model( - bert_config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - tvars = tf.trainable_variables() - - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - seq_length = 
get_shape_list(input_ids)[1] - - def compute_loss(logits, positions): - one_hot_positions = tf.one_hot( - positions, depth=seq_length, dtype=tf.float32) - log_probs = tf.nn.log_softmax(logits, axis=-1) - loss = -tf.reduce_mean( - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) - return loss - - start_positions = features["start_positions"] - end_positions = features["end_positions"] - - start_loss = compute_loss(start_logits, start_positions) - end_loss = compute_loss(end_logits, end_positions) - - total_loss = (start_loss + end_loss) / 2.0 - - train_op = create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, loss=total_loss, train_op=train_op) - - elif mode == tf.estimator.ModeKeys.PREDICT: - predictions = { - "unique_ids": unique_ids, - "start_logits": start_logits, - "end_logits": end_logits, - } - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, predictions=predictions) - - else: - raise ValueError( - "Only TRAIN and PREDICT modes are supported: %s" % (mode)) - - return output_spec - - return model_fn - - -def input_fn_builder(input_file, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. - for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. 
- d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn - - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_logits", "end_logits"]) - -def get_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, do_lower_case, FLAGS): - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", - ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) - - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - min_null_feature_index = 0 # the paragraph slice with min mull score - null_start_logit = 0 # the start logit at the slice with min null score - null_end_logit = 0 # the end logit at the slice with min null score - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - start_indexes = _get_best_indexes(result.start_logits, n_best_size) - end_indexes = _get_best_indexes(result.end_logits, n_best_size) - # if we could have irrelevant answers, get the min score of irrelevant - if FLAGS.version_2_with_negative: - feature_null_score = result.start_logits[0] + result.end_logits[0] - if feature_null_score < score_null: - score_null = feature_null_score - min_null_feature_index = feature_index - null_start_logit = result.start_logits[0] - null_end_logit = result.end_logits[0] - for start_index in start_indexes: - for end_index in end_indexes: - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. 
- if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index])) - - if FLAGS.version_2_with_negative: - prelim_predictions.append( - _PrelimPrediction( - feature_index=min_null_feature_index, - start_index=0, - end_index=0, - start_logit=null_start_logit, - end_logit=null_end_logit)) - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True) - - _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_logit", "end_logit"]) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - if pred.start_index > 0: # this is a non-null prediction - tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] - tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, do_lower_case, FLAGS) - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - else: - final_text = "" - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_logit=pred.start_logit, - end_logit=pred.end_logit)) - - # if we didn't inlude the empty option in the n-best, inlcude it - if FLAGS.version_2_with_negative: - if "" not in seen_predictions: - nbest.append( - _NbestPrediction( - text="", start_logit=null_start_logit, - end_logit=null_end_logit)) - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. 
- if not nbest: - nbest.append( - _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1 - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1 - - if not FLAGS.version_2_with_negative: - all_predictions[example.qas_id] = nbest_json[0]["text"] - else: - # predict "" iff the null score - the score of best non-null > threshold - score_diff = score_null - best_non_null_entry.start_logit - ( - best_non_null_entry.end_logit) - scores_diff_json[example.qas_id] = score_diff - if score_diff > FLAGS.null_score_diff_threshold: - all_predictions[example.qas_id] = "" - else: - all_predictions[example.qas_id] = best_non_null_entry.text - - return all_predictions - - -def get_final_text(pred_text, orig_text, do_lower_case, FLAGS): - """Project the tokenized prediction back to the original text.""" - - # When we created the data, we kept track of the alignment between original - # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So - # now `orig_text` contains the span of our original text corresponding to the - # span that we predicted. - # - # However, `orig_text` may contain extra characters that we don't want in - # our prediction. - # - # For example, let's say: - # pred_text = steve smith - # orig_text = Steve Smith's - # - # We don't want to return `orig_text` because it contains the extra "'s". - # - # We don't want to return `pred_text` because it's already been normalized - # (the SQuAD eval script also does punctuation stripping/lower casing but - # our tokenizer does additional normalization like stripping accent - # characters). - # - # What we really want to return is "Steve Smith". - # - # Therefore, we have to apply a semi-complicated alignment heruistic between - # `pred_text` and `orig_text` to get a character-to-charcter alignment. This - # can fail in certain cases in which case we just return `orig_text`. - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. 
- tokenizer = BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if FLAGS.verbose_logging: - tf.logging.info( - "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if FLAGS.verbose_logging: - tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", - orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. - tok_s_to_ns_map = {} - for (i, tok_index) in six.iteritems(tok_ns_to_s_map): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map start position") - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map end position") - return orig_text - - output_text = orig_text[orig_start_position:(orig_end_position + 1)] - return output_text - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, is_training): - # self.filename = filename - self.is_training = is_training - self.num_features = 0 - # self._writer = tf.python_io.TFRecordWriter(filename) - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 0 - if 
feature.is_impossible: - impossible = 1 - features["is_impossible"] = create_int_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - return tf_example.SerializeToString() - # self._writer.write(tf_example.SerializeToString()) - - # def close(self): - # self._writer.close() - - -def validate_flags_or_throw(FLAGS, bert_config): - """Validate the input FLAGS or throw an exception.""" - # tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, - # FLAGS.init_checkpoint) - - # if not FLAGS.do_train and not FLAGS.do_predict: - # raise ValueError("At least one of `do_train` or `do_predict` must be True.") - - # if FLAGS.do_train: - # if not FLAGS.train_file: - # raise ValueError( - # "If `do_train` is True, then `train_file` must be specified.") - # if FLAGS.do_predict: - # if not FLAGS.predict_file: - # raise ValueError( - # "If `do_predict` is True, then `predict_file` must be specified.") - - if FLAGS.max_seq_length > bert_config.max_position_embeddings: - raise ValueError( - "Cannot use sequence length %d because the BERT model " - "was only trained up to sequence length %d" % - (FLAGS.max_seq_length, bert_config.max_position_embeddings)) - - if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: - raise ValueError( - "The max_seq_length (%d) must be greater than max_query_length " - "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length)) - - -def biobert_predictor(FLAGS, predict_fn, data): - tf.logging.set_verbosity(tf.logging.INFO) - - bert_config = BertConfig.from_json_file(FLAGS.bert_config_file) - - validate_flags_or_throw(FLAGS, bert_config) - - tokenizer = FullTokenizer( - vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) - - - eval_examples = arrange_kaggle_data(data, is_training=False) - - eval_writer = FeatureWriter(is_training=False) - eval_features = [] - eval_features_inp = [] - - def append_feature(feature): - eval_features.append(feature) - eval_features_inp.append(eval_writer.process_feature(feature)) - - convert_examples_to_features( - examples=eval_examples, - tokenizer=tokenizer, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=False, - output_fn=append_feature) - - # If running eval on the TPU, you will need to specify the number of - # steps. - all_results = [] - for num, eval_feature in enumerate(eval_features_inp): - result = predict_fn({"examples":[eval_feature]}) - - # if len(all_results) % 1000 == 0: - # tf.logging.info("Processing example: %d" % (len(all_results))) - unique_id = int(result["unique_ids"]) - start_logits = [float(x) for x in result["start_logits"].flat] - end_logits = [float(x) for x in result["end_logits"].flat] - all_results.append( - RawResult( - unique_id=unique_id, - start_logits=start_logits, - end_logits=end_logits)) - - ret = get_predictions(eval_examples, eval_features, all_results, - FLAGS.n_best_size, FLAGS.max_answer_length, - FLAGS.do_lower_case, FLAGS) - return ret - -def main(): - raise NotImplementedError - -if __name__ == "__main__": - main() diff --git a/build/lib/caireCovid/biobert/run_factoid.py b/build/lib/caireCovid/biobert/run_factoid.py deleted file mode 100644 index fcfbc21..0000000 --- a/build/lib/caireCovid/biobert/run_factoid.py +++ /dev/null @@ -1,1290 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Run BERT on SQuAD 1.1 and SQuAD 2.0.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import json -import math -import os -import random -import modeling -import optimization -import tokenization -import six -import tensorflow as tf - -flags = tf.flags - -FLAGS = flags.FLAGS - -## Required parameters -flags.DEFINE_string( - "bert_config_file", None, - "The config json file corresponding to the pre-trained BERT model. " - "This specifies the model architecture.") - -flags.DEFINE_string("vocab_file", None, - "The vocabulary file that the BERT model was trained on.") - -flags.DEFINE_string( - "output_dir", None, - "The output directory where the model checkpoints will be written.") - -## Other parameters -flags.DEFINE_string("train_file", None, - "SQuAD json for training. E.g., train-v1.1.json") - -flags.DEFINE_string( - "predict_file", None, - "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") - -flags.DEFINE_string( - "init_checkpoint", None, - "Initial checkpoint (usually from a pre-trained BERT model).") - -flags.DEFINE_bool( - "do_lower_case", True, - "Whether to lower case the input text. Should be True for uncased " - "models and False for cased models.") - -flags.DEFINE_integer( - "max_seq_length", 384, - "The maximum total input sequence length after WordPiece tokenization. " - "Sequences longer than this will be truncated, and sequences shorter " - "than this will be padded.") - -flags.DEFINE_integer( - "doc_stride", 128, - "When splitting up a long document into chunks, how much stride to " - "take between chunks.") - -flags.DEFINE_integer( - "max_query_length", 64, - "The maximum number of tokens for the question. Questions longer than " - "this will be truncated to this length.") - -flags.DEFINE_bool("do_train", False, "Whether to run training.") - -flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.") - -flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") - -flags.DEFINE_integer("predict_batch_size", 8, - "Total batch size for predictions.") - -flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") - -flags.DEFINE_float("num_train_epochs", 3.0, - "Total number of training epochs to perform.") - -flags.DEFINE_float( - "warmup_proportion", 0.1, - "Proportion of training to perform linear learning rate warmup for. " - "E.g., 0.1 = 10% of training.") - -flags.DEFINE_integer("save_checkpoints_steps", 1000, - "How often to save the model checkpoint.") - -flags.DEFINE_integer("iterations_per_loop", 1000, - "How many steps to make in each estimator call.") - -flags.DEFINE_integer( - "n_best_size", 20, - "The total number of n-best predictions to generate in the " - "nbest_predictions.json output file.") - -flags.DEFINE_integer( - "max_answer_length", 30, - "The maximum length of an answer that can be generated. 
This is needed " - "because the start and end predictions are not conditioned on one another.") - -flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") - -tf.flags.DEFINE_string( - "tpu_name", None, - "The Cloud TPU to use for training. This should be either the name " - "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " - "url.") - -tf.flags.DEFINE_string( - "tpu_zone", None, - "[Optional] GCE zone where the Cloud TPU is located in. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string( - "gcp_project", None, - "[Optional] Project name for the Cloud TPU-enabled project. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") - -flags.DEFINE_integer( - "num_tpu_cores", 8, - "Only used if `use_tpu` is True. Total number of TPU cores to use.") - -flags.DEFINE_bool( - "verbose_logging", False, - "If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation.") - -flags.DEFINE_bool( - "version_2_with_negative", False, - "If true, the SQuAD examples contain some that do not have an answer.") - -flags.DEFINE_float( - "null_score_diff_threshold", 0.0, - "If null_score - best_non_null is greater than the threshold predict null.") - - -class SquadExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. - """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) - s += ", question_text: %s" % ( - tokenization.printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", end_position: %d" % (self.end_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training): - """Read a SQuAD json file into a list of SquadExample.""" - is_bioasq=True # for BioASQ - - with tf.gfile.Open(input_file, "r") as reader: - #if is_bioasq: - #input_data = 
[{u'paragraphs':json.load(reader)["questions"], u'title':'bioASQ'}] # to fit the shape of squad code - #else: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - if is_bioasq: - paragraph_text.replace('/',' ') # need review - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - - if FLAGS.version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - - 1] - # Only add answers where the text can be exactly recovered from the - # document. If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join( - doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join( - tokenization.whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - tf.logging.warning("Could not find answer: '%s' vs. 
'%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - - -def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. - _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. 
- while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. - doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - if example_index < 20: - tf.logging.info("*** Example ***") - tf.logging.info("unique_id: %s" % (unique_id)) - tf.logging.info("example_index: %s" % (example_index)) - tf.logging.info("doc_span_index: %s" % (doc_span_index)) - tf.logging.info("tokens: %s" % " ".join( - [tokenization.printable_text(x) for x in tokens])) - tf.logging.info("token_to_orig_map: %s" % " ".join( - ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) - tf.logging.info("token_is_max_context: %s" % " ".join([ - "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) - ])) - tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - tf.logging.info( - "input_mask: %s" % " ".join([str(x) for x in input_mask])) - tf.logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - if is_training and example.is_impossible: - tf.logging.info("impossible example") - if is_training and not example.is_impossible: - answer_text = " ".join(tokens[start_position:(end_position + 1)]) - tf.logging.info("start_position: %d" % (start_position)) - tf.logging.info("end_position: %d" % (end_position)) - tf.logging.info( - "answer: %s" % (tokenization.printable_text(answer_text))) - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, - orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. 
- # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. - tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - use_one_hot_embeddings): - """Creates a classification model.""" - model = modeling.BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - final_hidden = model.get_sequence_output() - - final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) - batch_size = final_hidden_shape[0] - seq_length = final_hidden_shape[1] - hidden_size = final_hidden_shape[2] - - output_weights = tf.get_variable( - "cls/squad/output_weights", [2, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) - - final_hidden_matrix = tf.reshape(final_hidden, - [batch_size * seq_length, hidden_size]) - logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - - logits = tf.reshape(logits, [batch_size, seq_length, 2]) - logits = tf.transpose(logits, [2, 0, 1]) - - unstacked_logits = tf.unstack(logits, axis=0) - - (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) - - return (start_logits, end_logits) - - -def model_fn_builder(bert_config, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for 
TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) - - unique_ids = features["unique_ids"] - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (start_logits, end_logits) = create_model( - bert_config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - tvars = tf.trainable_variables() - - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - seq_length = modeling.get_shape_list(input_ids)[1] - - def compute_loss(logits, positions): - one_hot_positions = tf.one_hot( - positions, depth=seq_length, dtype=tf.float32) - log_probs = tf.nn.log_softmax(logits, axis=-1) - loss = -tf.reduce_mean( - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) - return loss - - start_positions = features["start_positions"] - end_positions = features["end_positions"] - - start_loss = compute_loss(start_logits, start_positions) - end_loss = compute_loss(end_logits, end_positions) - - total_loss = (start_loss + end_loss) / 2.0 - - train_op = optimization.create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - elif mode == tf.estimator.ModeKeys.PREDICT: - predictions = { - "unique_ids": unique_ids, - "start_logits": start_logits, - "end_logits": end_logits, - } - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) - else: - raise ValueError( - "Only TRAIN and PREDICT modes are supported: %s" % (mode)) - - return output_spec - - return model_fn - - -def input_fn_builder(input_file, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports 
tf.int32. - # So cast all int64 to int32. - for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn - - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_logits", "end_logits"]) - - -def write_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, do_lower_case, output_prediction_file, - output_nbest_file, output_null_log_odds_file): - """Write final predictions to the json file and log-odds of null if needed.""" - tf.logging.info("Writing predictions to: %s" % (output_prediction_file)) - tf.logging.info("Writing nbest to: %s" % (output_nbest_file)) - - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", - ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) - - all_predictions = collections.OrderedDict() - all_nbest_json = collections.OrderedDict() - scores_diff_json = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - min_null_feature_index = 0 # the paragraph slice with min mull score - null_start_logit = 0 # the start logit at the slice with min null score - null_end_logit = 0 # the end logit at the slice with min null score - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - start_indexes = _get_best_indexes(result.start_logits, n_best_size) - end_indexes = _get_best_indexes(result.end_logits, n_best_size) - # if we could have irrelevant answers, get the min score of irrelevant - if FLAGS.version_2_with_negative: - feature_null_score = result.start_logits[0] + result.end_logits[0] - if feature_null_score < score_null: - score_null = feature_null_score - min_null_feature_index = feature_index - null_start_logit = result.start_logits[0] - null_end_logit = result.end_logits[0] - for start_index in start_indexes: - for end_index in end_indexes: - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. 
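# Illustrative sketch (assumed helper, not in the original file): the
# span filters applied in the loop below, gathered into one predicate.
def is_valid_span(start_index, end_index, feature, max_answer_length):
  if start_index >= len(feature.tokens) or end_index >= len(feature.tokens):
    return False  # index points past this feature's tokens
  if start_index not in feature.token_to_orig_map:
    return False  # start falls in the question / [CLS] / [SEP] region
  if end_index not in feature.token_to_orig_map:
    return False  # end falls outside the document part of the span
  if not feature.token_is_max_context.get(start_index, False):
    return False  # another doc span sees this token with more context
  if end_index < start_index:
    return False  # end must not precede start
  return end_index - start_index + 1 <= max_answer_length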
- if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index])) - - if FLAGS.version_2_with_negative: - prelim_predictions.append( - _PrelimPrediction( - feature_index=min_null_feature_index, - start_index=0, - end_index=0, - start_logit=null_start_logit, - end_logit=null_end_logit)) - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True) - - _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_logit", "end_logit"]) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - if pred.start_index > 0: # this is a non-null prediction - tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] - tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - - final_text = get_final_text(tok_text, orig_text, do_lower_case) - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - else: - final_text = "" - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_logit=pred.start_logit, - end_logit=pred.end_logit)) - - # if we didn't inlude the empty option in the n-best, inlcude it - if FLAGS.version_2_with_negative: - if "" not in seen_predictions: - nbest.append( - _NbestPrediction( - text="", start_logit=null_start_logit, - end_logit=null_end_logit)) - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. 
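# Illustrative sketch (standalone, names assumed): the n-best entries
# built below are ranked by start_logit + end_logit and converted into
# probabilities with a max-shifted softmax, mirroring _compute_softmax.
import math

def nbest_probs(summed_logits):
  if not summed_logits:
    return []
  max_score = max(summed_logits)
  exp_scores = [math.exp(s - max_score) for s in summed_logits]
  total = sum(exp_scores)
  return [x / total for x in exp_scores]

# e.g. nbest_probs([7.2, 5.1, 1.0]) returns three probabilities that sum
# to 1.0, heavily favouring the first (highest-scoring) candidate.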
- if not nbest: - nbest.append( - _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1 - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1 - - if not FLAGS.version_2_with_negative: - all_predictions[example.qas_id] = nbest_json[0]["text"] - else: - # predict "" iff the null score - the score of best non-null > threshold - score_diff = score_null - best_non_null_entry.start_logit - ( - best_non_null_entry.end_logit) - scores_diff_json[example.qas_id] = score_diff - if score_diff > FLAGS.null_score_diff_threshold: - all_predictions[example.qas_id] = "" - else: - all_predictions[example.qas_id] = best_non_null_entry.text - - all_nbest_json[example.qas_id] = nbest_json - - with tf.gfile.GFile(output_prediction_file, "w") as writer: - writer.write(json.dumps(all_predictions, indent=4) + "\n") - - with tf.gfile.GFile(output_nbest_file, "w") as writer: - writer.write(json.dumps(all_nbest_json, indent=4) + "\n") - - if FLAGS.version_2_with_negative: - with tf.gfile.GFile(output_null_log_odds_file, "w") as writer: - writer.write(json.dumps(scores_diff_json, indent=4) + "\n") - - -def get_final_text(pred_text, orig_text, do_lower_case): - """Project the tokenized prediction back to the original text.""" - - # When we created the data, we kept track of the alignment between original - # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So - # now `orig_text` contains the span of our original text corresponding to the - # span that we predicted. - # - # However, `orig_text` may contain extra characters that we don't want in - # our prediction. - # - # For example, let's say: - # pred_text = steve smith - # orig_text = Steve Smith's - # - # We don't want to return `orig_text` because it contains the extra "'s". - # - # We don't want to return `pred_text` because it's already been normalized - # (the SQuAD eval script also does punctuation stripping/lower casing but - # our tokenizer does additional normalization like stripping accent - # characters). - # - # What we really want to return is "Steve Smith". - # - # Therefore, we have to apply a semi-complicated alignment heruistic between - # `pred_text` and `orig_text` to get a character-to-charcter alignment. This - # can fail in certain cases in which case we just return `orig_text`. - - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - # We first tokenize `orig_text`, strip whitespace from the result - # and `pred_text`, and check if they are the same length. If they are - # NOT the same length, the heuristic has failed. If they are the same - # length, we assume the characters are one-to-one aligned. 
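# Illustrative sketch (standalone, name assumed): the space-stripping map
# that the alignment heuristic below relies on, with a toy example.
import collections

def strip_spaces_with_map(text):
  ns_chars = []
  ns_to_s_map = collections.OrderedDict()
  for i, c in enumerate(text):
    if c == " ":
      continue
    ns_to_s_map[len(ns_chars)] = i  # stripped index -> original index
    ns_chars.append(c)
  return "".join(ns_chars), ns_to_s_map

# strip_spaces_with_map("Steve Smith's")[0] == "SteveSmith's"; the returned
# map lets a span located in the stripped text (e.g. the prediction
# "steve smith") be projected back onto the original string, so the final
# answer is "Steve Smith" rather than "Steve Smith's".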
- tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) - - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - if FLAGS.verbose_logging: - tf.logging.info( - "Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - if FLAGS.verbose_logging: - tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", - orig_ns_text, tok_ns_text) - return orig_text - - # We then project the characters in `pred_text` back to `orig_text` using - # the character-to-character alignment. - tok_s_to_ns_map = {} - for (i, tok_index) in six.iteritems(tok_ns_to_s_map): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map start position") - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - if FLAGS.verbose_logging: - tf.logging.info("Couldn't map end position") - return orig_text - - output_text = orig_text[orig_start_position:(orig_end_position + 1)] - return output_text - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, filename, is_training): - self.filename = filename - self.is_training = is_training - self.num_features = 0 - self._writer = tf.python_io.TFRecordWriter(filename) - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 
0 - if feature.is_impossible: - impossible = 1 - features["is_impossible"] = create_int_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - self._writer.write(tf_example.SerializeToString()) - - def close(self): - self._writer.close() - - -def validate_flags_or_throw(bert_config): - """Validate the input FLAGS or throw an exception.""" - tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, - FLAGS.init_checkpoint) - - if not FLAGS.do_train and not FLAGS.do_predict: - raise ValueError("At least one of `do_train` or `do_predict` must be True.") - - if FLAGS.do_train: - if not FLAGS.train_file: - raise ValueError( - "If `do_train` is True, then `train_file` must be specified.") - if FLAGS.do_predict: - if not FLAGS.predict_file: - raise ValueError( - "If `do_predict` is True, then `predict_file` must be specified.") - - if FLAGS.max_seq_length > bert_config.max_position_embeddings: - raise ValueError( - "Cannot use sequence length %d because the BERT model " - "was only trained up to sequence length %d" % - (FLAGS.max_seq_length, bert_config.max_position_embeddings)) - - if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: - raise ValueError( - "The max_seq_length (%d) must be greater than max_query_length " - "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length)) - - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - - bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) - - validate_flags_or_throw(bert_config) - - tf.gfile.MakeDirs(FLAGS.output_dir) - - tokenizer = tokenization.FullTokenizer( - vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) - - tpu_cluster_resolver = None - if FLAGS.use_tpu and FLAGS.tpu_name: - tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( - FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) - - is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 - run_config = tf.contrib.tpu.RunConfig( - cluster=tpu_cluster_resolver, - master=FLAGS.master, - model_dir=FLAGS.output_dir, - save_checkpoints_steps=FLAGS.save_checkpoints_steps, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations_per_loop, - num_shards=FLAGS.num_tpu_cores, - per_host_input_for_training=is_per_host)) - - train_examples = None - num_train_steps = None - num_warmup_steps = None - if FLAGS.do_train: - train_examples = read_squad_examples( - input_file=FLAGS.train_file, is_training=True) - num_train_steps = int( - len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) - num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) - - # Pre-shuffle the input to avoid having to make a very large shuffle - # buffer in in the `input_fn`. - rng = random.Random(12345) - rng.shuffle(train_examples) - - model_fn = model_fn_builder( - bert_config=bert_config, - init_checkpoint=FLAGS.init_checkpoint, - learning_rate=FLAGS.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - use_tpu=FLAGS.use_tpu, - use_one_hot_embeddings=FLAGS.use_tpu) - - # If TPU is not available, this will fall back to normal Estimator on CPU - # or GPU. - estimator = tf.contrib.tpu.TPUEstimator( - use_tpu=FLAGS.use_tpu, - model_fn=model_fn, - config=run_config, - train_batch_size=FLAGS.train_batch_size, - predict_batch_size=FLAGS.predict_batch_size) - - if FLAGS.do_train: - # We write to a temporary file to avoid storing very large constant tensors - # in memory. 
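# Illustrative sketch (values assumed; uses the `collections` and
# `tensorflow as tf` imports at the top of this file): each InputFeatures
# becomes one serialized tf.train.Example record in the temporary
# TFRecord file written below.
def _int_feature(values):
  return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

_toy = collections.OrderedDict()
_toy["unique_ids"] = _int_feature([1000000000])
_toy["input_ids"] = _int_feature([101, 2054, 102, 0, 0])
_toy["input_mask"] = _int_feature([1, 1, 1, 0, 0])
_toy["segment_ids"] = _int_feature([0, 0, 0, 0, 0])
_serialized = tf.train.Example(
    features=tf.train.Features(feature=_toy)).SerializeToString()
# `_serialized` is the kind of byte string appended to train.tf_record.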
- train_writer = FeatureWriter( - filename=os.path.join(FLAGS.output_dir, "train.tf_record"), - is_training=True) - convert_examples_to_features( - examples=train_examples, - tokenizer=tokenizer, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=True, - output_fn=train_writer.process_feature) - train_writer.close() - - tf.logging.info("***** Running training *****") - tf.logging.info(" Num orig examples = %d", len(train_examples)) - tf.logging.info(" Num split examples = %d", train_writer.num_features) - tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) - tf.logging.info(" Num steps = %d", num_train_steps) - del train_examples - - train_input_fn = input_fn_builder( - input_file=train_writer.filename, - seq_length=FLAGS.max_seq_length, - is_training=True, - drop_remainder=True) - estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) - - if FLAGS.do_predict: - eval_examples = read_squad_examples( - input_file=FLAGS.predict_file, is_training=False) - - eval_writer = FeatureWriter( - filename=os.path.join(FLAGS.output_dir, "eval.tf_record"), - is_training=False) - eval_features = [] - - def append_feature(feature): - eval_features.append(feature) - eval_writer.process_feature(feature) - - convert_examples_to_features( - examples=eval_examples, - tokenizer=tokenizer, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=False, - output_fn=append_feature) - eval_writer.close() - - tf.logging.info("***** Running predictions *****") - tf.logging.info(" Num orig examples = %d", len(eval_examples)) - tf.logging.info(" Num split examples = %d", len(eval_features)) - tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size) - - all_results = [] - - predict_input_fn = input_fn_builder( - input_file=eval_writer.filename, - seq_length=FLAGS.max_seq_length, - is_training=False, - drop_remainder=False) - - # If running eval on the TPU, you will need to specify the number of - # steps. - all_results = [] - for result in estimator.predict( - predict_input_fn, yield_single_examples=True): - if len(all_results) % 1000 == 0: - tf.logging.info("Processing example: %d" % (len(all_results))) - unique_id = int(result["unique_ids"]) - start_logits = [float(x) for x in result["start_logits"].flat] - end_logits = [float(x) for x in result["end_logits"].flat] - all_results.append( - RawResult( - unique_id=unique_id, - start_logits=start_logits, - end_logits=end_logits)) - - output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json") - output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json") - output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json") - - write_predictions(eval_examples, eval_features, all_results, - FLAGS.n_best_size, FLAGS.max_answer_length, - FLAGS.do_lower_case, output_prediction_file, - output_nbest_file, output_null_log_odds_file) - - -if __name__ == "__main__": - flags.mark_flag_as_required("vocab_file") - flags.mark_flag_as_required("bert_config_file") - flags.mark_flag_as_required("output_dir") - tf.app.run() diff --git a/build/lib/caireCovid/biobert/save_biobert.py b/build/lib/caireCovid/biobert/save_biobert.py deleted file mode 100644 index 5fa16ba..0000000 --- a/build/lib/caireCovid/biobert/save_biobert.py +++ /dev/null @@ -1,912 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Run BERT on SQuAD 1.1 and SQuAD 2.0.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import json -import math -import os -import random -import modeling -import optimization -import tokenization -import six -import tensorflow as tf - -flags = tf.flags - -FLAGS = flags.FLAGS - -## Required parameters -flags.DEFINE_string( - "bert_config_file", None, - "The config json file corresponding to the pre-trained BERT model. " - "This specifies the model architecture.") - -flags.DEFINE_string("vocab_file", None, - "The vocabulary file that the BERT model was trained on.") - -flags.DEFINE_string( - "output_dir", None, - "The output directory where the model checkpoints will be written.") - -flags.DEFINE_string( - "export_dir_base", None, - "The output directory where the model will be saved.") - -## Other parameters -flags.DEFINE_string("train_file", None, - "SQuAD json for training. E.g., train-v1.1.json") - -flags.DEFINE_string( - "predict_file", None, - "SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json") - -flags.DEFINE_string( - "init_checkpoint", None, - "Initial checkpoint (usually from a pre-trained BERT model).") - -flags.DEFINE_bool( - "do_lower_case", True, - "Whether to lower case the input text. Should be True for uncased " - "models and False for cased models.") - -flags.DEFINE_integer( - "max_seq_length", 384, - "The maximum total input sequence length after WordPiece tokenization. " - "Sequences longer than this will be truncated, and sequences shorter " - "than this will be padded.") - -flags.DEFINE_integer( - "doc_stride", 128, - "When splitting up a long document into chunks, how much stride to " - "take between chunks.") - -flags.DEFINE_integer( - "max_query_length", 64, - "The maximum number of tokens for the question. Questions longer than " - "this will be truncated to this length.") - -flags.DEFINE_bool("do_train", False, "Whether to run training.") - -flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.") - -flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") - -flags.DEFINE_integer("predict_batch_size", 8, - "Total batch size for predictions.") - -flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") - -flags.DEFINE_float("num_train_epochs", 3.0, - "Total number of training epochs to perform.") - -flags.DEFINE_float( - "warmup_proportion", 0.1, - "Proportion of training to perform linear learning rate warmup for. 
" - "E.g., 0.1 = 10% of training.") - -flags.DEFINE_integer("save_checkpoints_steps", 1000, - "How often to save the model checkpoint.") - -flags.DEFINE_integer("iterations_per_loop", 1000, - "How many steps to make in each estimator call.") - -flags.DEFINE_integer( - "n_best_size", 20, - "The total number of n-best predictions to generate in the " - "nbest_predictions.json output file.") - -flags.DEFINE_integer( - "max_answer_length", 30, - "The maximum length of an answer that can be generated. This is needed " - "because the start and end predictions are not conditioned on one another.") - -flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") - -tf.flags.DEFINE_string( - "tpu_name", None, - "The Cloud TPU to use for training. This should be either the name " - "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 " - "url.") - -tf.flags.DEFINE_string( - "tpu_zone", None, - "[Optional] GCE zone where the Cloud TPU is located in. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string( - "gcp_project", None, - "[Optional] Project name for the Cloud TPU-enabled project. If not " - "specified, we will attempt to automatically detect the GCE project from " - "metadata.") - -tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") - -flags.DEFINE_integer( - "num_tpu_cores", 8, - "Only used if `use_tpu` is True. Total number of TPU cores to use.") - -flags.DEFINE_bool( - "verbose_logging", False, - "If true, all of the warnings related to data processing will be printed. " - "A number of warnings are expected for a normal SQuAD evaluation.") - -flags.DEFINE_bool( - "version_2_with_negative", False, - "If true, the SQuAD examples contain some that do not have an answer.") - -flags.DEFINE_float( - "null_score_diff_threshold", 0.0, - "If null_score - best_non_null is greater than the threshold predict null.") - - -class SquadExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. 
- """ - - def __init__(self, - qas_id, - question_text, - doc_tokens, - orig_answer_text=None, - start_position=None, - end_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.doc_tokens = doc_tokens - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) - s += ", question_text: %s" % ( - tokenization.printable_text(self.question_text)) - s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", end_position: %d" % (self.end_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - return s - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tokens, - token_to_orig_map, - token_is_max_context, - input_ids, - input_mask, - segment_ids, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tokens = tokens - self.token_to_orig_map = token_to_orig_map - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - - -def read_squad_examples(input_file, is_training): - """Read a SQuAD json file into a list of SquadExample.""" - is_bioasq=True # for BioASQ - - with tf.gfile.Open(input_file, "r") as reader: - #if is_bioasq: - #input_data = [{u'paragraphs':json.load(reader)["questions"], u'title':'bioASQ'}] # to fit the shape of squad code - #else: - input_data = json.load(reader)["data"] - - def is_whitespace(c): - if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: - return True - return False - - examples = [] - for entry in input_data: - for paragraph in entry["paragraphs"]: - paragraph_text = paragraph["context"] - doc_tokens = [] - char_to_word_offset = [] - prev_is_whitespace = True - if is_bioasq: - paragraph_text.replace('/',' ') # need review - for c in paragraph_text: - if is_whitespace(c): - prev_is_whitespace = True - else: - if prev_is_whitespace: - doc_tokens.append(c) - else: - doc_tokens[-1] += c - prev_is_whitespace = False - char_to_word_offset.append(len(doc_tokens) - 1) - - for qa in paragraph["qas"]: - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - end_position = None - orig_answer_text = None - is_impossible = False - if is_training: - - if FLAGS.version_2_with_negative: - is_impossible = qa["is_impossible"] - if (len(qa["answers"]) != 1) and (not is_impossible): - raise ValueError( - "For training, each question should have exactly 1 answer.") - if not is_impossible: - answer = qa["answers"][0] - orig_answer_text = answer["text"] - answer_offset = answer["answer_start"] - answer_length = len(orig_answer_text) - start_position = char_to_word_offset[answer_offset] - end_position = char_to_word_offset[answer_offset + answer_length - - 1] - # Only add answers where the text can be exactly recovered from the - # document. 
If this CAN'T happen it's likely due to weird Unicode - # stuff so we will just skip the example. - # - # Note that this means for training mode, every example is NOT - # guaranteed to be preserved. - actual_text = " ".join( - doc_tokens[start_position:(end_position + 1)]) - cleaned_answer_text = " ".join( - tokenization.whitespace_tokenize(orig_answer_text)) - if actual_text.find(cleaned_answer_text) == -1: - tf.logging.warning("Could not find answer: '%s' vs. '%s'", - actual_text, cleaned_answer_text) - continue - else: - start_position = -1 - end_position = -1 - orig_answer_text = "" - - example = SquadExample( - qas_id=qas_id, - question_text=question_text, - doc_tokens=doc_tokens, - orig_answer_text=orig_answer_text, - start_position=start_position, - end_position=end_position, - is_impossible=is_impossible) - examples.append(example) - - return examples - - -def convert_examples_to_features(examples, tokenizer, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn): - """Loads a data file into a list of `InputBatch`s.""" - - unique_id = 1000000000 - - for (example_index, example) in enumerate(examples): - query_tokens = tokenizer.tokenize(example.question_text) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - tok_to_orig_index = [] - orig_to_tok_index = [] - all_doc_tokens = [] - for (i, token) in enumerate(example.doc_tokens): - orig_to_tok_index.append(len(all_doc_tokens)) - sub_tokens = tokenizer.tokenize(token) - for sub_token in sub_tokens: - tok_to_orig_index.append(i) - all_doc_tokens.append(sub_token) - - tok_start_position = None - tok_end_position = None - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - if is_training and not example.is_impossible: - tok_start_position = orig_to_tok_index[example.start_position] - if example.end_position < len(example.doc_tokens) - 1: - tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 - else: - tok_end_position = len(all_doc_tokens) - 1 - (tok_start_position, tok_end_position) = _improve_answer_span( - all_doc_tokens, tok_start_position, tok_end_position, tokenizer, - example.orig_answer_text) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. 
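# Illustrative sketch (standalone, name assumed): the sliding-window
# chunking described above. Each span covers at most `max_tokens_for_doc`
# tokens and starts `doc_stride` tokens after the previous one.
def make_doc_spans(num_doc_tokens, max_tokens_for_doc, doc_stride):
  spans = []
  start_offset = 0
  while start_offset < num_doc_tokens:
    length = min(num_doc_tokens - start_offset, max_tokens_for_doc)
    spans.append((start_offset, length))
    if start_offset + length == num_doc_tokens:
      break
    start_offset += min(length, doc_stride)
  return spans

# e.g. make_doc_spans(10, 4, 2) -> [(0, 4), (2, 4), (4, 4), (6, 4)]: the
# last span already reaches the final token, so the loop stops there.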
- _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_to_orig_map = {} - token_is_max_context = {} - segment_ids = [] - tokens.append("[CLS]") - segment_ids.append(0) - for token in query_tokens: - tokens.append(token) - segment_ids.append(0) - tokens.append("[SEP]") - segment_ids.append(0) - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(1) - tokens.append("[SEP]") - segment_ids.append(1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - start_position = None - end_position = None - if is_training and not example.is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. 
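# Illustrative sketch (name assumed) of the remapping performed below:
# if the tokenized answer is not fully inside this document span, the
# chunk is trained toward position 0 (the [CLS] token); otherwise both
# positions are shifted by len(query_tokens) + 2 for "[CLS] query [SEP]".
def map_positions(tok_start, tok_end, span_start, span_length, num_query_tokens):
  doc_start = span_start
  doc_end = span_start + span_length - 1
  if not (tok_start >= doc_start and tok_end <= doc_end):
    return 0, 0  # answer out of span for this chunk
  doc_offset = num_query_tokens + 2
  return tok_start - doc_start + doc_offset, tok_end - doc_start + doc_offset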
- doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - out_of_span = True - if out_of_span: - start_position = 0 - end_position = 0 - else: - doc_offset = len(query_tokens) + 2 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and example.is_impossible: - start_position = 0 - end_position = 0 - - if example_index < 20: - tf.logging.info("*** Example ***") - tf.logging.info("unique_id: %s" % (unique_id)) - tf.logging.info("example_index: %s" % (example_index)) - tf.logging.info("doc_span_index: %s" % (doc_span_index)) - tf.logging.info("tokens: %s" % " ".join( - [tokenization.printable_text(x) for x in tokens])) - tf.logging.info("token_to_orig_map: %s" % " ".join( - ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) - tf.logging.info("token_is_max_context: %s" % " ".join([ - "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) - ])) - tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - tf.logging.info( - "input_mask: %s" % " ".join([str(x) for x in input_mask])) - tf.logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - if is_training and example.is_impossible: - tf.logging.info("impossible example") - if is_training and not example.is_impossible: - answer_text = " ".join(tokens[start_position:(end_position + 1)]) - tf.logging.info("start_position: %d" % (start_position)) - tf.logging.info("end_position: %d" % (end_position)) - tf.logging.info( - "answer: %s" % (tokenization.printable_text(answer_text))) - - feature = InputFeatures( - unique_id=unique_id, - example_index=example_index, - doc_span_index=doc_span_index, - tokens=tokens, - token_to_orig_map=token_to_orig_map, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - start_position=start_position, - end_position=end_position, - is_impossible=example.is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - - -def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, - orig_answer_text): - """Returns tokenized answer spans that better match the annotated answer.""" - - # The SQuAD annotations are character based. We first project them to - # whitespace-tokenized words. But then after WordPiece tokenization, we can - # often find a "better match". For example: - # - # Question: What year was John Smith born? - # Context: The leader was John Smith (1895-1943). - # Answer: 1895 - # - # The original whitespace-tokenized answer will be "(1895-1943).". However - # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match - # the exact answer, 1895. - # - # However, this is not always possible. Consider the following: - # - # Question: What country is the top exporter of electornics? - # Context: The Japanese electronics industry is the lagest in the world. - # Answer: Japan - # - # In this case, the annotator chose "Japan" as a character sub-span of - # the word "Japanese". Since our WordPiece tokenizer does not split - # "Japanese", we just use "Japanese" as the annotation. This is fairly rare - # in SQuAD, but does happen. 
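# Illustrative sketch (standalone, name assumed) of the span-tightening
# search implemented just below: every sub-span between the projected
# start and end is re-joined and compared to the tokenized answer text,
# and the first exact match wins.
def tighten_answer_span(doc_tokens, input_start, input_end, tok_answer_text):
  for new_start in range(input_start, input_end + 1):
    for new_end in range(input_end, new_start - 1, -1):
      if " ".join(doc_tokens[new_start:new_end + 1]) == tok_answer_text:
        return new_start, new_end
  return input_start, input_end

# e.g. with doc_tokens = ["(", "1895", "-", "1943", ")", "."] and
# tok_answer_text = "1895", the span (0, 5) is tightened to (1, 1),
# matching the "(1895-1943)." example in the comment above.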
- tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) - - for new_start in range(input_start, input_end + 1): - for new_end in range(input_end, new_start - 1, -1): - text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) - if text_span == tok_answer_text: - return (new_start, new_end) - - return (input_start, input_end) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - - -def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, - use_one_hot_embeddings): - """Creates a classification model.""" - model = modeling.BertModel( - config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - token_type_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - final_hidden = model.get_sequence_output() - - final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3) - batch_size = final_hidden_shape[0] - seq_length = final_hidden_shape[1] - hidden_size = final_hidden_shape[2] - - output_weights = tf.get_variable( - "cls/squad/output_weights", [2, hidden_size], - initializer=tf.truncated_normal_initializer(stddev=0.02)) - - output_bias = tf.get_variable( - "cls/squad/output_bias", [2], initializer=tf.zeros_initializer()) - - final_hidden_matrix = tf.reshape(final_hidden, - [batch_size * seq_length, hidden_size]) - logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True) - logits = tf.nn.bias_add(logits, output_bias) - - logits = tf.reshape(logits, [batch_size, seq_length, 2]) - logits = tf.transpose(logits, [2, 0, 1]) - - unstacked_logits = tf.unstack(logits, axis=0) - - (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1]) - - return (start_logits, end_logits) - - -def model_fn_builder(bert_config, init_checkpoint, learning_rate, - num_train_steps, num_warmup_steps, use_tpu, - use_one_hot_embeddings): - """Returns `model_fn` closure for TPUEstimator.""" - - def model_fn(features, labels, mode, params): # pylint: disable=unused-argument - """The `model_fn` for TPUEstimator.""" - - tf.logging.info("*** Features ***") - for name in sorted(features.keys()): - tf.logging.info(" name = %s, shape = %s" % (name, 
features[name].shape)) - - unique_ids = features["unique_ids"] - input_ids = features["input_ids"] - input_mask = features["input_mask"] - segment_ids = features["segment_ids"] - - is_training = (mode == tf.estimator.ModeKeys.TRAIN) - - (start_logits, end_logits) = create_model( - bert_config=bert_config, - is_training=is_training, - input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - use_one_hot_embeddings=use_one_hot_embeddings) - - tvars = tf.trainable_variables() - - initialized_variable_names = {} - scaffold_fn = None - if init_checkpoint: - (assignment_map, initialized_variable_names - ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if use_tpu: - - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - tf.logging.info("**** Trainable Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - - output_spec = None - if mode == tf.estimator.ModeKeys.TRAIN: - seq_length = modeling.get_shape_list(input_ids)[1] - - def compute_loss(logits, positions): - one_hot_positions = tf.one_hot( - positions, depth=seq_length, dtype=tf.float32) - log_probs = tf.nn.log_softmax(logits, axis=-1) - loss = -tf.reduce_mean( - tf.reduce_sum(one_hot_positions * log_probs, axis=-1)) - return loss - - start_positions = features["start_positions"] - end_positions = features["end_positions"] - - start_loss = compute_loss(start_logits, start_positions) - end_loss = compute_loss(end_logits, end_positions) - - total_loss = (start_loss + end_loss) / 2.0 - - train_op = optimization.create_optimizer( - total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) - - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - loss=total_loss, - train_op=train_op, - scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, loss=total_loss, train_op=train_op) - - elif mode == tf.estimator.ModeKeys.PREDICT: - predictions = { - "unique_ids": unique_ids, - "start_logits": start_logits, - "end_logits": end_logits, - } - if use_tpu: - output_spec = tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, predictions=predictions, scaffold_fn=scaffold_fn) - else: - output_spec = tf.estimator.EstimatorSpec( - mode=mode, predictions=predictions) - - else: - raise ValueError( - "Only TRAIN and PREDICT modes are supported: %s" % (mode)) - - return output_spec - - return model_fn - - -def input_fn_builder(input_file, seq_length, is_training, drop_remainder): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.int64), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. 
- for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.to_int32(t) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - batch_size = params["batch_size"] - - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - d = tf.data.TFRecordDataset(input_file) - if is_training: - d = d.repeat() - d = d.shuffle(buffer_size=100) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - drop_remainder=drop_remainder)) - - return d - - return input_fn - - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, filename, is_training): - self.filename = filename - self.is_training = is_training - self.num_features = 0 - self._writer = tf.python_io.TFRecordWriter(filename) - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_int_feature(feature.input_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 0 - if feature.is_impossible: - impossible = 1 - features["is_impossible"] = create_int_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - self._writer.write(tf_example.SerializeToString()) - - def close(self): - self._writer.close() - - -def validate_flags_or_throw(bert_config): - """Validate the input FLAGS or throw an exception.""" - tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, - FLAGS.init_checkpoint) - - if not FLAGS.do_train and not FLAGS.do_predict: - raise ValueError("At least one of `do_train` or `do_predict` must be True.") - - if FLAGS.do_train: - if not FLAGS.train_file: - raise ValueError( - "If `do_train` is True, then `train_file` must be specified.") - if FLAGS.do_predict: - if not FLAGS.predict_file: - raise ValueError( - "If `do_predict` is True, then `predict_file` must be specified.") - - if FLAGS.max_seq_length > bert_config.max_position_embeddings: - raise ValueError( - "Cannot use sequence length %d because the BERT model " - "was only trained up to sequence length %d" % - (FLAGS.max_seq_length, bert_config.max_position_embeddings)) - - if FLAGS.max_seq_length <= FLAGS.max_query_length + 3: - raise ValueError( - "The max_seq_length (%d) must be greater than max_query_length " - "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length)) - - -def serving_input_receiver_fn(): - """ - An input receiver that expects a serialized tf.Example. - Here input builder is just for serving_input_receiver_fn, - Use placeholder to replace the real data. 
- """ - feature_spec = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.float32), - "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), - } - serialized_tf_example = tf.placeholder(dtype=tf.string, - shape=[FLAGS.predict_batch_size], - name='input_example_tensor') - receiver_tensors = {'examples': serialized_tf_example} - features = tf.parse_example(serialized_tf_example, feature_spec) - return tf.estimator.export.ServingInputReceiver(features, receiver_tensors) - - -def save_biobert_model(): - tf.logging.set_verbosity(tf.logging.INFO) - - bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) - - validate_flags_or_throw(bert_config) - - tf.gfile.MakeDirs(FLAGS.output_dir) - - tokenizer = tokenization.FullTokenizer( - vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) - - tpu_cluster_resolver = None - if FLAGS.use_tpu and FLAGS.tpu_name: - tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver( - FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) - - is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 - run_config = tf.contrib.tpu.RunConfig( - cluster=tpu_cluster_resolver, - master=FLAGS.master, - model_dir=FLAGS.output_dir, - save_checkpoints_steps=FLAGS.save_checkpoints_steps, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations_per_loop, - num_shards=FLAGS.num_tpu_cores, - per_host_input_for_training=is_per_host)) - - train_examples = None - num_train_steps = None - num_warmup_steps = None - - model_fn = model_fn_builder( - bert_config=bert_config, - init_checkpoint=FLAGS.init_checkpoint, - learning_rate=FLAGS.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=num_warmup_steps, - use_tpu=FLAGS.use_tpu, - use_one_hot_embeddings=FLAGS.use_tpu) - - # If TPU is not available, this will fall back to normal Estimator on CPU - # or GPU. - if FLAGS.use_tpu: - estimator = tf.contrib.tpu.TPUEstimator( - use_tpu=FLAGS.use_tpu, - model_fn=model_fn, - config=run_config, - train_batch_size=FLAGS.train_batch_size, - predict_batch_size=FLAGS.predict_batch_size) - else: - estimator = tf.estimator.Estimator( - model_fn=model_fn, - config=run_config) - - - export_model_path = estimator.export_savedmodel(FLAGS.export_dir_base, serving_input_receiver_fn) - tf.logging.info('Exported to {}'.format(export_model_path)) - return export_model_path - - -if __name__ == "__main__": - flags.mark_flag_as_required("vocab_file") - flags.mark_flag_as_required("bert_config_file") - flags.mark_flag_as_required("output_dir") - model_path = save_biobert_model() - print(model_path) diff --git a/build/lib/caireCovid/biobert/tokenization.py b/build/lib/caireCovid/biobert/tokenization.py deleted file mode 100644 index dc476a6..0000000 --- a/build/lib/caireCovid/biobert/tokenization.py +++ /dev/null @@ -1,399 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Tokenization classes.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import re -import unicodedata -import six -import tensorflow as tf - - -def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): - """Checks whether the casing config is consistent with the checkpoint name.""" - - # The casing has to be passed in by the user and there is no explicit check - # as to whether it matches the checkpoint. The casing information probably - # should have been stored in the bert_config.json file, but it's not, so - # we have to heuristically detect it to validate. - - if not init_checkpoint: - return - - m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) - if m is None: - return - - model_name = m.group(1) - - lower_models = [ - "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", - "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" - ] - - cased_models = [ - "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", - "multi_cased_L-12_H-768_A-12" - ] - - is_bad_config = False - if model_name in lower_models and not do_lower_case: - is_bad_config = True - actual_flag = "False" - case_name = "lowercased" - opposite_flag = "True" - - if model_name in cased_models and do_lower_case: - is_bad_config = True - actual_flag = "True" - case_name = "cased" - opposite_flag = "False" - - if is_bad_config: - raise ValueError( - "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " - "However, `%s` seems to be a %s model, so you " - "should pass in `--do_lower_case=%s` so that the fine-tuning matches " - "how the model was pre-training. If this error is wrong, please " - "just comment out this check." % (actual_flag, init_checkpoint, - model_name, case_name, opposite_flag)) - - -def convert_to_unicode(text): - """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" - if six.PY3: - if isinstance(text, str): - return text - elif isinstance(text, bytes): - return text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - elif six.PY2: - if isinstance(text, str): - return text.decode("utf-8", "ignore") - elif isinstance(text, unicode): - return text - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - else: - raise ValueError("Not running on Python2 or Python 3?") - - -def printable_text(text): - """Returns text encoded in a way suitable for print or `tf.logging`.""" - - # These functions want `str` for both Python2 and Python3, but in one case - # it's a Unicode string and in the other it's a byte string. 
- if six.PY3: - if isinstance(text, str): - return text - elif isinstance(text, bytes): - return text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - elif six.PY2: - if isinstance(text, str): - return text - elif isinstance(text, unicode): - return text.encode("utf-8") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - else: - raise ValueError("Not running on Python2 or Python 3?") - - -def load_vocab(vocab_file): - """Loads a vocabulary file into a dictionary.""" - vocab = collections.OrderedDict() - index = 0 - with tf.gfile.GFile(vocab_file, "r") as reader: - while True: - token = convert_to_unicode(reader.readline()) - if not token: - break - token = token.strip() - vocab[token] = index - index += 1 - return vocab - - -def convert_by_vocab(vocab, items): - """Converts a sequence of [tokens|ids] using the vocab.""" - output = [] - for item in items: - output.append(vocab[item]) - return output - - -def convert_tokens_to_ids(vocab, tokens): - return convert_by_vocab(vocab, tokens) - - -def convert_ids_to_tokens(inv_vocab, ids): - return convert_by_vocab(inv_vocab, ids) - - -def whitespace_tokenize(text): - """Runs basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -class FullTokenizer(object): - """Runs end-to-end tokenziation.""" - - def __init__(self, vocab_file, do_lower_case=True): - self.vocab = load_vocab(vocab_file) - self.inv_vocab = {v: k for k, v in self.vocab.items()} - self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) - self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) - - def tokenize(self, text): - split_tokens = [] - for token in self.basic_tokenizer.tokenize(text): - for sub_token in self.wordpiece_tokenizer.tokenize(token): - split_tokens.append(sub_token) - - return split_tokens - - def convert_tokens_to_ids(self, tokens): - return convert_by_vocab(self.vocab, tokens) - - def convert_ids_to_tokens(self, ids): - return convert_by_vocab(self.inv_vocab, ids) - - -class BasicTokenizer(object): - """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" - - def __init__(self, do_lower_case=True): - """Constructs a BasicTokenizer. - - Args: - do_lower_case: Whether to lower case the input. - """ - self.do_lower_case = do_lower_case - - def tokenize(self, text): - """Tokenizes a piece of text.""" - text = convert_to_unicode(text) - text = self._clean_text(text) - - # This was added on November 1st, 2018 for the multilingual and Chinese - # models. This is also applied to the English models now, but it doesn't - # matter since the English models were not trained on any Chinese data - # and generally don't have any Chinese data in them (there are Chinese - # characters in the vocabulary because Wikipedia does have some Chinese - # words in the English Wikipedia.). 
- text = self._tokenize_chinese_chars(text) - - orig_tokens = whitespace_tokenize(text) - split_tokens = [] - for token in orig_tokens: - if self.do_lower_case: - token = token.lower() - token = self._run_strip_accents(token) - split_tokens.extend(self._run_split_on_punc(token)) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Strips accents from a piece of text.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text): - """Splits punctuation on a piece of text.""" - chars = list(text) - i = 0 - start_new_word = True - output = [] - while i < len(chars): - char = chars[i] - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - - return ["".join(x) for x in output] - - def _tokenize_chinese_chars(self, text): - """Adds whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Checks whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - if ((cp >= 0x4E00 and cp <= 0x9FFF) or # - (cp >= 0x3400 and cp <= 0x4DBF) or # - (cp >= 0x20000 and cp <= 0x2A6DF) or # - (cp >= 0x2A700 and cp <= 0x2B73F) or # - (cp >= 0x2B740 and cp <= 0x2B81F) or # - (cp >= 0x2B820 and cp <= 0x2CEAF) or - (cp >= 0xF900 and cp <= 0xFAFF) or # - (cp >= 0x2F800 and cp <= 0x2FA1F)): # - return True - - return False - - def _clean_text(self, text): - """Performs invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xfffd or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - -class WordpieceTokenizer(object): - """Runs WordPiece tokenziation.""" - - def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): - self.vocab = vocab - self.unk_token = unk_token - self.max_input_chars_per_word = max_input_chars_per_word - - def tokenize(self, text): - """Tokenizes a piece of text into its word pieces. - - This uses a greedy longest-match-first algorithm to perform tokenization - using the given vocabulary. - - For example: - input = "unaffable" - output = ["un", "##aff", "##able"] - - Args: - text: A single token or whitespace separated tokens. This should have - already been passed through `BasicTokenizer. - - Returns: - A list of wordpiece tokens. 
- """ - - text = convert_to_unicode(text) - - output_tokens = [] - for token in whitespace_tokenize(text): - chars = list(token) - if len(chars) > self.max_input_chars_per_word: - output_tokens.append(self.unk_token) - continue - - is_bad = False - start = 0 - sub_tokens = [] - while start < len(chars): - end = len(chars) - cur_substr = None - while start < end: - substr = "".join(chars[start:end]) - if start > 0: - substr = "##" + substr - if substr in self.vocab: - cur_substr = substr - break - end -= 1 - if cur_substr is None: - is_bad = True - break - sub_tokens.append(cur_substr) - start = end - - if is_bad: - output_tokens.append(self.unk_token) - else: - output_tokens.extend(sub_tokens) - return output_tokens - - -def _is_whitespace(char): - """Checks whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically contorl characters but we treat them - # as whitespace since they are generally considered as such. - if char == " " or char == "\t" or char == "\n" or char == "\r": - return True - cat = unicodedata.category(char) - if cat == "Zs": - return True - return False - - -def _is_control(char): - """Checks whether `chars` is a control character.""" - # These are technically control characters but we count them as whitespace - # characters. - if char == "\t" or char == "\n" or char == "\r": - return False - cat = unicodedata.category(char) - if cat.startswith("C"): - return True - return False - - -def _is_punctuation(char): - """Checks whether `chars` is a punctuation character.""" - cp = ord(char) - # We treat all non-letter/number ASCII as punctuation. - # Characters such as "^", "$", and "`" are not in the Unicode - # Punctuation class but we treat them as punctuation anyways, for - # consistency. - if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or - (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False diff --git a/build/lib/caireCovid/main.py b/build/lib/caireCovid/main.py deleted file mode 100644 index c312f52..0000000 --- a/build/lib/caireCovid/main.py +++ /dev/null @@ -1,33 +0,0 @@ -import os -import sys - -import json -from retrieval import information_retrieval -from qa import QaModule, print_answers_in_file - -all_results, data_for_qa = information_retrieval("question_generation/task1_question.json") - -qa_model = QaModule(["mrqa", "biobert"]) - -answers = qa_model.getAnswers(data_for_qa) - -# print_answers_in_file(answers) -format_answer = {} -for item in answers: - format_answer[item["question"]] = item["data"] - -with open("data.json", "w") as f: - json.dump(format_answer, f) - -# Final output for synthesis -# List [{ -# "question": "xxxx", -# "data": -# { -# "answer": ["answer1", "answer2", ...], -# "confidence": [confidence1, confidence2, ...], -# "title": [title1, title2, ...], -# "doi": [doi1, doi2, ...] -# "sha": [sha1, sha2, ...] 
-# } -# }] diff --git a/build/lib/caireCovid/mrqa/__init__.py b/build/lib/caireCovid/mrqa/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/build/lib/caireCovid/mrqa/data_utils.py b/build/lib/caireCovid/mrqa/data_utils.py deleted file mode 100644 index f08ce2e..0000000 --- a/build/lib/caireCovid/mrqa/data_utils.py +++ /dev/null @@ -1,915 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import json -import os -import random - -from absl import flags -import absl.logging as _logging # pylint: disable=unused-import - -import numpy as np - - -import tensorflow as tf - -from .prepro_utils import preprocess_text, encode_ids -import sentencepiece as spm - - -special_symbols = { - "" : 0, - "" : 1, - "" : 2, - "" : 3, - "" : 4, - "" : 5, - "" : 6, - "" : 7, - "" : 8, -} - -VOCAB_SIZE = 32000 -UNK_ID = special_symbols[""] -CLS_ID = special_symbols[""] -SEP_ID = special_symbols[""] -MASK_ID = special_symbols[""] -EOD_ID = special_symbols[""] - - -def _int64_feature(values): - return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) - - -def _float_feature(values): - return tf.train.Feature(float_list=tf.train.FloatList(value=values)) - - -def format_filename(prefix, bsz_per_host, seq_len, bi_data, suffix, - mask_alpha=5, mask_beta=1, reuse_len=None, uncased=False, - fixed_num_predict=None): - """docs.""" - if reuse_len is None: - reuse_len_str = "" - else: - reuse_len_str = "reuse-{}.".format(reuse_len) - if not uncased: - uncased_str = "" - else: - uncased_str = "uncased." - if bi_data: - bi_data_str = "bi" - else: - bi_data_str = "uni" - if fixed_num_predict is not None: - fnp_str = "fnp-{}.".format(fixed_num_predict) - else: - fnp_str = "" - - file_name = "{}.bsz-{}.seqlen-{}.{}{}{}.alpha-{}.beta-{}.{}{}".format( - prefix, bsz_per_host, seq_len, reuse_len_str, uncased_str, bi_data_str, - mask_alpha, mask_beta, fnp_str, suffix) - - return file_name - - -def _create_data(idx, input_paths): - # Load sentence-piece model - sp = spm.SentencePieceProcessor() - sp.Load(FLAGS.sp_path) - - input_shards = [] - total_line_cnt = 0 - for input_path in input_paths: - input_data, sent_ids = [], [] - sent_id, line_cnt = True, 0 - tf.logging.info("Processing %s", input_path) - for line in tf.gfile.Open(input_path): - if line_cnt % 100000 == 0: - tf.logging.info("Loading line %d", line_cnt) - line_cnt += 1 - - if not line.strip(): - if FLAGS.use_eod: - sent_id = not sent_id - cur_sent = [EOD_ID] - else: - continue - else: - if FLAGS.from_raw_text: - cur_sent = preprocess_text(line.strip(), lower=FLAGS.uncased) - cur_sent = encode_ids(sp, cur_sent) - else: - cur_sent = list(map(int, line.strip().split())) - - input_data.extend(cur_sent) - sent_ids.extend([sent_id] * len(cur_sent)) - sent_id = not sent_id - - tf.logging.info("Finish with line %d", line_cnt) - if line_cnt == 0: - continue - - input_data = np.array(input_data, dtype=np.int64) - sent_ids = np.array(sent_ids, dtype=np.bool) - - total_line_cnt += line_cnt - input_shards.append((input_data, sent_ids)) - - tf.logging.info("[Task %d] Total number line: %d", idx, total_line_cnt) - - tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords") - - filenames, num_batch = [], 0 - - # Randomly shuffle input shards (with a fixed but distinct random seed) - np.random.seed(100 * FLAGS.task + FLAGS.pass_id) - - perm_indices = np.random.permutation(len(input_shards)) - tf.logging.info("Using perm indices %s for pass %d", - 
perm_indices.tolist(), FLAGS.pass_id) - - input_data_list, sent_ids_list = [], [] - prev_sent_id = None - for perm_idx in perm_indices: - input_data, sent_ids = input_shards[perm_idx] - # make sure the `send_ids[0] == not prev_sent_id` - if prev_sent_id is not None and sent_ids[0] == prev_sent_id: - sent_ids = np.logical_not(sent_ids) - - # append to temporary list - input_data_list.append(input_data) - sent_ids_list.append(sent_ids) - - # update `prev_sent_id` - prev_sent_id = sent_ids[-1] - - input_data = np.concatenate(input_data_list) - sent_ids = np.concatenate(sent_ids_list) - - file_name, cur_num_batch = create_tfrecords( - save_dir=tfrecord_dir, - basename="{}-{}-{}".format(FLAGS.split, idx, FLAGS.pass_id), - data=[input_data, sent_ids], - bsz_per_host=FLAGS.bsz_per_host, - seq_len=FLAGS.seq_len, - bi_data=FLAGS.bi_data, - sp=sp, - ) - - filenames.append(file_name) - num_batch += cur_num_batch - - record_info = { - "filenames": filenames, - "num_batch": num_batch - } - - return record_info - - -def create_data(_): - # Validate FLAGS - assert FLAGS.bsz_per_host % FLAGS.num_core_per_host == 0 - if not FLAGS.use_tpu: - FLAGS.num_core_per_host = 1 # forced to be one - - # Make workdirs - if not tf.gfile.Exists(FLAGS.save_dir): - tf.gfile.MakeDirs(FLAGS.save_dir) - - tfrecord_dir = os.path.join(FLAGS.save_dir, "tfrecords") - if not tf.gfile.Exists(tfrecord_dir): - tf.gfile.MakeDirs(tfrecord_dir) - - # Create and dump corpus_info from task 0 - if FLAGS.task == 0: - corpus_info = { - "vocab_size": VOCAB_SIZE, - "bsz_per_host": FLAGS.bsz_per_host, - "num_core_per_host": FLAGS.num_core_per_host, - "seq_len": FLAGS.seq_len, - "reuse_len": FLAGS.reuse_len, - "uncased": FLAGS.uncased, - "bi_data": FLAGS.bi_data, - "mask_alpha": FLAGS.mask_alpha, - "mask_beta": FLAGS.mask_beta, - "num_predict": FLAGS.num_predict, - "use_eod": FLAGS.use_eod, - "sp_path": FLAGS.sp_path, - "input_glob": FLAGS.input_glob, - } - corpus_info_path = os.path.join(FLAGS.save_dir, "corpus_info.json") - with tf.gfile.Open(corpus_info_path, "w") as fp: - json.dump(corpus_info, fp) - - # Interleavely split the work into FLAGS.num_task splits - file_paths = sorted(tf.gfile.Glob(FLAGS.input_glob)) - tf.logging.info("Use glob: %s", FLAGS.input_glob) - tf.logging.info("Find %d files: %s", len(file_paths), file_paths) - - task_file_paths = file_paths[FLAGS.task::FLAGS.num_task] - if not task_file_paths: - tf.logging.info("Exit: task %d has no file to process.", FLAGS.task) - return - - tf.logging.info("Task %d process %d files: %s", - FLAGS.task, len(task_file_paths), task_file_paths) - record_info = _create_data(FLAGS.task, task_file_paths) - - record_prefix = "record_info-{}-{}-{}".format( - FLAGS.split, FLAGS.task, FLAGS.pass_id) - record_name = format_filename( - prefix=record_prefix, - bsz_per_host=FLAGS.bsz_per_host, - seq_len=FLAGS.seq_len, - mask_alpha=FLAGS.mask_alpha, - mask_beta=FLAGS.mask_beta, - reuse_len=FLAGS.reuse_len, - bi_data=FLAGS.bi_data, - suffix="json", - uncased=FLAGS.uncased, - fixed_num_predict=FLAGS.num_predict) - record_info_path = os.path.join(tfrecord_dir, record_name) - - with tf.gfile.Open(record_info_path, "w") as fp: - json.dump(record_info, fp) - - -def batchify(data, bsz_per_host, sent_ids=None): - num_step = len(data) // bsz_per_host - data = data[:bsz_per_host * num_step] - data = data.reshape(bsz_per_host, num_step) - if sent_ids is not None: - sent_ids = sent_ids[:bsz_per_host * num_step] - sent_ids = sent_ids.reshape(bsz_per_host, num_step) - - if sent_ids is not None: - return data, 
sent_ids - return data - - -def _split_a_and_b(data, sent_ids, begin_idx, tot_len, extend_target=False): - """Split two segments from `data` starting from the index `begin_idx`.""" - - data_len = data.shape[0] - if begin_idx + tot_len >= data_len: - tf.logging.info("[_split_a_and_b] returns None: " - "begin_idx %d + tot_len %d >= data_len %d", - begin_idx, tot_len, data_len) - return None - - end_idx = begin_idx + 1 - cut_points = [] - while end_idx < data_len: - if sent_ids[end_idx] != sent_ids[end_idx - 1]: - if end_idx - begin_idx >= tot_len: break - cut_points.append(end_idx) - end_idx += 1 - - a_begin = begin_idx - if len(cut_points) == 0 or random.random() < 0.5: - label = 0 - if len(cut_points) == 0: - a_end = end_idx - else: - a_end = random.choice(cut_points) - - b_len = max(1, tot_len - (a_end - a_begin)) - # (zihang): `data_len - 1` to account for extend_target - b_begin = random.randint(0, data_len - 1 - b_len) - b_end = b_begin + b_len - while b_begin > 0 and sent_ids[b_begin - 1] == sent_ids[b_begin]: - b_begin -= 1 - # (zihang): `data_len - 1` to account for extend_target - while b_end < data_len - 1 and sent_ids[b_end - 1] == sent_ids[b_end]: - b_end += 1 - - new_begin = a_end - else: - label = 1 - a_end = random.choice(cut_points) - b_begin = a_end - b_end = end_idx - - new_begin = b_end - - while a_end - a_begin + b_end - b_begin > tot_len: - if a_end - a_begin > b_end - b_begin: - # delete the right side only for the LM objective - a_end -= 1 - else: - b_end -= 1 - - ret = [data[a_begin: a_end], data[b_begin: b_end], label, new_begin] - - if extend_target: - if a_end >= data_len or b_end >= data_len: - tf.logging.info("[_split_a_and_b] returns None: " - "a_end %d or b_end %d >= data_len %d", - a_end, b_end, data_len) - return None - a_target = data[a_begin + 1: a_end + 1] - b_target = data[b_begin: b_end + 1] - ret.extend([a_target, b_target]) - - return ret - - -def _is_start_piece(piece): - special_pieces = set(list('!"#$%&\"()*+,-./:;?@[\\]^_`{|}~')) - if (piece.startswith("▁") or piece.startswith("<") - or piece in special_pieces): - return True - else: - return False - - -def _sample_mask(sp, seg, reverse=False, max_gram=5, goal_num_predict=None): - """Sample `goal_num_predict` tokens for partial prediction. - About `mask_beta` tokens are chosen in a context of `mask_alpha` tokens.""" - - seg_len = len(seg) - mask = np.array([False] * seg_len, dtype=np.bool) - - num_predict = 0 - - ngrams = np.arange(1, max_gram + 1, dtype=np.int64) - pvals = 1. 
/ np.arange(1, max_gram + 1) - pvals /= pvals.sum(keepdims=True) - - if reverse: - seg = np.flip(seg, 0) - - cur_len = 0 - while cur_len < seg_len: - if goal_num_predict is not None and num_predict >= goal_num_predict: break - - n = np.random.choice(ngrams, p=pvals) - if goal_num_predict is not None: - n = min(n, goal_num_predict - num_predict) - ctx_size = (n * FLAGS.mask_alpha) // FLAGS.mask_beta - l_ctx = np.random.choice(ctx_size) - r_ctx = ctx_size - l_ctx - - # Find the start position of a complete token - beg = cur_len + l_ctx - while beg < seg_len and not _is_start_piece(sp.IdToPiece(seg[beg].item())): - beg += 1 - if beg >= seg_len: - break - - # Find the end position of the n-gram (start pos of the n+1-th gram) - end = beg + 1 - cnt_ngram = 1 - while end < seg_len: - if _is_start_piece(sp.IdToPiece(seg[beg].item())): - cnt_ngram += 1 - if cnt_ngram > n: - break - end += 1 - if end >= seg_len: - break - - # Update - mask[beg:end] = True - num_predict += end - beg - - cur_len = end + r_ctx - - while goal_num_predict is not None and num_predict < goal_num_predict: - i = np.random.randint(seg_len) - if not mask[i]: - mask[i] = True - num_predict += 1 - - if reverse: - mask = np.flip(mask, 0) - - return mask - - -def create_tfrecords(save_dir, basename, data, bsz_per_host, seq_len, - bi_data, sp): - data, sent_ids = data[0], data[1] - - num_core = FLAGS.num_core_per_host - bsz_per_core = bsz_per_host // num_core - - if bi_data: - assert bsz_per_host % (2 * FLAGS.num_core_per_host) == 0 - fwd_data, fwd_sent_ids = batchify(data, bsz_per_host // 2, sent_ids) - - fwd_data = fwd_data.reshape(num_core, 1, bsz_per_core // 2, -1) - fwd_sent_ids = fwd_sent_ids.reshape(num_core, 1, bsz_per_core // 2, -1) - - bwd_data = fwd_data[:, :, :, ::-1] - bwd_sent_ids = fwd_sent_ids[:, :, :, ::-1] - - data = np.concatenate( - [fwd_data, bwd_data], 1).reshape(bsz_per_host, -1) - sent_ids = np.concatenate( - [fwd_sent_ids, bwd_sent_ids], 1).reshape(bsz_per_host, -1) - else: - data, sent_ids = batchify(data, bsz_per_host, sent_ids) - - tf.logging.info("Raw data shape %s.", data.shape) - - file_name = format_filename( - prefix=basename, - bsz_per_host=bsz_per_host, - seq_len=seq_len, - bi_data=bi_data, - suffix="tfrecords", - mask_alpha=FLAGS.mask_alpha, - mask_beta=FLAGS.mask_beta, - reuse_len=FLAGS.reuse_len, - uncased=FLAGS.uncased, - fixed_num_predict=FLAGS.num_predict - ) - save_path = os.path.join(save_dir, file_name) - record_writer = tf.python_io.TFRecordWriter(save_path) - tf.logging.info("Start writing %s.", save_path) - - num_batch = 0 - reuse_len = FLAGS.reuse_len - - # [sep] x 2 + [cls] - assert reuse_len < seq_len - 3 - - data_len = data.shape[1] - sep_array = np.array([SEP_ID], dtype=np.int64) - cls_array = np.array([CLS_ID], dtype=np.int64) - - i = 0 - while i + seq_len <= data_len: - if num_batch % 500 == 0: - tf.logging.info("Processing batch %d", num_batch) - - all_ok = True - features = [] - for idx in range(bsz_per_host): - inp = data[idx, i: i + reuse_len] - tgt = data[idx, i + 1: i + reuse_len + 1] - - results = _split_a_and_b( - data[idx], - sent_ids[idx], - begin_idx=i + reuse_len, - tot_len=seq_len - reuse_len - 3, - extend_target=True) - if results is None: - tf.logging.info("Break out with seq idx %d", i) - all_ok = False - break - - # unpack the results - (a_data, b_data, label, _, a_target, b_target) = tuple(results) - - # sample ngram spans to predict - reverse = bi_data and (idx // (bsz_per_core // 2)) % 2 == 1 - if FLAGS.num_predict is None: - num_predict_0 = num_predict_1 = 
None - else: - num_predict_1 = FLAGS.num_predict // 2 - num_predict_0 = FLAGS.num_predict - num_predict_1 - mask_0 = _sample_mask(sp, inp, reverse=reverse, - goal_num_predict=num_predict_0) - mask_1 = _sample_mask(sp, np.concatenate([a_data, sep_array, b_data, - sep_array, cls_array]), - reverse=reverse, goal_num_predict=num_predict_1) - - # concatenate data - cat_data = np.concatenate([inp, a_data, sep_array, b_data, - sep_array, cls_array]) - seg_id = ([0] * (reuse_len + a_data.shape[0]) + [0] + - [1] * b_data.shape[0] + [1] + [2]) - assert cat_data.shape[0] == seq_len - assert mask_0.shape[0] == seq_len // 2 - assert mask_1.shape[0] == seq_len // 2 - - # the last two CLS's are not used, just for padding purposes - tgt = np.concatenate([tgt, a_target, b_target, cls_array, cls_array]) - assert tgt.shape[0] == seq_len - - is_masked = np.concatenate([mask_0, mask_1], 0) - if FLAGS.num_predict is not None: - assert np.sum(is_masked) == FLAGS.num_predict - - feature = { - "input": _int64_feature(cat_data), - "is_masked": _int64_feature(is_masked), - "target": _int64_feature(tgt), - "seg_id": _int64_feature(seg_id), - "label": _int64_feature([label]), - } - features.append(feature) - - if all_ok: - assert len(features) == bsz_per_host - for feature in features: - example = tf.train.Example(features=tf.train.Features(feature=feature)) - record_writer.write(example.SerializeToString()) - num_batch += 1 - else: - break - - i += reuse_len - - record_writer.close() - tf.logging.info("Done writing %s. Num of batches: %d", save_path, num_batch) - - return save_path, num_batch - - -################ -# get_input_fn # -################ -def _convert_example(example, use_bfloat16): - """Cast int64 into int32 and float32 to bfloat16 if use_bfloat16.""" - for key in list(example.keys()): - val = example[key] - if tf.keras.backend.is_sparse(val): - val = tf.sparse.to_dense(val) - if val.dtype == tf.int64: - val = tf.cast(val, tf.int32) - if use_bfloat16 and val.dtype == tf.float32: - val = tf.cast(val, tf.bfloat16) - - example[key] = val - - -def parse_files_to_dataset(parser, file_names, split, num_batch, num_hosts, - host_id, num_core_per_host, bsz_per_core): - # list of file pathes - num_files = len(file_names) - num_files_per_host = num_files // num_hosts - my_start_file_id = host_id * num_files_per_host - my_end_file_id = (host_id + 1) * num_files_per_host - if host_id == num_hosts - 1: - my_end_file_id = num_files - file_paths = file_names[my_start_file_id: my_end_file_id] - tf.logging.info("Host %d handles %d files", host_id, len(file_paths)) - - assert split == "train" - dataset = tf.data.Dataset.from_tensor_slices(file_paths) - - # file-level shuffle - if len(file_paths) > 1: - dataset = dataset.shuffle(len(file_paths)) - - # Note: we cannot perform sample-level shuffle here because this will violate - # the consecutive requirement of data stream. - dataset = tf.data.TFRecordDataset(dataset) - - # (zihang): since we are doing online preprocessing, the parsed result of - # the same input at each time will be different. Thus, cache processed data - # is not helpful. It will use a lot of memory and lead to contrainer OOM. - # So, change to cache non-parsed raw data instead. 
- dataset = dataset.cache().map(parser).repeat() - dataset = dataset.batch(bsz_per_core, drop_remainder=True) - dataset = dataset.prefetch(num_core_per_host * bsz_per_core) - - return dataset - - -def _local_perm(inputs, targets, is_masked, perm_size, seq_len): - """ - Sample a permutation of the factorization order, and create an - attention mask accordingly. - - Args: - inputs: int64 Tensor in shape [seq_len], input ids. - targets: int64 Tensor in shape [seq_len], target ids. - is_masked: bool Tensor in shape [seq_len]. True means being selected - for partial prediction. - perm_size: the length of longest permutation. Could be set to be reuse_len. - Should not be larger than reuse_len or there will be data leaks. - seq_len: int, sequence length. - """ - - # Generate permutation indices - index = tf.range(seq_len, dtype=tf.int64) - index = tf.transpose(tf.reshape(index, [-1, perm_size])) - index = tf.random_shuffle(index) - index = tf.reshape(tf.transpose(index), [-1]) - - # `perm_mask` and `target_mask` - # non-functional tokens - non_func_tokens = tf.logical_not(tf.logical_or( - tf.equal(inputs, SEP_ID), - tf.equal(inputs, CLS_ID))) - - non_mask_tokens = tf.logical_and(tf.logical_not(is_masked), non_func_tokens) - masked_or_func_tokens = tf.logical_not(non_mask_tokens) - - # Set the permutation indices of non-masked (& non-funcional) tokens to the - # smallest index (-1): - # (1) they can be seen by all other positions - # (2) they cannot see masked positions, so there won"t be information leak - smallest_index = -tf.ones([seq_len], dtype=tf.int64) - rev_index = tf.where(non_mask_tokens, smallest_index, index) - - # Create `target_mask`: non-funcional and maksed tokens - # 1: use mask as input and have loss - # 0: use token (or [SEP], [CLS]) as input and do not have loss - target_tokens = tf.logical_and(masked_or_func_tokens, non_func_tokens) - target_mask = tf.cast(target_tokens, tf.float32) - - # Create `perm_mask` - # `target_tokens` cannot see themselves - self_rev_index = tf.where(target_tokens, rev_index, rev_index + 1) - - # 1: cannot attend if i <= j and j is not non-masked (masked_or_func_tokens) - # 0: can attend if i > j or j is non-masked - perm_mask = tf.logical_and( - self_rev_index[:, None] <= rev_index[None, :], - masked_or_func_tokens) - perm_mask = tf.cast(perm_mask, tf.float32) - - # new target: [next token] for LM and [curr token] (self) for PLM - new_targets = tf.concat([inputs[0: 1], targets[: -1]], - axis=0) - - # construct inputs_k - inputs_k = inputs - - # construct inputs_q - inputs_q = target_mask - - return perm_mask, new_targets, target_mask, inputs_k, inputs_q - - -def get_dataset(params, num_hosts, num_core_per_host, split, file_names, - num_batch, seq_len, reuse_len, perm_size, mask_alpha, - mask_beta, use_bfloat16=False, num_predict=None): - - bsz_per_core = params["batch_size"] - if num_hosts > 1: - host_id = params["context"].current_host - else: - host_id = 0 - - #### Function used to parse tfrecord - def parser(record): - """function used to parse tfrecord.""" - - record_spec = { - "input": tf.FixedLenFeature([seq_len], tf.int64), - "target": tf.FixedLenFeature([seq_len], tf.int64), - "seg_id": tf.FixedLenFeature([seq_len], tf.int64), - "label": tf.FixedLenFeature([1], tf.int64), - "is_masked": tf.FixedLenFeature([seq_len], tf.int64), - } - - # retrieve serialized example - example = tf.parse_single_example( - serialized=record, - features=record_spec) - - inputs = example.pop("input") - target = example.pop("target") - is_masked = 
tf.cast(example.pop("is_masked"), tf.bool) - - non_reuse_len = seq_len - reuse_len - assert perm_size <= reuse_len and perm_size <= non_reuse_len - - perm_mask_0, target_0, target_mask_0, input_k_0, input_q_0 = _local_perm( - inputs[:reuse_len], - target[:reuse_len], - is_masked[:reuse_len], - perm_size, - reuse_len) - - perm_mask_1, target_1, target_mask_1, input_k_1, input_q_1 = _local_perm( - inputs[reuse_len:], - target[reuse_len:], - is_masked[reuse_len:], - perm_size, - non_reuse_len) - - perm_mask_0 = tf.concat([perm_mask_0, tf.ones([reuse_len, non_reuse_len])], - axis=1) - perm_mask_1 = tf.concat([tf.zeros([non_reuse_len, reuse_len]), perm_mask_1], - axis=1) - perm_mask = tf.concat([perm_mask_0, perm_mask_1], axis=0) - target = tf.concat([target_0, target_1], axis=0) - target_mask = tf.concat([target_mask_0, target_mask_1], axis=0) - input_k = tf.concat([input_k_0, input_k_1], axis=0) - input_q = tf.concat([input_q_0, input_q_1], axis=0) - - if num_predict is not None: - indices = tf.range(seq_len, dtype=tf.int64) - bool_target_mask = tf.cast(target_mask, tf.bool) - indices = tf.boolean_mask(indices, bool_target_mask) - - ##### extra padding due to CLS/SEP introduced after prepro - actual_num_predict = tf.shape(indices)[0] - pad_len = num_predict - actual_num_predict - - ##### target_mapping - target_mapping = tf.one_hot(indices, seq_len, dtype=tf.float32) - paddings = tf.zeros([pad_len, seq_len], dtype=target_mapping.dtype) - target_mapping = tf.concat([target_mapping, paddings], axis=0) - example["target_mapping"] = tf.reshape(target_mapping, - [num_predict, seq_len]) - - ##### target - target = tf.boolean_mask(target, bool_target_mask) - paddings = tf.zeros([pad_len], dtype=target.dtype) - target = tf.concat([target, paddings], axis=0) - example["target"] = tf.reshape(target, [num_predict]) - - ##### target mask - target_mask = tf.concat( - [tf.ones([actual_num_predict], dtype=tf.float32), - tf.zeros([pad_len], dtype=tf.float32)], - axis=0) - example["target_mask"] = tf.reshape(target_mask, [num_predict]) - else: - example["target"] = tf.reshape(target, [seq_len]) - example["target_mask"] = tf.reshape(target_mask, [seq_len]) - - # reshape back to fixed shape - example["perm_mask"] = tf.reshape(perm_mask, [seq_len, seq_len]) - example["input_k"] = tf.reshape(input_k, [seq_len]) - example["input_q"] = tf.reshape(input_q, [seq_len]) - - _convert_example(example, use_bfloat16) - - for k, v in example.items(): - tf.logging.info("%s: %s", k, v) - - return example - - # Get dataset - dataset = parse_files_to_dataset( - parser=parser, - file_names=file_names, - split=split, - num_batch=num_batch, - num_hosts=num_hosts, - host_id=host_id, - num_core_per_host=num_core_per_host, - bsz_per_core=bsz_per_core) - - return dataset - - -def get_input_fn( - tfrecord_dir, - split, - bsz_per_host, - seq_len, - reuse_len, - bi_data, - num_hosts=1, - num_core_per_host=1, - perm_size=None, - mask_alpha=None, - mask_beta=None, - uncased=False, - num_passes=None, - use_bfloat16=False, - num_predict=None): - - # Merge all record infos into a single one - record_glob_base = format_filename( - prefix="record_info-{}-*".format(split), - bsz_per_host=bsz_per_host, - seq_len=seq_len, - bi_data=bi_data, - suffix="json", - mask_alpha=mask_alpha, - mask_beta=mask_beta, - reuse_len=reuse_len, - uncased=uncased, - fixed_num_predict=num_predict) - - record_info = {"num_batch": 0, "filenames": []} - - tfrecord_dirs = tfrecord_dir.split(",") - tf.logging.info("Use the following tfrecord dirs: %s", tfrecord_dirs) - - 
for idx, record_dir in enumerate(tfrecord_dirs): - record_glob = os.path.join(record_dir, record_glob_base) - tf.logging.info("[%d] Record glob: %s", idx, record_glob) - - record_paths = sorted(tf.gfile.Glob(record_glob)) - tf.logging.info("[%d] Num of record info path: %d", - idx, len(record_paths)) - - cur_record_info = {"num_batch": 0, "filenames": []} - - for record_info_path in record_paths: - if num_passes is not None: - record_info_name = os.path.basename(record_info_path) - fields = record_info_name.split(".")[0].split("-") - pass_id = int(fields[-1]) - if len(fields) == 5 and pass_id >= num_passes: - tf.logging.info("Skip pass %d: %s", pass_id, record_info_name) - continue - - with tf.gfile.Open(record_info_path, "r") as fp: - info = json.load(fp) - if num_passes is not None: - eff_num_passes = min(num_passes, len(info["filenames"])) - ratio = eff_num_passes / len(info["filenames"]) - cur_record_info["num_batch"] += int(info["num_batch"] * ratio) - cur_record_info["filenames"] += info["filenames"][:eff_num_passes] - else: - cur_record_info["num_batch"] += info["num_batch"] - cur_record_info["filenames"] += info["filenames"] - - # overwrite directory for `cur_record_info` - new_filenames = [] - for filename in cur_record_info["filenames"]: - basename = os.path.basename(filename) - new_filename = os.path.join(record_dir, basename) - new_filenames.append(new_filename) - cur_record_info["filenames"] = new_filenames - - tf.logging.info("[Dir %d] Number of chosen batches: %s", - idx, cur_record_info["num_batch"]) - tf.logging.info("[Dir %d] Number of chosen files: %s", - idx, len(cur_record_info["filenames"])) - tf.logging.info(cur_record_info["filenames"]) - - # add `cur_record_info` to global `record_info` - record_info["num_batch"] += cur_record_info["num_batch"] - record_info["filenames"] += cur_record_info["filenames"] - - tf.logging.info("Total number of batches: %d", - record_info["num_batch"]) - tf.logging.info("Total number of files: %d", - len(record_info["filenames"])) - tf.logging.info(record_info["filenames"]) - - def input_fn(params): - """docs.""" - assert params["batch_size"] * num_core_per_host == bsz_per_host - - dataset = get_dataset( - params=params, - num_hosts=num_hosts, - num_core_per_host=num_core_per_host, - split=split, - file_names=record_info["filenames"], - num_batch=record_info["num_batch"], - seq_len=seq_len, - reuse_len=reuse_len, - perm_size=perm_size, - mask_alpha=mask_alpha, - mask_beta=mask_beta, - use_bfloat16=use_bfloat16, - num_predict=num_predict) - - return dataset - - return input_fn, record_info - - -if __name__ == "__main__": - FLAGS = flags.FLAGS - flags.DEFINE_bool("use_tpu", True, help="whether to use TPUs") - flags.DEFINE_integer("bsz_per_host", 32, help="batch size per host.") - flags.DEFINE_integer("num_core_per_host", 8, help="num TPU cores per host.") - - flags.DEFINE_integer("seq_len", 512, - help="Sequence length.") - flags.DEFINE_integer("reuse_len", 256, - help="Number of token that can be reused as memory. 
" - "Could be half of `seq_len`.") - flags.DEFINE_bool("uncased", True, help="Use uncased inputs or not.") - flags.DEFINE_bool("bi_data", True, - help="whether to create bidirectional data") - flags.DEFINE_integer("mask_alpha", default=6, - help="How many tokens to form a group.") - flags.DEFINE_integer("mask_beta", default=1, - help="How many tokens to mask within each group.") - flags.DEFINE_bool("use_eod", True, - help="whether to append EOD at the end of a doc.") - flags.DEFINE_bool("from_raw_text", True, - help="Whether the input is raw text or encoded ids.") - flags.DEFINE_integer("num_predict", default=85, - help="Num of tokens to predict.") - - flags.DEFINE_string("input_glob", "data/example/*.txt", - help="Input file glob.") - flags.DEFINE_string("sp_path", "", help="Path to the sentence piece model.") - flags.DEFINE_string("save_dir", "proc_data/example", - help="Directory for saving the processed data.") - flags.DEFINE_enum("split", "train", ["train", "dev", "test"], - help="Save the data as which split.") - - flags.DEFINE_integer("pass_id", 0, help="ID of the current pass." - "Different passes sample different negative segment.") - flags.DEFINE_integer("num_task", 1, help="Number of total tasks.") - flags.DEFINE_integer("task", 0, help="The Task ID. This value is used when " - "using multiple workers to identify each worker.") - - tf.logging.set_verbosity(tf.logging.INFO) - tf.app.run(create_data) diff --git a/build/lib/caireCovid/mrqa/function_builder.py b/build/lib/caireCovid/mrqa/function_builder.py deleted file mode 100644 index 56a6129..0000000 --- a/build/lib/caireCovid/mrqa/function_builder.py +++ /dev/null @@ -1,395 +0,0 @@ -"""doc.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import os -import tensorflow as tf -from .modeling import (lm_loss, classification_loss, regression_loss) -from .xlnet import (XLNetConfig, create_run_config, XLNetModel) - -# import attn_utils - -def construct_scalar_host_call( - monitor_dict, - model_dir, - prefix="", - reduce_fn=None): - """ - Construct host calls to monitor training progress on TPUs. 
- """ - - metric_names = list(monitor_dict.keys()) - - def host_call_fn(global_step, *args): - """actual host call function.""" - step = global_step[0] - with tf.contrib.summary.create_file_writer( - logdir=model_dir, filename_suffix=".host_call").as_default(): - with tf.contrib.summary.always_record_summaries(): - for i, name in enumerate(metric_names): - if reduce_fn is None: - scalar = args[i][0] - else: - scalar = reduce_fn(args[i]) - with tf.contrib.summary.record_summaries_every_n_global_steps( - 100, global_step=step): - tf.contrib.summary.scalar(prefix + name, scalar, step=step) - - return tf.contrib.summary.all_summary_ops() - - global_step_tensor = tf.reshape(tf.train.get_or_create_global_step(), [1]) - other_tensors = [tf.reshape(monitor_dict[key], [1]) for key in metric_names] - - return host_call_fn, [global_step_tensor] + other_tensors - - -def two_stream_loss(FLAGS, features, labels, mems, is_training): - """Pretraining loss with two-stream attention Transformer-XL.""" - - #### Unpack input - mem_name = "mems" - mems = mems.get(mem_name, None) - - inp_k = tf.transpose(features["input_k"], [1, 0]) - inp_q = tf.transpose(features["input_q"], [1, 0]) - - seg_id = tf.transpose(features["seg_id"], [1, 0]) - - inp_mask = None - perm_mask = tf.transpose(features["perm_mask"], [1, 2, 0]) - - if FLAGS.num_predict is not None: - # [num_predict x tgt_len x bsz] - target_mapping = tf.transpose(features["target_mapping"], [1, 2, 0]) - else: - target_mapping = None - - # target for LM loss - tgt = tf.transpose(features["target"], [1, 0]) - - # target mask for LM loss - tgt_mask = tf.transpose(features["target_mask"], [1, 0]) - - # construct xlnet config and save to model_dir - xlnet_config = XLNetConfig(FLAGS=FLAGS) - xlnet_config.to_json(os.path.join(FLAGS.model_dir, "config.json")) - - # construct run config from FLAGS - run_config = create_run_config(is_training, False, FLAGS) - - xlnet_model = XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp_k, - seg_ids=seg_id, - input_mask=inp_mask, - mems=mems, - perm_mask=perm_mask, - target_mapping=target_mapping, - inp_q=inp_q) - - output = xlnet_model.get_sequence_output() - new_mems = {mem_name: xlnet_model.get_new_memory()} - lookup_table = xlnet_model.get_embedding_table() - - initializer = xlnet_model.get_initializer() - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - # LM loss - lm_loss = lm_loss( - hidden=output, - target=tgt, - n_token=xlnet_config.n_token, - d_model=xlnet_config.d_model, - initializer=initializer, - lookup_table=lookup_table, - tie_weight=True, - bi_data=run_config.bi_data, - use_tpu=run_config.use_tpu) - - #### Quantity to monitor - monitor_dict = {} - - if FLAGS.use_bfloat16: - tgt_mask = tf.cast(tgt_mask, tf.float32) - lm_loss = tf.cast(lm_loss, tf.float32) - - total_loss = tf.reduce_sum(lm_loss * tgt_mask) / tf.reduce_sum(tgt_mask) - monitor_dict["total_loss"] = total_loss - - return total_loss, new_mems, monitor_dict - - -def get_loss(FLAGS, features, labels, mems, is_training): - """Pretraining loss with two-stream attention Transformer-XL.""" - if FLAGS.use_bfloat16: - with tf.tpu.bfloat16_scope(): - return two_stream_loss(FLAGS, features, labels, mems, is_training) - else: - return two_stream_loss(FLAGS, features, labels, mems, is_training) - - -def get_classification_loss( - FLAGS, features, n_class, is_training): - """Loss for downstream classification tasks.""" - - bsz_per_core = tf.shape(features["input_ids"])[0] - - inp = tf.transpose(features["input_ids"], [1, 0]) 
- seg_id = tf.transpose(features["segment_ids"], [1, 0]) - inp_mask = tf.transpose(features["input_mask"], [1, 0]) - label = tf.reshape(features["label_ids"], [bsz_per_core]) - - xlnet_config = XLNetConfig(json_path=FLAGS.model_config_path) - run_config = create_run_config(is_training, True, FLAGS) - - xlnet_model = XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - - summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - - if FLAGS.cls_scope is not None and FLAGS.cls_scope: - cls_scope = "classification_{}".format(FLAGS.cls_scope) - else: - cls_scope = "classification_{}".format(FLAGS.task_name.lower()) - - per_example_loss, logits = classification_loss( - hidden=summary, - labels=label, - n_class=n_class, - initializer=xlnet_model.get_initializer(), - scope=cls_scope, - return_logits=True) - - total_loss = tf.reduce_mean(per_example_loss) - - return total_loss, per_example_loss, logits - - -def get_regression_loss( - FLAGS, features, is_training): - """Loss for downstream regression tasks.""" - - bsz_per_core = tf.shape(features["input_ids"])[0] - - inp = tf.transpose(features["input_ids"], [1, 0]) - seg_id = tf.transpose(features["segment_ids"], [1, 0]) - inp_mask = tf.transpose(features["input_mask"], [1, 0]) - label = tf.reshape(features["label_ids"], [bsz_per_core]) - - xlnet_config = XLNetConfig(json_path=FLAGS.model_config_path) - run_config = create_run_config(is_training, True, FLAGS) - - xlnet_model = XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - - summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - per_example_loss, logits = regression_loss( - hidden=summary, - labels=label, - initializer=xlnet_model.get_initializer(), - scope="regression_{}".format(FLAGS.task_name.lower()), - return_logits=True) - - total_loss = tf.reduce_mean(per_example_loss) - - return total_loss, per_example_loss, logits - - -def get_qa_outputs(FLAGS, features, is_training): - """Loss for downstream span-extraction QA tasks such as SQuAD.""" - - inp = tf.transpose(features["input_ids"], [1, 0]) - seg_id = tf.transpose(features["segment_ids"], [1, 0]) - inp_mask = tf.transpose(features["input_mask"], [1, 0]) - cls_index = tf.reshape(features["cls_index"], [-1]) - - seq_len = tf.shape(inp)[0] - - xlnet_config = XLNetConfig(json_path=FLAGS.model_config_path) - run_config = create_run_config(is_training, True, FLAGS) - - seg_id = tf.cast(seg_id, tf.int32) - xlnet_model = XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - output = xlnet_model.get_sequence_output() - initializer = xlnet_model.get_initializer() - - return_dict = {} - - # invalid position mask such as query and special symbols (PAD, SEP, CLS) - p_mask = features["p_mask"] - - # logit of the start position - with tf.variable_scope("start_logits"): - start_logits = tf.layers.dense( - output, - 1, - kernel_initializer=initializer) - start_logits = tf.transpose(tf.squeeze(start_logits, -1), [1, 0]) - start_logits_masked = start_logits * (1 - p_mask) - 1e30 * p_mask - start_log_probs = tf.nn.log_softmax(start_logits_masked, -1) - - # logit of the end position - with tf.variable_scope("end_logits"): - if is_training: - # during training, compute the end logits 
based on the - # ground truth of the start position - - start_positions = tf.reshape(features["start_positions"], [-1]) - start_index = tf.one_hot(start_positions, depth=seq_len, axis=-1, - dtype=tf.float32) - start_features = tf.einsum("lbh,bl->bh", output, start_index) - start_features = tf.tile(start_features[None], [seq_len, 1, 1]) - end_logits = tf.layers.dense( - tf.concat([output, start_features], axis=-1), xlnet_config.d_model, - kernel_initializer=initializer, activation=tf.tanh, name="dense_0") - end_logits = tf.contrib.layers.layer_norm( - end_logits, begin_norm_axis=-1) - - end_logits = tf.layers.dense( - end_logits, 1, - kernel_initializer=initializer, - name="dense_1") - end_logits = tf.transpose(tf.squeeze(end_logits, -1), [1, 0]) - end_logits_masked = end_logits * (1 - p_mask) - 1e30 * p_mask - end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) - else: - # during inference, compute the end logits based on beam search - - start_top_log_probs, start_top_index = tf.nn.top_k( - start_log_probs, k=FLAGS.start_n_top) - start_index = tf.one_hot(start_top_index, - depth=seq_len, axis=-1, dtype=tf.float32) - start_features = tf.einsum("lbh,bkl->bkh", output, start_index) - end_input = tf.tile(output[:, :, None], - [1, 1, FLAGS.start_n_top, 1]) - start_features = tf.tile(start_features[None], - [seq_len, 1, 1, 1]) - end_input = tf.concat([end_input, start_features], axis=-1) - end_logits = tf.layers.dense( - end_input, - xlnet_config.d_model, - kernel_initializer=initializer, - activation=tf.tanh, - name="dense_0") - end_logits = tf.contrib.layers.layer_norm(end_logits, - begin_norm_axis=-1) - end_logits = tf.layers.dense( - end_logits, - 1, - kernel_initializer=initializer, - name="dense_1") - end_logits = tf.reshape(end_logits, [seq_len, -1, FLAGS.start_n_top]) - end_logits = tf.transpose(end_logits, [1, 2, 0]) - end_logits_masked = end_logits * ( - 1 - p_mask[:, None]) - 1e30 * p_mask[:, None] - end_log_probs = tf.nn.log_softmax(end_logits_masked, -1) - end_top_log_probs, end_top_index = tf.nn.top_k( - end_log_probs, k=FLAGS.end_n_top) - end_top_log_probs = tf.reshape( - end_top_log_probs, - [-1, FLAGS.start_n_top * FLAGS.end_n_top]) - end_top_index = tf.reshape( - end_top_index, - [-1, FLAGS.start_n_top * FLAGS.end_n_top]) - - if is_training: - return_dict["start_log_probs"] = start_log_probs - return_dict["end_log_probs"] = end_log_probs - else: - return_dict["start_top_log_probs"] = start_top_log_probs - return_dict["start_top_index"] = start_top_index - return_dict["end_top_log_probs"] = end_top_log_probs - return_dict["end_top_index"] = end_top_index - - # an additional layer to predict answerability - with tf.variable_scope("answer_class"): - # get the representation of CLS - cls_index = tf.one_hot(cls_index, seq_len, axis=-1, dtype=tf.float32) - cls_feature = tf.einsum("lbh,bl->bh", output, cls_index) - - # get the representation of START - start_p = tf.nn.softmax(start_logits_masked, axis=-1, - name="softmax_start") - start_feature = tf.einsum("lbh,bl->bh", output, start_p) - - # note(zhiliny): no dependency on end_feature so that we can obtain - # one single `cls_logits` for each sample - ans_feature = tf.concat([start_feature, cls_feature], -1) - ans_feature = tf.layers.dense( - ans_feature, - xlnet_config.d_model, - activation=tf.tanh, - kernel_initializer=initializer, name="dense_0") - ans_feature = tf.layers.dropout(ans_feature, FLAGS.dropout, - training=is_training) - cls_logits = tf.layers.dense( - ans_feature, - 1, - kernel_initializer=initializer, - 
name="dense_1", - use_bias=False) - cls_logits = tf.squeeze(cls_logits, -1) - - return_dict["cls_logits"] = cls_logits - - return return_dict - - -def get_race_loss(FLAGS, features, is_training): - """Loss for downstream multi-choice QA tasks such as RACE.""" - - bsz_per_core = tf.shape(features["input_ids"])[0] - - def _transform_features(feature): - out = tf.reshape(feature, [bsz_per_core, 4, -1]) - out = tf.transpose(out, [2, 0, 1]) - out = tf.reshape(out, [-1, bsz_per_core * 4]) - return out - - inp = _transform_features(features["input_ids"]) - seg_id = _transform_features(features["segment_ids"]) - inp_mask = _transform_features(features["input_mask"]) - label = tf.reshape(features["label_ids"], [bsz_per_core]) - - xlnet_config = XLNetConfig(json_path=FLAGS.model_config_path) - run_config = create_run_config(is_training, True, FLAGS) - - xlnet_model = XLNetModel( - xlnet_config=xlnet_config, - run_config=run_config, - input_ids=inp, - seg_ids=seg_id, - input_mask=inp_mask) - summary = xlnet_model.get_pooled_out(FLAGS.summary_type, FLAGS.use_summ_proj) - - with tf.variable_scope("logits"): - logits = tf.layers.dense(summary, 1, - kernel_initializer=xlnet_model.get_initializer()) - logits = tf.reshape(logits, [bsz_per_core, 4]) - - one_hot_target = tf.one_hot(label, 4) - per_example_loss = -tf.reduce_sum( - tf.nn.log_softmax(logits) * one_hot_target, -1) - total_loss = tf.reduce_mean(per_example_loss) - - return total_loss, per_example_loss, logits diff --git a/build/lib/caireCovid/mrqa/model_utils.py b/build/lib/caireCovid/mrqa/model_utils.py deleted file mode 100644 index fd8d6d8..0000000 --- a/build/lib/caireCovid/mrqa/model_utils.py +++ /dev/null @@ -1,399 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import os -import re -import numpy as np -import six -from os.path import join -from six.moves import zip - -from absl import flags - -import tensorflow as tf - - -def configure_tpu(FLAGS): - if FLAGS.use_tpu: - tpu_cluster = tf.contrib.cluster_resolver.TPUClusterResolver( - FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project) - master = tpu_cluster.get_master() - else: - tpu_cluster = None - master = FLAGS.master - - session_config = tf.ConfigProto(allow_soft_placement=True) - # Uncomment the following line if you hope to monitor GPU RAM growth - # session_config.gpu_options.allow_growth = True - - if FLAGS.use_tpu: - strategy = None - tf.logging.info('Use TPU without distribute strategy.') - elif FLAGS.num_core_per_host == 1: - strategy = None - tf.logging.info('Single device mode.') - else: - strategy = tf.contrib.distribute.MirroredStrategy( - num_gpus=FLAGS.num_core_per_host) - tf.logging.info('Use MirroredStrategy with %d devices.', - strategy.num_replicas_in_sync) - - per_host_input = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 - run_config = tf.contrib.tpu.RunConfig( - master=master, - model_dir=FLAGS.model_dir, - session_config=session_config, - tpu_config=tf.contrib.tpu.TPUConfig( - iterations_per_loop=FLAGS.iterations, - num_shards=FLAGS.num_hosts * FLAGS.num_core_per_host, - per_host_input_for_training=per_host_input), - keep_checkpoint_max=FLAGS.max_save, - save_checkpoints_secs=None, - save_checkpoints_steps=FLAGS.save_steps, - train_distribute=strategy - ) - return run_config - - -def init_from_checkpoint(FLAGS, global_vars=False): - tvars = tf.global_variables() if global_vars else tf.trainable_variables() - initialized_variable_names = {} - scaffold_fn = None - if 
FLAGS.init_checkpoint is not None: - if FLAGS.init_checkpoint.endswith("latest"): - ckpt_dir = os.path.dirname(FLAGS.init_checkpoint) - init_checkpoint = tf.train.latest_checkpoint(ckpt_dir) - else: - init_checkpoint = FLAGS.init_checkpoint - - tf.logging.info("Initialize from the ckpt {}".format(init_checkpoint)) - - (assignment_map, initialized_variable_names - ) = get_assignment_map_from_checkpoint(tvars, init_checkpoint) - if FLAGS.use_tpu: - def tpu_scaffold(): - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - return tf.train.Scaffold() - - scaffold_fn = tpu_scaffold - else: - tf.train.init_from_checkpoint(init_checkpoint, assignment_map) - - # Log customized initialization - tf.logging.info("**** Global Variables ****") - for var in tvars: - init_string = "" - if var.name in initialized_variable_names: - init_string = ", *INIT_FROM_CKPT*" - tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, - init_string) - return scaffold_fn - - -def get_train_op(FLAGS, total_loss, grads_and_vars=None): - global_step = tf.train.get_or_create_global_step() - - # increase the learning rate linearly - if FLAGS.warmup_steps > 0: - warmup_lr = (tf.cast(global_step, tf.float32) - / tf.cast(FLAGS.warmup_steps, tf.float32) - * FLAGS.learning_rate) - else: - warmup_lr = 0.0 - - # decay the learning rate - if FLAGS.decay_method == "poly": - decay_lr = tf.train.polynomial_decay( - FLAGS.learning_rate, - global_step=global_step - FLAGS.warmup_steps, - decay_steps=FLAGS.train_steps - FLAGS.warmup_steps, - end_learning_rate=FLAGS.learning_rate * FLAGS.min_lr_ratio) - elif FLAGS.decay_method == "cos": - decay_lr = tf.train.cosine_decay( - FLAGS.learning_rate, - global_step=global_step - FLAGS.warmup_steps, - decay_steps=FLAGS.train_steps - FLAGS.warmup_steps, - alpha=FLAGS.min_lr_ratio) - else: - raise ValueError(FLAGS.decay_method) - - learning_rate = tf.where(global_step < FLAGS.warmup_steps, - warmup_lr, decay_lr) - - if (FLAGS.weight_decay > 0 and not FLAGS.use_tpu and - FLAGS.num_core_per_host > 1): - raise ValueError("Do not support `weight_decay > 0` with multi-gpu " - "training so far.") - - if FLAGS.weight_decay == 0: - optimizer = tf.train.AdamOptimizer( - learning_rate=learning_rate, - epsilon=FLAGS.adam_epsilon) - else: - optimizer = AdamWeightDecayOptimizer( - learning_rate=learning_rate, - epsilon=FLAGS.adam_epsilon, - exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], - weight_decay_rate=FLAGS.weight_decay) - - if FLAGS.use_tpu: - optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer) - - if grads_and_vars is None: - grads_and_vars = optimizer.compute_gradients(total_loss) - gradients, variables = zip(*grads_and_vars) - clipped, gnorm = tf.clip_by_global_norm(gradients, FLAGS.clip) - - if getattr(FLAGS, "lr_layer_decay_rate", 1.0) != 1.0: - n_layer = 0 - for i in range(len(clipped)): - m = re.search(r"model/transformer/layer_(\d+?)/", variables[i].name) - if not m: continue - n_layer = max(n_layer, int(m.group(1)) + 1) - - for i in range(len(clipped)): - for l in range(n_layer): - if "model/transformer/layer_{}/".format(l) in variables[i].name: - abs_rate = FLAGS.lr_layer_decay_rate ** (n_layer - 1 - l) - clipped[i] *= abs_rate - tf.logging.info("Apply mult {:.4f} to layer-{} grad of {}".format( - abs_rate, l, variables[i].name)) - break - - train_op = optimizer.apply_gradients( - zip(clipped, variables), global_step=global_step) - - # Manually increment `global_step` for AdamWeightDecayOptimizer - if FLAGS.weight_decay > 0: - new_global_step = 
global_step + 1 - train_op = tf.group(train_op, [global_step.assign(new_global_step)]) - - return train_op, learning_rate, gnorm - - -def clean_ckpt(_): - input_ckpt = FLAGS.clean_input_ckpt - output_model_dir = FLAGS.clean_output_model_dir - - tf.reset_default_graph() - - var_list = tf.contrib.framework.list_variables(input_ckpt) - var_values, var_dtypes = {}, {} - for (name, shape) in var_list: - if not name.startswith("global_step") and "adam" not in name.lower(): - var_values[name] = None - tf.logging.info("Include {}".format(name)) - else: - tf.logging.info("Exclude {}".format(name)) - - tf.logging.info("Loading from {}".format(input_ckpt)) - reader = tf.contrib.framework.load_checkpoint(input_ckpt) - for name in var_values: - tensor = reader.get_tensor(name) - var_dtypes[name] = tensor.dtype - var_values[name] = tensor - - with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): - tf_vars = [ - tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) - for v in var_values - ] - placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] - assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] - global_step = tf.Variable( - 0, name="global_step", trainable=False, dtype=tf.int64) - saver = tf.train.Saver(tf.all_variables()) - - if not tf.gfile.Exists(output_model_dir): - tf.gfile.MakeDirs(output_model_dir) - - # Build a model consisting only of variables, set them to the average values. - with tf.Session() as sess: - sess.run(tf.initialize_all_variables()) - for p, assign_op, (name, value) in zip(placeholders, assign_ops, - six.iteritems(var_values)): - sess.run(assign_op, {p: value}) - - # Use the built saver to save the averaged checkpoint. - saver.save(sess, join(output_model_dir, "model.ckpt"), - global_step=global_step) - - -def avg_checkpoints(model_dir, output_model_dir, last_k): - tf.reset_default_graph() - - checkpoint_state = tf.train.get_checkpoint_state(model_dir) - checkpoints = checkpoint_state.all_model_checkpoint_paths[- last_k:] - var_list = tf.contrib.framework.list_variables(checkpoints[0]) - var_values, var_dtypes = {}, {} - for (name, shape) in var_list: - if not name.startswith("global_step"): - var_values[name] = np.zeros(shape) - for checkpoint in checkpoints: - reader = tf.contrib.framework.load_checkpoint(checkpoint) - for name in var_values: - tensor = reader.get_tensor(name) - var_dtypes[name] = tensor.dtype - var_values[name] += tensor - tf.logging.info("Read from checkpoint %s", checkpoint) - for name in var_values: # Average. - var_values[name] /= len(checkpoints) - - with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): - tf_vars = [ - tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) - for v in var_values - ] - placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] - assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] - global_step = tf.Variable( - 0, name="global_step", trainable=False, dtype=tf.int64) - saver = tf.train.Saver(tf.all_variables()) - - # Build a model consisting only of variables, set them to the average values. - with tf.Session() as sess: - sess.run(tf.initialize_all_variables()) - for p, assign_op, (name, value) in zip(placeholders, assign_ops, - six.iteritems(var_values)): - sess.run(assign_op, {p: value}) - # Use the built saver to save the averaged checkpoint. 
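That final save writes out variables that have already been reduced to their mean over the last k checkpoints; a minimal, self-contained NumPy sketch of the accumulate-and-divide step performed above (toy values standing in for reader.get_tensor):

import numpy as np

checkpoints = [
    {"w": np.array([1.0, 2.0]), "b": np.array([0.0])},
    {"w": np.array([3.0, 4.0]), "b": np.array([1.0])},
]

avg = {name: np.zeros_like(value) for name, value in checkpoints[0].items()}
for ckpt in checkpoints:
    for name in avg:
        avg[name] += ckpt[name]       # accumulate each non-step variable
for name in avg:
    avg[name] /= len(checkpoints)     # then divide by the number of checkpoints

print(avg["w"])  # [2. 3.]
print(avg["b"])  # [0.5]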
- saver.save(sess, join(output_model_dir, "model.ckpt"), - global_step=global_step) - - -def get_assignment_map_from_checkpoint(tvars, init_checkpoint): - """Compute the union of the current variables and checkpoint variables.""" - assignment_map = {} - initialized_variable_names = {} - - name_to_variable = collections.OrderedDict() - for var in tvars: - name = var.name - m = re.match("^(.*):\\d+$", name) - if m is not None: - name = m.group(1) - name_to_variable[name] = var - - init_vars = tf.train.list_variables(init_checkpoint) - - assignment_map = collections.OrderedDict() - for x in init_vars: - (name, var) = (x[0], x[1]) - # tf.logging.info('original name: %s', name) - if name not in name_to_variable: - continue - # assignment_map[name] = name - assignment_map[name] = name_to_variable[name] - initialized_variable_names[name] = 1 - initialized_variable_names[name + ":0"] = 1 - - return (assignment_map, initialized_variable_names) - - -class AdamWeightDecayOptimizer(tf.train.Optimizer): - """A basic Adam optimizer that includes "correct" L2 weight decay.""" - - def __init__(self, - learning_rate, - weight_decay_rate=0.0, - beta_1=0.9, - beta_2=0.999, - epsilon=1e-6, - exclude_from_weight_decay=None, - include_in_weight_decay=["r_s_bias", "r_r_bias", "r_w_bias"], - name="AdamWeightDecayOptimizer"): - """Constructs a AdamWeightDecayOptimizer.""" - super(AdamWeightDecayOptimizer, self).__init__(False, name) - - self.learning_rate = learning_rate - self.weight_decay_rate = weight_decay_rate - self.beta_1 = beta_1 - self.beta_2 = beta_2 - self.epsilon = epsilon - self.exclude_from_weight_decay = exclude_from_weight_decay - self.include_in_weight_decay = include_in_weight_decay - - def apply_gradients(self, grads_and_vars, global_step=None, name=None): - """See base class.""" - assignments = [] - for (grad, param) in grads_and_vars: - if grad is None or param is None: - continue - - param_name = self._get_variable_name(param.name) - - m = tf.get_variable( - name=param_name + "/adam_m", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - v = tf.get_variable( - name=param_name + "/adam_v", - shape=param.shape.as_list(), - dtype=tf.float32, - trainable=False, - initializer=tf.zeros_initializer()) - - # Standard Adam update. - next_m = ( - tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)) - next_v = ( - tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, - tf.square(grad))) - - update = next_m / (tf.sqrt(next_v) + self.epsilon) - - # Just adding the square of the weights to the loss function is *not* - # the correct way of using L2 regularization/weight decay with Adam, - # since that will interact with the m and v parameters in strange ways. - # - # Instead we want ot decay the weights in a manner that doesn't interact - # with the m/v parameters. This is equivalent to adding the square - # of the weights to the loss with plain (non-momentum) SGD. 
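The update rule that follows implements exactly this idea: the decay term is added to the Adam step after the m/v moments are computed, rather than being folded into the loss. A compact NumPy sketch of one such decoupled weight-decay step, with illustrative values only:

import numpy as np

beta_1, beta_2, eps = 0.9, 0.999, 1e-6
lr, weight_decay_rate = 0.01, 0.1

param = np.array([0.5, -1.0])
grad = np.array([0.2, 0.4])
m = np.zeros_like(param)
v = np.zeros_like(param)

m = beta_1 * m + (1.0 - beta_1) * grad          # first moment
v = beta_2 * v + (1.0 - beta_2) * grad ** 2     # second moment
update = m / (np.sqrt(v) + eps)
update += weight_decay_rate * param             # decoupled decay: never touches m/v
param = param - lr * update
print(param)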
- if self._do_use_weight_decay(param_name): - update += self.weight_decay_rate * param - - update_with_lr = self.learning_rate * update - - next_param = param - update_with_lr - - assignments.extend( - [param.assign(next_param), - m.assign(next_m), - v.assign(next_v)]) - - return tf.group(*assignments, name=name) - - def _do_use_weight_decay(self, param_name): - """Whether to use L2 weight decay for `param_name`.""" - if not self.weight_decay_rate: - return False - for r in self.include_in_weight_decay: - if re.search(r, param_name) is not None: - return True - - if self.exclude_from_weight_decay: - for r in self.exclude_from_weight_decay: - if re.search(r, param_name) is not None: - tf.logging.info('Adam WD excludes {}'.format(param_name)) - return False - return True - - def _get_variable_name(self, param_name): - """Get the variable name from the tensor name.""" - m = re.match("^(.*):\\d+$", param_name) - if m is not None: - param_name = m.group(1) - return param_name - - -if __name__ == "__main__": - flags.DEFINE_string("clean_input_ckpt", "", "input ckpt for cleaning") - flags.DEFINE_string("clean_output_model_dir", "", "output dir for cleaned ckpt") - - FLAGS = flags.FLAGS - - tf.app.run(clean_ckpt) diff --git a/build/lib/caireCovid/mrqa/modeling.py b/build/lib/caireCovid/mrqa/modeling.py deleted file mode 100644 index a9385e4..0000000 --- a/build/lib/caireCovid/mrqa/modeling.py +++ /dev/null @@ -1,783 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - - -def gelu(x): - """Gaussian Error Linear Unit. - - This is a smoother version of the RELU. - Original paper: https://arxiv.org/abs/1606.08415 - Args: - x: float Tensor to perform activation. - - Returns: - `x` with the GELU activation applied. 
- """ - cdf = 0.5 * (1.0 + tf.tanh( - (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) - return x * cdf - - -def embedding_lookup(x, n_token, d_embed, initializer, use_tpu=True, - scope='embedding', reuse=None, dtype=tf.float32): - """TPU and GPU embedding_lookup function.""" - with tf.variable_scope(scope, reuse=reuse): - lookup_table = tf.get_variable('lookup_table', [n_token, d_embed], - dtype=dtype, initializer=initializer) - if use_tpu: - one_hot_idx = tf.one_hot(x, n_token, dtype=dtype) - if one_hot_idx.shape.ndims == 2: - return tf.einsum('in,nd->id', one_hot_idx, lookup_table), lookup_table - else: - return tf.einsum('ibn,nd->ibd', one_hot_idx, lookup_table), lookup_table - else: - return tf.nn.embedding_lookup(lookup_table, x), lookup_table - - -def positional_embedding(pos_seq, inv_freq, bsz=None): - sinusoid_inp = tf.einsum('i,d->id', pos_seq, inv_freq) - pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], -1) - pos_emb = pos_emb[:, None, :] - - if bsz is not None: - pos_emb = tf.tile(pos_emb, [1, bsz, 1]) - - return pos_emb - - -def positionwise_ffn(inp, d_model, d_inner, dropout, kernel_initializer, - activation_type='relu', scope='ff', is_training=True, - reuse=None): - """Position-wise Feed-forward Network.""" - if activation_type == 'relu': - activation = tf.nn.relu - elif activation_type == 'gelu': - activation = gelu - else: - raise ValueError('Unsupported activation type {}'.format(activation_type)) - - output = inp - with tf.variable_scope(scope, reuse=reuse): - output = tf.layers.dense(output, d_inner, activation=activation, - kernel_initializer=kernel_initializer, - name='layer_1') - output = tf.layers.dropout(output, dropout, training=is_training, - name='drop_1') - output = tf.layers.dense(output, d_model, - kernel_initializer=kernel_initializer, - name='layer_2') - output = tf.layers.dropout(output, dropout, training=is_training, - name='drop_2') - output = tf.contrib.layers.layer_norm(output + inp, begin_norm_axis=-1, - scope='LayerNorm') - return output - - -def head_projection(h, d_model, n_head, d_head, kernel_initializer, name): - """Project hidden states to a specific head with a 4D-shape.""" - proj_weight = tf.get_variable('{}/kernel'.format(name), - [d_model, n_head, d_head], dtype=h.dtype, - initializer=kernel_initializer) - head = tf.einsum('ibh,hnd->ibnd', h, proj_weight) - - return head - - -def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training, - kernel_initializer, residual=True): - """Post-attention processing.""" - # post-attention projection (back to `d_model`) - proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head], - dtype=h.dtype, initializer=kernel_initializer) - attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o) - - attn_out = tf.layers.dropout(attn_out, dropout, training=is_training) - if residual: - output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1, - scope='LayerNorm') - else: - output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1, - scope='LayerNorm') - - return output - - -def abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, is_training, - scale): - """Core absolute positional attention operations.""" - - attn_score = tf.einsum('ibnd,jbnd->ijbn', q_head, k_head) - attn_score *= scale - if attn_mask is not None: - attn_score = attn_score - 1e30 * attn_mask - - # attention probability - attn_prob = tf.nn.softmax(attn_score, 1) - attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training) - - # attention output - attn_vec = 
tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head) - - return attn_vec - - -def rel_attn_core(q_head, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, - r_w_bias, r_r_bias, r_s_bias, attn_mask, dropatt, is_training, - scale): - """Core relative positional attention operations.""" - - # content based attention score - ac = tf.einsum('ibnd,jbnd->ijbn', q_head + r_w_bias, k_head_h) - - # position based attention score - bd = tf.einsum('ibnd,jbnd->ijbn', q_head + r_r_bias, k_head_r) - bd = rel_shift(bd, klen=tf.shape(ac)[1]) - - # segment based attention score - if seg_mat is None: - ef = 0 - else: - ef = tf.einsum('ibnd,snd->ibns', q_head + r_s_bias, seg_embed) - ef = tf.einsum('ijbs,ibns->ijbn', seg_mat, ef) - - # merge attention scores and perform masking - attn_score = (ac + bd + ef) * scale - if attn_mask is not None: - # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask - attn_score = attn_score - 1e30 * attn_mask - - # attention probability - attn_prob = tf.nn.softmax(attn_score, 1) - attn_prob = tf.layers.dropout(attn_prob, dropatt, training=is_training) - - # attention output - attn_vec = tf.einsum('ijbn,jbnd->ibnd', attn_prob, v_head_h) - - return attn_vec - - -def rel_shift(x, klen=-1): - """perform relative shift to form the relative attention score.""" - x_size = tf.shape(x) - - x = tf.reshape(x, [x_size[1], x_size[0], x_size[2], x_size[3]]) - x = tf.slice(x, [1, 0, 0, 0], [-1, -1, -1, -1]) - x = tf.reshape(x, [x_size[0], x_size[1] - 1, x_size[2], x_size[3]]) - x = tf.slice(x, [0, 0, 0, 0], [-1, klen, -1, -1]) - - return x - - -def _create_mask(qlen, mlen, dtype=tf.float32, same_length=False): - """create causal attention mask.""" - attn_mask = tf.ones([qlen, qlen], dtype=dtype) - mask_u = tf.matrix_band_part(attn_mask, 0, -1) - mask_dia = tf.matrix_band_part(attn_mask, 0, 0) - attn_mask_pad = tf.zeros([qlen, mlen], dtype=dtype) - ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) - if same_length: - mask_l = tf.matrix_band_part(attn_mask, -1, 0) - ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1) - - return ret - - -def _cache_mem(curr_out, prev_mem, mem_len, reuse_len=None): - """cache hidden states into memory.""" - if mem_len is None or mem_len == 0: - return None - else: - if reuse_len is not None and reuse_len > 0: - curr_out = curr_out[:reuse_len] - - if prev_mem is None: - new_mem = curr_out[-mem_len:] - else: - new_mem = tf.concat([prev_mem, curr_out], 0)[-mem_len:] - - return tf.stop_gradient(new_mem) - - -def relative_positional_encoding(qlen, klen, d_model, clamp_len, attn_type, - bi_data, bsz=None, dtype=None): - """create relative positional encoding.""" - freq_seq = tf.range(0, d_model, 2.0) - if dtype is not None and dtype != tf.float32: - freq_seq = tf.cast(freq_seq, dtype=dtype) - inv_freq = 1 / (10000 ** (freq_seq / d_model)) - - if attn_type == 'bi': - # beg, end = klen - 1, -qlen - beg, end = klen, -qlen - elif attn_type == 'uni': - # beg, end = klen - 1, -1 - beg, end = klen, -1 - else: - raise ValueError('Unknown `attn_type` {}.'.format(attn_type)) - - if bi_data: - fwd_pos_seq = tf.range(beg, end, -1.0) - bwd_pos_seq = tf.range(-beg, -end, 1.0) - - if dtype is not None and dtype != tf.float32: - fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) - bwd_pos_seq = tf.cast(bwd_pos_seq, dtype=dtype) - - if clamp_len > 0: - fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len) - bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -clamp_len, clamp_len) - - if bsz is not None: - # With bi_data, the batch size should be divisible 
by 2. - assert bsz%2 == 0 - fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz//2) - bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq, bsz//2) - else: - fwd_pos_emb = positional_embedding(fwd_pos_seq, inv_freq) - bwd_pos_emb = positional_embedding(bwd_pos_seq, inv_freq) - - pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1) - else: - fwd_pos_seq = tf.range(beg, end, -1.0) - if dtype is not None and dtype != tf.float32: - fwd_pos_seq = tf.cast(fwd_pos_seq, dtype=dtype) - if clamp_len > 0: - fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -clamp_len, clamp_len) - pos_emb = positional_embedding(fwd_pos_seq, inv_freq, bsz) - - return pos_emb - - -def multihead_attn(q, k, v, attn_mask, d_model, n_head, d_head, dropout, - dropatt, is_training, kernel_initializer, residual=True, - scope='abs_attn', reuse=None): - """Standard multi-head attention with absolute positional embedding.""" - - scale = 1 / (d_head ** 0.5) - with tf.variable_scope(scope, reuse=reuse): - # attention heads - q_head = head_projection( - q, d_model, n_head, d_head, kernel_initializer, 'q') - k_head = head_projection( - k, d_model, n_head, d_head, kernel_initializer, 'k') - v_head = head_projection( - v, d_model, n_head, d_head, kernel_initializer, 'v') - - # attention vector - attn_vec = abs_attn_core(q_head, k_head, v_head, attn_mask, dropatt, - is_training, scale) - - # post processing - output = post_attention(v, attn_vec, d_model, n_head, d_head, dropout, - is_training, kernel_initializer, residual) - - return output - - - -def rel_multihead_attn(h, r, r_w_bias, r_r_bias, seg_mat, r_s_bias, seg_embed, - attn_mask, mems, d_model, n_head, d_head, dropout, - dropatt, is_training, kernel_initializer, - scope='rel_attn', reuse=None): - """Multi-head attention with relative positional encoding.""" - - scale = 1 / (d_head ** 0.5) - with tf.variable_scope(scope, reuse=reuse): - if mems is not None and mems.shape.ndims > 1: - cat = tf.concat([mems, h], 0) - else: - cat = h - - # content heads - q_head_h = head_projection( - h, d_model, n_head, d_head, kernel_initializer, 'q') - k_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'k') - v_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'v') - - # positional heads - k_head_r = head_projection( - r, d_model, n_head, d_head, kernel_initializer, 'r') - - # core attention ops - attn_vec = rel_attn_core( - q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask, dropatt, is_training, scale) - - # post processing - output = post_attention(h, attn_vec, d_model, n_head, d_head, dropout, - is_training, kernel_initializer) - - return output - - -def two_stream_rel_attn(h, g, r, mems, r_w_bias, r_r_bias, seg_mat, r_s_bias, - seg_embed, attn_mask_h, attn_mask_g, target_mapping, - d_model, n_head, d_head, dropout, dropatt, is_training, - kernel_initializer, scope='rel_attn'): - """Two-stream attention with relative positional encoding.""" - - scale = 1 / (d_head ** 0.5) - with tf.variable_scope(scope, reuse=False): - - # content based attention score - if mems is not None and mems.shape.ndims > 1: - cat = tf.concat([mems, h], 0) - else: - cat = h - - # content-based key head - k_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'k') - - # content-based value head - v_head_h = head_projection( - cat, d_model, n_head, d_head, kernel_initializer, 'v') - - # position-based key head - k_head_r = head_projection( - r, d_model, n_head, d_head, 
kernel_initializer, 'r') - - ##### h-stream - # content-stream query head - q_head_h = head_projection( - h, d_model, n_head, d_head, kernel_initializer, 'q') - - # core attention ops - attn_vec_h = rel_attn_core( - q_head_h, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask_h, dropatt, is_training, scale) - - # post processing - output_h = post_attention(h, attn_vec_h, d_model, n_head, d_head, dropout, - is_training, kernel_initializer) - - with tf.variable_scope(scope, reuse=True): - ##### g-stream - # query-stream query head - q_head_g = head_projection( - g, d_model, n_head, d_head, kernel_initializer, 'q') - - # core attention ops - if target_mapping is not None: - q_head_g = tf.einsum('mbnd,mlb->lbnd', q_head_g, target_mapping) - attn_vec_g = rel_attn_core( - q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale) - attn_vec_g = tf.einsum('lbnd,mlb->mbnd', attn_vec_g, target_mapping) - else: - attn_vec_g = rel_attn_core( - q_head_g, k_head_h, v_head_h, k_head_r, seg_embed, seg_mat, r_w_bias, - r_r_bias, r_s_bias, attn_mask_g, dropatt, is_training, scale) - - # post processing - output_g = post_attention(g, attn_vec_g, d_model, n_head, d_head, dropout, - is_training, kernel_initializer) - - return output_h, output_g - - -def transformer_xl(inp_k, n_token, n_layer, d_model, n_head, - d_head, d_inner, dropout, dropatt, attn_type, - bi_data, initializer, is_training, mem_len=None, - inp_q=None, mems=None, - same_length=False, clamp_len=-1, untie_r=False, - use_tpu=True, input_mask=None, - perm_mask=None, seg_id=None, reuse_len=None, - ff_activation='relu', target_mapping=None, - use_bfloat16=False, scope='transformer', **kwargs): - """ - Defines a Transformer-XL computation graph with additional - support for XLNet. - - Args: - - inp_k: int32 Tensor in shape [len, bsz], the input token IDs. - seg_id: int32 Tensor in shape [len, bsz], the input segment IDs. - input_mask: float32 Tensor in shape [len, bsz], the input mask. - 0 for real tokens and 1 for padding. - mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory - from previous batches. The length of the list equals n_layer. - If None, no memory is used. - perm_mask: float32 Tensor in shape [len, len, bsz]. - If perm_mask[i, j, k] = 0, i attend to j in batch k; - if perm_mask[i, j, k] = 1, i does not attend to j in batch k. - If None, each position attends to all the others. - target_mapping: float32 Tensor in shape [num_predict, len, bsz]. - If target_mapping[i, j, k] = 1, the i-th predict in batch k is - on the j-th token. - Only used during pretraining for partial prediction. - Set to None during finetuning. - inp_q: float32 Tensor in shape [len, bsz]. - 1 for tokens with losses and 0 for tokens without losses. - Only used during pretraining for two-stream attention. - Set to None during finetuning. - - n_layer: int, the number of layers. - d_model: int, the hidden size. - n_head: int, the number of attention heads. - d_head: int, the dimension size of each attention head. - d_inner: int, the hidden size in feed-forward layers. - ff_activation: str, "relu" or "gelu". - untie_r: bool, whether to untie the biases in attention. - n_token: int, the vocab size. - - is_training: bool, whether in training mode. - use_tpu: bool, whether TPUs are used. - use_bfloat16: bool, use bfloat16 instead of float32. - dropout: float, dropout rate. - dropatt: float, dropout rate on attention probabilities. 
- init: str, the initialization scheme, either "normal" or "uniform". - init_range: float, initialize the parameters with a uniform distribution - in [-init_range, init_range]. Only effective when init="uniform". - init_std: float, initialize the parameters with a normal distribution - with mean 0 and stddev init_std. Only effective when init="normal". - mem_len: int, the number of tokens to cache. - reuse_len: int, the number of tokens in the currect batch to be cached - and reused in the future. - bi_data: bool, whether to use bidirectional input pipeline. - Usually set to True during pretraining and False during finetuning. - clamp_len: int, clamp all relative distances larger than clamp_len. - -1 means no clamping. - same_length: bool, whether to use the same attention length for each token. - summary_type: str, "last", "first", "mean", or "attn". The method - to pool the input to get a vector representation. - initializer: A tf initializer. - scope: scope name for the computation graph. - """ - tf.logging.info('memory input {}'.format(mems)) - tf_float = tf.bfloat16 if use_bfloat16 else tf.float32 - tf.logging.info('Use float type {}'.format(tf_float)) - - new_mems = [] - with tf.variable_scope(scope): - if untie_r: - r_w_bias = tf.get_variable('r_w_bias', [n_layer, n_head, d_head], - dtype=tf_float, initializer=initializer) - r_r_bias = tf.get_variable('r_r_bias', [n_layer, n_head, d_head], - dtype=tf_float, initializer=initializer) - else: - r_w_bias = tf.get_variable('r_w_bias', [n_head, d_head], - dtype=tf_float, initializer=initializer) - r_r_bias = tf.get_variable('r_r_bias', [n_head, d_head], - dtype=tf_float, initializer=initializer) - - bsz = tf.shape(inp_k)[1] - qlen = tf.shape(inp_k)[0] - mlen = tf.shape(mems[0])[0] if mems is not None else 0 - klen = mlen + qlen - - ##### Attention mask - # causal attention mask - if attn_type == 'uni': - attn_mask = _create_mask(qlen, mlen, tf_float, same_length) - attn_mask = attn_mask[:, :, None, None] - elif attn_type == 'bi': - attn_mask = None - else: - raise ValueError('Unsupported attention type: {}'.format(attn_type)) - - # data mask: input mask & perm mask - if input_mask is not None and perm_mask is not None: - data_mask = input_mask[None] + perm_mask - elif input_mask is not None and perm_mask is None: - data_mask = input_mask[None] - elif input_mask is None and perm_mask is not None: - data_mask = perm_mask - else: - data_mask = None - - if data_mask is not None: - # all mems can be attended to - mems_mask = tf.zeros([tf.shape(data_mask)[0], mlen, bsz], - dtype=tf_float) - data_mask = tf.concat([mems_mask, data_mask], 1) - if attn_mask is None: - attn_mask = data_mask[:, :, :, None] - else: - attn_mask += data_mask[:, :, :, None] - - if attn_mask is not None: - attn_mask = tf.cast(attn_mask > 0, dtype=tf_float) # change attn_mask into float type - - if attn_mask is not None: - non_tgt_mask = -tf.eye(qlen, dtype=tf_float) - non_tgt_mask = tf.concat([tf.zeros([qlen, mlen], dtype=tf_float), - non_tgt_mask], axis=-1) - non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, - dtype=tf_float) - else: - non_tgt_mask = None - - ##### Word embedding - word_emb_k, lookup_table = embedding_lookup( - x=inp_k, - n_token=n_token, - d_embed=d_model, - initializer=initializer, - use_tpu=use_tpu, - dtype=tf_float, - scope='word_embedding') - - if inp_q is not None: - with tf.variable_scope('mask_emb'): - mask_emb = tf.get_variable('mask_emb', [1, 1, d_model], dtype=tf_float) - if target_mapping is not None: - word_emb_q = 
tf.tile(mask_emb, [tf.shape(target_mapping)[0], bsz, 1]) - else: - inp_q_ext = inp_q[:, :, None] - word_emb_q = inp_q_ext * mask_emb + (1 - inp_q_ext) * word_emb_k - output_h = tf.layers.dropout(word_emb_k, dropout, training=is_training) - if inp_q is not None: - output_g = tf.layers.dropout(word_emb_q, dropout, training=is_training) - - ##### Segment embedding - if seg_id is not None: - if untie_r: - r_s_bias = tf.get_variable('r_s_bias', [n_layer, n_head, d_head], - dtype=tf_float, initializer=initializer) - else: - # default case (tie) - r_s_bias = tf.get_variable('r_s_bias', [n_head, d_head], - dtype=tf_float, initializer=initializer) - - seg_embed = tf.get_variable('seg_embed', [n_layer, 2, n_head, d_head], - dtype=tf_float, initializer=initializer) - - # Convert `seg_id` to one-hot `seg_mat` - mem_pad = tf.zeros([mlen, bsz], dtype=tf.int32) - cat_ids = tf.concat([mem_pad, seg_id], 0) - - # `1` indicates not in the same segment [qlen x klen x bsz] - seg_mat = tf.cast( - tf.logical_not(tf.equal(seg_id[:, None], cat_ids[None, :])), - tf.int32) - seg_mat = tf.one_hot(seg_mat, 2, dtype=tf_float) - else: - seg_mat = None - - ##### Positional encoding - pos_emb = relative_positional_encoding( - qlen, klen, d_model, clamp_len, attn_type, bi_data, - bsz=bsz, dtype=tf_float) - pos_emb = tf.layers.dropout(pos_emb, dropout, training=is_training) - - ##### Attention layers - if mems is None: - mems = [None] * n_layer - - for i in range(n_layer): - # cache new mems - new_mems.append(_cache_mem(output_h, mems[i], mem_len, reuse_len)) - - # segment bias - if seg_id is None: - r_s_bias_i = None - seg_embed_i = None - else: - r_s_bias_i = r_s_bias if not untie_r else r_s_bias[i] - seg_embed_i = seg_embed[i] - - with tf.variable_scope('layer_{}'.format(i)): - if inp_q is not None: - output_h, output_g = two_stream_rel_attn( - h=output_h, - g=output_g, - r=pos_emb, - r_w_bias=r_w_bias if not untie_r else r_w_bias[i], - r_r_bias=r_r_bias if not untie_r else r_r_bias[i], - seg_mat=seg_mat, - r_s_bias=r_s_bias_i, - seg_embed=seg_embed_i, - attn_mask_h=non_tgt_mask, - attn_mask_g=attn_mask, - mems=mems[i], - target_mapping=target_mapping, - d_model=d_model, - n_head=n_head, - d_head=d_head, - dropout=dropout, - dropatt=dropatt, - is_training=is_training, - kernel_initializer=initializer) - reuse = True - else: - reuse = False - - output_h = rel_multihead_attn( - h=output_h, - r=pos_emb, - r_w_bias=r_w_bias if not untie_r else r_w_bias[i], - r_r_bias=r_r_bias if not untie_r else r_r_bias[i], - seg_mat=seg_mat, - r_s_bias=r_s_bias_i, - seg_embed=seg_embed_i, - attn_mask=non_tgt_mask, - mems=mems[i], - d_model=d_model, - n_head=n_head, - d_head=d_head, - dropout=dropout, - dropatt=dropatt, - is_training=is_training, - kernel_initializer=initializer, - reuse=reuse) - - if inp_q is not None: - output_g = positionwise_ffn( - inp=output_g, - d_model=d_model, - d_inner=d_inner, - dropout=dropout, - kernel_initializer=initializer, - activation_type=ff_activation, - is_training=is_training) - - output_h = positionwise_ffn( - inp=output_h, - d_model=d_model, - d_inner=d_inner, - dropout=dropout, - kernel_initializer=initializer, - activation_type=ff_activation, - is_training=is_training, - reuse=reuse) - - if inp_q is not None: - output = tf.layers.dropout(output_g, dropout, training=is_training) - else: - output = tf.layers.dropout(output_h, dropout, training=is_training) - - return output, new_mems, lookup_table - - -def lm_loss(hidden, target, n_token, d_model, initializer, lookup_table=None, - tie_weight=False, 
bi_data=True, use_tpu=False): - """doc.""" - - with tf.variable_scope('lm_loss'): - if tie_weight: - assert lookup_table is not None, \ - 'lookup_table cannot be None for tie_weight' - softmax_w = lookup_table - else: - softmax_w = tf.get_variable('weight', [n_token, d_model], - dtype=hidden.dtype, initializer=initializer) - - softmax_b = tf.get_variable('bias', [n_token], dtype=hidden.dtype, - initializer=tf.zeros_initializer()) - - logits = tf.einsum('ibd,nd->ibn', hidden, softmax_w) + softmax_b - - if use_tpu: - one_hot_target = tf.one_hot(target, n_token, dtype=logits.dtype) - loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) - else: - loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, - logits=logits) - - return loss - - -def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout, - dropatt, input_mask, is_training, initializer, - scope=None, reuse=None, use_proj=True): - - """ - Different classification tasks may not may not share the same parameters - to summarize the sequence features. - - If shared, one can keep the `scope` to the default value `None`. - Otherwise, one should specify a different `scope` for each task. - """ - - with tf.variable_scope(scope, 'sequnece_summary', reuse=reuse): - if summary_type == 'last': - summary = hidden[-1] - elif summary_type == 'first': - summary = hidden[0] - elif summary_type == 'mean': - summary = tf.reduce_mean(hidden, axis=0) - elif summary_type == 'attn': - bsz = tf.shape(hidden)[1] - - summary_bias = tf.get_variable('summary_bias', [d_model], - dtype=hidden.dtype, - initializer=initializer) - summary_bias = tf.tile(summary_bias[None, None], [1, bsz, 1]) - - if input_mask is not None: - input_mask = input_mask[None, :, :, None] - - summary = multihead_attn(summary_bias, hidden, hidden, input_mask, - d_model, n_head, d_head, dropout, dropatt, - is_training, initializer, residual=False) - summary = summary[0] - else: - raise ValueError('Unsupported summary type {}'.format(summary_type)) - - # use another projection as in BERT - if use_proj: - summary = tf.layers.dense( - summary, - d_model, - activation=tf.tanh, - kernel_initializer=initializer, - name='summary') - - # dropout - summary = tf.layers.dropout( - summary, dropout, training=is_training, - name='dropout') - - return summary - - -def classification_loss(hidden, labels, n_class, initializer, scope, reuse=None, - return_logits=False): - """ - Different classification tasks should use different scope names to ensure - different dense layers (parameters) are used to produce the logits. - - An exception will be in transfer learning, where one hopes to transfer - the classification weights. 
- """ - - with tf.variable_scope(scope, reuse=reuse): - logits = tf.layers.dense( - hidden, - n_class, - kernel_initializer=initializer, - name='logit') - - one_hot_target = tf.one_hot(labels, n_class, dtype=hidden.dtype) - loss = -tf.reduce_sum(tf.nn.log_softmax(logits) * one_hot_target, -1) - - if return_logits: - return loss, logits - - return loss - - -def regression_loss(hidden, labels, initializer, scope, reuse=None, - return_logits=False): - with tf.variable_scope(scope, reuse=reuse): - logits = tf.layers.dense( - hidden, - 1, - kernel_initializer=initializer, - name='logit') - - logits = tf.squeeze(logits, axis=-1) - loss = tf.square(logits - labels) - - if return_logits: - return loss, logits - - return loss - diff --git a/build/lib/caireCovid/mrqa/multiqa_utils.py b/build/lib/caireCovid/mrqa/multiqa_utils.py deleted file mode 100644 index 09b8f93..0000000 --- a/build/lib/caireCovid/mrqa/multiqa_utils.py +++ /dev/null @@ -1,111 +0,0 @@ -import argparse -import collections -import json -import numpy as np -import os -import re -import string -import sys - -OPTS = None - -def make_qid_to_has_ans(dataset): - qid_to_has_ans = {} - for entry in dataset: - qid_to_has_ans[entry['qid']] = bool(entry['answers']) - return qid_to_has_ans - -def normalize_answer(s): - """Lower text and remove punctuation, articles and extra whitespace.""" - def remove_articles(text): - regex = re.compile(r'\b(a|an|the)\b', re.UNICODE) - return re.sub(regex, ' ', text) - def white_space_fix(text): - return ' '.join(text.split()) - def remove_punc(text): - exclude = set(string.punctuation) - return ''.join(ch for ch in text if ch not in exclude) - def lower(text): - return text.lower() - return white_space_fix(remove_articles(remove_punc(lower(s)))) - -def get_tokens(s): - if not s: return [] - return normalize_answer(s).split() - -def compute_exact(a_gold, a_pred): - return int(normalize_answer(a_gold) == normalize_answer(a_pred)) - -def compute_f1(a_gold, a_pred): - gold_toks = get_tokens(a_gold) - pred_toks = get_tokens(a_pred) - common = collections.Counter(gold_toks) & collections.Counter(pred_toks) - num_same = sum(common.values()) - if len(gold_toks) == 0 or len(pred_toks) == 0: - # If either is no-answer, then F1 is 1 if they agree, 0 otherwise - return int(gold_toks == pred_toks) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(pred_toks) - recall = 1.0 * num_same / len(gold_toks) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - -def get_raw_scores(dataset, preds): - exact_scores = {} - f1_scores = {} - - for qa in dataset: - qid = qa['qid'] - gold_answers = [a['text'] for a in qa['detected_answers'] - if normalize_answer(a['text'])] - if not gold_answers: - # For unanswerable questions, only correct answer is empty string - gold_answers = [''] - if qid not in preds: - print('Missing prediction for %s' % qid) - continue - a_pred = preds[qid] - # Take max over all gold answers - exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers) - f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers) - return exact_scores, f1_scores - -def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): - num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) - cur_score = num_no_ans - best_score = cur_score - best_thresh = 0.0 - qid_list = sorted(na_probs, key=lambda k: na_probs[k]) - for i, qid in enumerate(qid_list): - if qid not in scores: continue - if qid_to_has_ans[qid]: - diff = scores[qid] - else: - if preds[qid]: - diff = -1 - 
else: - diff = 0 - cur_score += diff - if cur_score > best_score: - best_score = cur_score - best_thresh = na_probs[qid] - - has_ans_score, has_ans_cnt = 0, 0 - for qid in qid_list: - if not qid_to_has_ans[qid]: continue - has_ans_cnt += 1 - - if qid not in scores: continue - has_ans_score += scores[qid] - return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt - -def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): - best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) - best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) - main_eval['best_exact'] = best_exact - main_eval['best_exact_thresh'] = exact_thresh - main_eval['best_f1'] = best_f1 - main_eval['best_f1_thresh'] = f1_thresh - main_eval['has_ans_exact'] = has_ans_exact - main_eval['has_ans_f1'] = has_ans_f1 \ No newline at end of file diff --git a/build/lib/caireCovid/mrqa/predictor_kaggle.py b/build/lib/caireCovid/mrqa/predictor_kaggle.py deleted file mode 100644 index 61dba8d..0000000 --- a/build/lib/caireCovid/mrqa/predictor_kaggle.py +++ /dev/null @@ -1,920 +0,0 @@ -# coding=utf-8 -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from absl import flags -import absl.logging as _logging # pylint: disable=unused-import - -import collections -import os -import time -import math -import json -import six -import random -import gc -import sys -import pprint - -import numpy as np - -if six.PY2: - import cPickle as pickle -else: - import pickle -import jsonlines - -import tensorflow as tf -import sentencepiece as spm -from .prepro_utils import preprocess_text, encode_ids, encode_pieces, printable_text -from .data_utils import SEP_ID, CLS_ID, VOCAB_SIZE - -SPIECE_UNDERLINE = u'▁' - -SEG_ID_P = 0 -SEG_ID_Q = 1 -SEG_ID_CLS = 2 -SEG_ID_PAD = 3 - - -class MultiqaExample(object): - """A single training/test example for simple sequence classification. - - For examples without an answer, the start and end position are -1. 
- """ - - def __init__(self, - qas_id, - question_text, - paragraph_text, - orig_answer_text=None, - start_position=None, - send_position=None, - is_impossible=False): - self.qas_id = qas_id - self.question_text = question_text - self.paragraph_text = paragraph_text - self.orig_answer_text = orig_answer_text - self.start_position = start_position - self.send_position = send_position - self.is_impossible = is_impossible - - def __str__(self): - return self.__repr__() - - def __repr__(self): - s = "" - s += "qas_id: %s" % (printable_text(self.qas_id)) - s += ", question_text: %s" % ( - printable_text(self.question_text)) - s += ", paragraph_text: [%s]" % (" ".join(self.paragraph_text)) - if self.start_position: - s += ", start_position: %d" % (self.start_position) - if self.start_position: - s += ", is_impossible: %r" % (self.is_impossible) - if self.start_position: - s += ", send_position: %d" % (self.send_position) - return s - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, - unique_id, - example_index, - doc_span_index, - tok_start_to_orig_index, - tok_end_to_orig_index, - token_is_max_context, - input_ids, - input_mask, - p_mask, - segment_ids, - paragraph_len, - cls_index, - start_position=None, - end_position=None, - is_impossible=None): - self.unique_id = unique_id - self.example_index = example_index - self.doc_span_index = doc_span_index - self.tok_start_to_orig_index = tok_start_to_orig_index # paragraph - self.tok_end_to_orig_index = tok_end_to_orig_index # paragraph - self.token_is_max_context = token_is_max_context - self.input_ids = input_ids # context+question - self.input_mask = input_mask - self.p_mask = p_mask - self.segment_ids = segment_ids - self.paragraph_len = paragraph_len - self.cls_index = cls_index - self.start_position = start_position - self.end_position = end_position - self.is_impossible = is_impossible - -def read_kaggle_data(input_file, is_training): - """Read a QA data jsonl file into a list of Examples.""" - with tf.gfile.Open(input_file, "r") as reader: - data = json.load(reader) - - input_data = data['data'] - examples = [] - for item in input_data: - paragraphs = item['paragraphs'] - for entry in paragraphs: - assert type(entry) == dict - assert u'context' in entry - assert u'qas' in entry - - paragraph_text = entry["context"] - - for qa in entry["qas"]: - assert u'id' in qa - assert u'question' in qa - - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - send_position = None - orig_answer_text = None - is_impossible = False - - example = MultiqaExample( - qas_id=qas_id, - question_text=question_text, - paragraph_text=paragraph_text, - orig_answer_text=orig_answer_text, - start_position=start_position, - send_position=send_position, - is_impossible=is_impossible) - examples.append(example) - return examples - -def arrange_kaggle_data(input_data, is_training): - """Read a QA data jsonl file into a list of Examples.""" - examples = [] - for entry in input_data: - - assert type(entry) == dict - assert u'context' in entry - assert u'qas' in entry - - paragraph_text = entry["context"] - - for qa in entry["qas"]: - assert u'id' in qa - assert u'question' in qa - - qas_id = qa["id"] - question_text = qa["question"] - start_position = None - send_position = None - orig_answer_text = None - is_impossible = False - - example = MultiqaExample( - qas_id=qas_id, - question_text=question_text, - paragraph_text=paragraph_text, - orig_answer_text=orig_answer_text, - start_position=start_position, 
- send_position=send_position, - is_impossible=is_impossible) - examples.append(example) - return examples - -def _convert_index(index, pos, M=None, is_start=True): - if index[pos] is not None: - return index[pos] - N = len(index) - rear = pos - while rear < N - 1 and index[rear] is None: - rear += 1 - front = pos - while front > 0 and index[front] is None: - front -= 1 - assert index[front] is not None or index[rear] is not None - if index[front] is None: - if index[rear] >= 1: - if is_start: - return 0 - else: - return index[rear] - 1 - return index[rear] - if index[rear] is None: - if M is not None and index[front] < M - 1: - if is_start: - return index[front] + 1 - else: - return M - 1 - return index[front] - if is_start: - if index[rear] > index[front] + 1: - return index[front] + 1 - else: - return index[rear] - else: - if index[rear] > index[front] + 1: - return index[rear] - 1 - else: - return index[front] - -def convert_examples_to_features(examples, sp_model, max_seq_length, - doc_stride, max_query_length, is_training, - output_fn, FLAGS): - """Loads a data file into a list of `InputBatch`s.""" - - cnt_pos, cnt_neg = 0, 0 - unique_id = 1000000000 - max_N, max_M = 1024, 1024 - f = np.zeros((max_N, max_M), dtype=np.float32) - - for (example_index, example) in enumerate(examples): - - if example_index % 100 == 0: - tf.logging.info('Converting {}/{} pos {} neg {}'.format( - example_index, len(examples), cnt_pos, cnt_neg)) - - query_tokens = encode_ids( - sp_model, - preprocess_text(example.question_text, lower=FLAGS.uncased)) - - if len(query_tokens) > max_query_length: - query_tokens = query_tokens[0:max_query_length] - - paragraph_text = example.paragraph_text - para_tokens = encode_pieces( - sp_model, - preprocess_text(example.paragraph_text, lower=FLAGS.uncased)) - - chartok_to_tok_index = [] - tok_start_to_chartok_index = [] - tok_end_to_chartok_index = [] - char_cnt = 0 - for i, token in enumerate(para_tokens): - chartok_to_tok_index.extend([i] * len(token)) - tok_start_to_chartok_index.append(char_cnt) - char_cnt += len(token) - tok_end_to_chartok_index.append(char_cnt - 1) - - tok_cat_text = ''.join(para_tokens).replace(SPIECE_UNDERLINE, ' ') - N, M = len(paragraph_text), len(tok_cat_text) - - if N > max_N or M > max_M: - max_N = max(N, max_N) - max_M = max(M, max_M) - f = np.zeros((max_N, max_M), dtype=np.float32) - gc.collect() - - g = {} - - def _lcs_match(max_dist): - f.fill(0) - g.clear() - - ### longest common sub sequence - # f[i, j] = max(f[i - 1, j], f[i, j - 1], f[i - 1, j - 1] + match(i, j)) - for i in range(N): - - # note(zhiliny): - # unlike standard LCS, this is specifically optimized for the setting - # because the mismatch between sentence pieces and original text will - # be small - for j in range(i - max_dist, i + max_dist): - if j >= M or j < 0: continue - - if i > 0: - g[(i, j)] = 0 - f[i, j] = f[i - 1, j] - - if j > 0 and f[i, j - 1] > f[i, j]: - g[(i, j)] = 1 - f[i, j] = f[i, j - 1] - - f_prev = f[i - 1, j - 1] if i > 0 and j > 0 else 0 - if (preprocess_text(paragraph_text[i], lower=FLAGS.uncased, - remove_space=False) - == tok_cat_text[j] - and f_prev + 1 > f[i, j]): - g[(i, j)] = 2 - f[i, j] = f_prev + 1 - - max_dist = abs(N - M) + 5 - for _ in range(2): - _lcs_match(max_dist) - if f[N - 1, M - 1] > 0.8 * N: break - max_dist *= 2 - - orig_to_chartok_index = [None] * N - chartok_to_orig_index = [None] * M - i, j = N - 1, M - 1 - while i >= 0 and j >= 0: - if (i, j) not in g: break - if g[(i, j)] == 2: - orig_to_chartok_index[i] = j - 
chartok_to_orig_index[j] = i - i, j = i - 1, j - 1 - elif g[(i, j)] == 1: - j = j - 1 - else: - i = i - 1 - - if all(v is None for v in orig_to_chartok_index) or f[N - 1, M - 1] < 0.8 * N: - print('MISMATCH DETECTED!') - continue - - tok_start_to_orig_index = [] - tok_end_to_orig_index = [] - for i in range(len(para_tokens)): - start_chartok_pos = tok_start_to_chartok_index[i] - end_chartok_pos = tok_end_to_chartok_index[i] - start_orig_pos = _convert_index(chartok_to_orig_index, start_chartok_pos, - N, is_start=True) - end_orig_pos = _convert_index(chartok_to_orig_index, end_chartok_pos, - N, is_start=False) - - tok_start_to_orig_index.append(start_orig_pos) - tok_end_to_orig_index.append(end_orig_pos) - - if not is_training: - tok_start_position = tok_end_position = None - - if is_training and example.is_impossible: - tok_start_position = -1 - tok_end_position = -1 - - if is_training and not example.is_impossible: - start_position = example.start_position - # end_position = start_position + len(example.orig_answer_text) - 1 - end_position = example.send_position - - start_chartok_pos = _convert_index(orig_to_chartok_index, start_position, - is_start=True) - tok_start_position = chartok_to_tok_index[start_chartok_pos] - - end_chartok_pos = _convert_index(orig_to_chartok_index, end_position, - is_start=False) - tok_end_position = chartok_to_tok_index[end_chartok_pos] - assert tok_start_position <= tok_end_position - - def _piece_to_id(x): - if six.PY2 and isinstance(x, unicode): - x = x.encode('utf-8') - return sp_model.PieceToId(x) - - all_doc_tokens = list(map(_piece_to_id, para_tokens)) - - # The -3 accounts for [CLS], [SEP] and [SEP] - max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 - - # We can have documents that are longer than the maximum sequence length. - # To deal with this we do a sliding window approach, where we take chunks - # of the up to our max length with a stride of `doc_stride`. 
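# [Editor's note] Illustrative sketch only, not part of the deleted module: a
# plain-Python restatement of the sliding-window chunking described in the
# comment above, using hypothetical sizes (12 tokens, window 5, stride 3).
def sliding_doc_spans(num_tokens, max_tokens_for_doc, doc_stride):
    """Return (start, length) windows covering num_tokens, as the loop below does."""
    spans = []
    start = 0
    while start < num_tokens:
        length = min(num_tokens - start, max_tokens_for_doc)
        spans.append((start, length))
        if start + length == num_tokens:
            break
        start += min(length, doc_stride)
    return spans

# sliding_doc_spans(12, 5, 3) -> [(0, 5), (3, 5), (6, 5), (9, 3)]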
- _DocSpan = collections.namedtuple( # pylint: disable=invalid-name - "DocSpan", ["start", "length"]) - doc_spans = [] - start_offset = 0 - while start_offset < len(all_doc_tokens): - length = len(all_doc_tokens) - start_offset - if length > max_tokens_for_doc: - length = max_tokens_for_doc - doc_spans.append(_DocSpan(start=start_offset, length=length)) - if start_offset + length == len(all_doc_tokens): - break - start_offset += min(length, doc_stride) - - for (doc_span_index, doc_span) in enumerate(doc_spans): - tokens = [] - token_is_max_context = {} - segment_ids = [] - p_mask = [] - - cur_tok_start_to_orig_index = [] - cur_tok_end_to_orig_index = [] - - for i in range(doc_span.length): - split_token_index = doc_span.start + i - - cur_tok_start_to_orig_index.append( - tok_start_to_orig_index[split_token_index]) - cur_tok_end_to_orig_index.append( - tok_end_to_orig_index[split_token_index]) - - is_max_context = _check_is_max_context(doc_spans, doc_span_index, - split_token_index) - token_is_max_context[len(tokens)] = is_max_context - tokens.append(all_doc_tokens[split_token_index]) - segment_ids.append(SEG_ID_P) - p_mask.append(0) - - paragraph_len = len(tokens) - - tokens.append(SEP_ID) - segment_ids.append(SEG_ID_P) - p_mask.append(1) - - # note(zhiliny): we put P before Q - # because during pretraining, B is always shorter than A - for token in query_tokens: - tokens.append(token) - segment_ids.append(SEG_ID_Q) - p_mask.append(1) - tokens.append(SEP_ID) - segment_ids.append(SEG_ID_Q) - p_mask.append(1) - - cls_index = len(segment_ids) - tokens.append(CLS_ID) - segment_ids.append(SEG_ID_CLS) - p_mask.append(0) - - input_ids = tokens - - # The mask has 0 for real tokens and 1 for padding tokens. Only real - # tokens are attended to. - input_mask = [0] * len(input_ids) - - # Zero-pad up to the sequence length. - while len(input_ids) < max_seq_length: - input_ids.append(0) - input_mask.append(1) - segment_ids.append(SEG_ID_PAD) - p_mask.append(1) - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - assert len(p_mask) == max_seq_length - - span_is_impossible = example.is_impossible - start_position = None - end_position = None - if is_training and not span_is_impossible: - # For training, if our document chunk does not contain an annotation - # we throw it out, since there is nothing to predict. - doc_start = doc_span.start - doc_end = doc_span.start + doc_span.length - 1 - out_of_span = False - if not (tok_start_position >= doc_start and - tok_end_position <= doc_end): - # print("out of span") - # print("{}|{}|{}|{}".format(doc_start,tok_start_position,tok_end_position,doc_end)) - out_of_span = True - if out_of_span: - # continue - start_position = 0 - end_position = 0 - span_is_impossible = True - else: - # note(zhiliny): we put P before Q, so doc_offset should be zero. - # doc_offset = len(query_tokens) + 2 - doc_offset = 0 - start_position = tok_start_position - doc_start + doc_offset - end_position = tok_end_position - doc_start + doc_offset - - if is_training and span_is_impossible: - start_position = cls_index - end_position = cls_index - - # note(zhiliny): With multi processing, - # the example_index is actually the index within the current process - # therefore we use example_index=None to avoid being used in the future. - # The current code does not use example_index of training data. 
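# [Editor's note] Illustrative sketch only (toy ids, placeholder constants that
# stand in for the module's SEP/CLS/SEG_ID_* values): the span layout assembled
# above puts the paragraph first, then the query, with CLS at the very end, and
# uses an input_mask where 0 marks real tokens and 1 marks padding.
def toy_layout(para_ids, query_ids, max_seq_length, sep_id=4, cls_id=3,
               seg_p=0, seg_q=1, seg_cls=2, seg_pad=4):
    """Assemble ids/segments/mask for one toy span; constants are placeholders."""
    ids = list(para_ids) + [sep_id] + list(query_ids) + [sep_id] + [cls_id]
    segs = [seg_p] * (len(para_ids) + 1) + [seg_q] * (len(query_ids) + 1) + [seg_cls]
    mask = [0] * len(ids)                      # 0 = real token
    while len(ids) < max_seq_length:           # zero-pad to the fixed length
        ids.append(0)
        mask.append(1)                         # 1 = padding token
        segs.append(seg_pad)
    return ids, segs, mask

# toy_layout([11, 12, 13], [21, 22], max_seq_length=10)
# ids  -> [11, 12, 13, 4, 21, 22, 4, 3, 0, 0]
# segs -> [0, 0, 0, 0, 1, 1, 1, 2, 4, 4]
# mask -> [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]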
- if is_training: - feat_example_index = None - else: - feat_example_index = example_index - - feature = InputFeatures( - unique_id=unique_id, - example_index=feat_example_index, - doc_span_index=doc_span_index, - tok_start_to_orig_index=cur_tok_start_to_orig_index, - tok_end_to_orig_index=cur_tok_end_to_orig_index, - token_is_max_context=token_is_max_context, - input_ids=input_ids, - input_mask=input_mask, - p_mask=p_mask, - segment_ids=segment_ids, - paragraph_len=paragraph_len, - cls_index=cls_index, - start_position=start_position, - end_position=end_position, - is_impossible=span_is_impossible) - - # Run callback - output_fn(feature) - - unique_id += 1 - if span_is_impossible: - cnt_neg += 1 - else: - cnt_pos += 1 - - tf.logging.info("Total number of instances: {} = pos {} neg {}".format( - cnt_pos + cnt_neg, cnt_pos, cnt_neg)) - - -def _check_is_max_context(doc_spans, cur_span_index, position): - """Check if this is the 'max context' doc span for the token.""" - - # Because of the sliding window approach taken to scoring documents, a single - # token can appear in multiple documents. E.g. - # Doc: the man went to the store and bought a gallon of milk - # Span A: the man went to the - # Span B: to the store and bought - # Span C: and bought a gallon of - # ... - # - # Now the word 'bought' will have two scores from spans B and C. We only - # want to consider the score with "maximum context", which we define as - # the *minimum* of its left and right context (the *sum* of left and - # right context will always be the same, of course). - # - # In the example the maximum context for 'bought' would be span C since - # it has 1 left context and 3 right context, while span B has 4 left context - # and 0 right context. - best_score = None - best_span_index = None - for (span_index, doc_span) in enumerate(doc_spans): - end = doc_span.start + doc_span.length - 1 - if position < doc_span.start: - continue - if position > end: - continue - num_left_context = position - doc_span.start - num_right_context = end - position - score = min(num_left_context, num_right_context) + 0.01 * doc_span.length - if best_score is None or score > best_score: - best_score = score - best_span_index = span_index - - return cur_span_index == best_span_index - -class FeatureWriter(object): - """Writes InputFeature to TF example file.""" - - def __init__(self, is_training): - self.is_training = is_training - self.num_features = 0 - - def process_feature(self, feature): - """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" - self.num_features += 1 - - def create_int_feature(values): - feature = tf.train.Feature( - int64_list=tf.train.Int64List(value=list(values))) - return feature - - def create_float_feature(values): - f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values))) - return f - - features = collections.OrderedDict() - features["unique_ids"] = create_int_feature([feature.unique_id]) - features["input_ids"] = create_int_feature(feature.input_ids) - features["input_mask"] = create_float_feature(feature.input_mask) - features["p_mask"] = create_float_feature(feature.p_mask) - features["segment_ids"] = create_int_feature(feature.segment_ids) - - features["cls_index"] = create_int_feature([feature.cls_index]) - - if self.is_training: - features["start_positions"] = create_int_feature([feature.start_position]) - features["end_positions"] = create_int_feature([feature.end_position]) - impossible = 0 - if feature.is_impossible: - impossible = 1 - features["is_impossible"] = 
create_float_feature([impossible]) - - tf_example = tf.train.Example(features=tf.train.Features(feature=features)) - return tf_example.SerializeToString() - -RawResult = collections.namedtuple("RawResult", - ["unique_id", "start_top_log_probs", "start_top_index", - "end_top_log_probs", "end_top_index", "cls_logits"]) - -_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name - "PrelimPrediction", - ["feature_index", "start_index", "end_index", - "start_log_prob", "end_log_prob"]) - -_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name - "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]) - -def get_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, FLAGS): - """Write final predictions to the json file and log-odds of null if needed.""" - tf.logging.info("Getting predictions") - - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - - all_predictions = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - score_null = 1000000 # large and positive - - for (feature_index, feature) in enumerate(features): - result = unique_id_to_result[feature.unique_id] - - cur_null_score = result.cls_logits - - # if we could have irrelevant answers, get the min score of irrelevant - score_null = min(score_null, cur_null_score) - - for i in range(FLAGS.start_n_top): - for j in range(FLAGS.end_n_top): - start_log_prob = result.start_top_log_probs[i] - start_index = result.start_top_index[i] - - j_index = i * FLAGS.end_n_top + j - - end_log_prob = result.end_top_log_probs[j_index] - end_index = result.end_top_index[j_index] - - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. 
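# [Editor's note] Illustrative sketch only, not part of the deleted module: how
# the flat end_top_* arrays in the loop above are indexed. For each of the
# start_n_top start candidates, its end_n_top end candidates are stored
# contiguously, so end candidate j for start candidate i sits at i * end_n_top + j.
def candidate_spans(start_top_index, end_top_index, start_n_top, end_n_top):
    """Yield (start_index, end_index) pairs in the order the loop above visits them."""
    for i in range(start_n_top):
        for j in range(end_n_top):
            yield start_top_index[i], end_top_index[i * end_n_top + j]

# With start_n_top=2, end_n_top=2, start_top_index=[5, 9] and
# end_top_index=[7, 8, 11, 12], the candidates are:
# (5, 7), (5, 8), (9, 11), (9, 12)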
- if start_index >= feature.paragraph_len - 1: - continue - if end_index >= feature.paragraph_len - 1: - continue - - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_log_prob=start_log_prob, - end_log_prob=end_log_prob)) - - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_log_prob + x.end_log_prob), - reverse=True) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - - tok_start_to_orig_index = feature.tok_start_to_orig_index - tok_end_to_orig_index = feature.tok_end_to_orig_index - start_orig_pos = tok_start_to_orig_index[pred.start_index] - end_orig_pos = tok_end_to_orig_index[pred.end_index] - - paragraph_text = example.paragraph_text - final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() - - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_log_prob=pred.start_log_prob, - end_log_prob=pred.end_log_prob)) - - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. - if not nbest: - nbest.append( - _NbestPrediction(text="", start_log_prob=-1e6, - end_log_prob=-1e6)) - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_log_prob + entry.end_log_prob) - if not best_non_null_entry: - best_non_null_entry = entry - - assert best_non_null_entry is not None - - all_predictions[example.qas_id] = best_non_null_entry.text - - return all_predictions - - -def _get_best_indexes(logits, n_best_size): - """Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for i in range(len(index_and_score)): - if i >= n_best_size: - break - best_indexes.append(index_and_score[i][0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs - - -def input_fn_builder(input_glob, seq_length, is_training, drop_remainder, - num_hosts, num_threads=8): - """Creates an `input_fn` closure to be passed to TPUEstimator.""" - - name_to_features = { - "unique_ids": tf.FixedLenFeature([], tf.int64), - "input_ids": tf.FixedLenFeature([seq_length], tf.int64), - "input_mask": tf.FixedLenFeature([seq_length], tf.float32), - "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), - "cls_index": tf.FixedLenFeature([], tf.int64), - "p_mask": tf.FixedLenFeature([seq_length], tf.float32) - } - - if is_training: - name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64) - name_to_features["is_impossible"] = tf.FixedLenFeature([], tf.float32) - - tf.logging.info("Input tfrecord file glob 
{}".format(input_glob)) - global_input_paths = tf.gfile.Glob(input_glob) - tf.logging.info("Find {} input paths {}".format( - len(global_input_paths), global_input_paths)) - - def _decode_record(record, name_to_features): - """Decodes a record to a TensorFlow example.""" - example = tf.parse_single_example(record, name_to_features) - - # tf.Example only supports tf.int64, but the TPU only supports tf.int32. - # So cast all int64 to int32. - for name in list(example.keys()): - t = example[name] - if t.dtype == tf.int64: - t = tf.cast(t, tf.int32) - example[name] = t - - return example - - def input_fn(params): - """The actual input function.""" - if FLAGS.use_tpu: - batch_size = params["batch_size"] - elif is_training: - batch_size = FLAGS.train_batch_size - else: - batch_size = FLAGS.predict_batch_size - - # Split tfrecords across hosts - if num_hosts > 1: - host_id = params["context"].current_host - num_files = len(global_input_paths) - if num_files >= num_hosts: - num_files_per_host = (num_files + num_hosts - 1) // num_hosts - my_start_file_id = host_id * num_files_per_host - my_end_file_id = min((host_id + 1) * num_files_per_host, num_files) - input_paths = global_input_paths[my_start_file_id: my_end_file_id] - tf.logging.info("Host {} handles {} files".format(host_id, - len(input_paths))) - else: - input_paths = global_input_paths - - if len(input_paths) == 1: - d = tf.data.TFRecordDataset(input_paths[0]) - # For training, we want a lot of parallel reading and shuffling. - # For eval, we want no shuffling and parallel reading doesn't matter. - if is_training: - d = d.shuffle(buffer_size=FLAGS.shuffle_buffer) - d = d.repeat() - else: - d = tf.data.Dataset.from_tensor_slices(input_paths) - # file level shuffle - d = d.shuffle(len(input_paths)).repeat() - - # `cycle_length` is the number of parallel files that get read. - cycle_length = min(num_threads, len(input_paths)) - - d = d.apply( - tf.contrib.data.parallel_interleave( - tf.data.TFRecordDataset, - sloppy=is_training, - cycle_length=cycle_length)) - - if is_training: - # sample level shuffle - d = d.shuffle(buffer_size=FLAGS.shuffle_buffer) - - d = d.apply( - tf.contrib.data.map_and_batch( - lambda record: _decode_record(record, name_to_features), - batch_size=batch_size, - num_parallel_batches=num_threads, - drop_remainder=drop_remainder)) - d = d.prefetch(1024) - - return d - - return input_fn - - -def mrqa_predictor(FLAGS, predict_fn, data): - """ - Get prediction with the data got fron mrqa official request. 
- """ - tf.logging.set_verbosity(tf.logging.INFO) - - sp_model = spm.SentencePieceProcessor() - sp_model.Load(FLAGS.spiece_model_file) - - tf.logging.info("Got Data from IR system...") - eval_data = arrange_kaggle_data(data, is_training=False) - - eval_writer = FeatureWriter(is_training=False) - eval_features = [] - eval_features_inp = [] - - def append_feature(feature): - eval_features.append(feature) - eval_features_inp.append(eval_writer.process_feature(feature)) - - convert_examples_to_features( - examples=eval_data, - sp_model=sp_model, - max_seq_length=FLAGS.max_seq_length, - doc_stride=FLAGS.doc_stride, - max_query_length=FLAGS.max_query_length, - is_training=False, - output_fn=append_feature, - FLAGS=FLAGS) - - # predict_fn = tf.contrib.predictor.from_saved_model(FLAGS.export_dir_base) - - cur_results = [] - - for num, eval_feature in enumerate(eval_features_inp): - result = predict_fn({"examples":[eval_feature]}) - - if len(cur_results) % 1000 == 0: - tf.logging.info("Processing example: %d" % (len(cur_results))) - - unique_id = int(result["unique_ids"]) - start_top_log_probs = ( - [float(x) for x in result["start_top_log_probs"].flat]) - start_top_index = [int(x) for x in result["start_top_index"].flat] - end_top_log_probs = ( - [float(x) for x in result["end_top_log_probs"].flat]) - end_top_index = [int(x) for x in result["end_top_index"].flat] - - cls_logits = float(result["cls_logits"].flat[0]) - - cur_results.append( - RawResult( - unique_id=unique_id, - start_top_log_probs=start_top_log_probs, - start_top_index=start_top_index, - end_top_log_probs=end_top_log_probs, - end_top_index=end_top_index, - cls_logits=cls_logits)) - - ret = get_predictions(eval_data, eval_features, cur_results, - FLAGS.n_best_size, FLAGS.max_answer_length, - FLAGS) - return dict(ret) - -if __name__ == "__main__": - pp = pprint.PrettyPrinter(indent=4) diff --git a/build/lib/caireCovid/mrqa/prepro_utils.py b/build/lib/caireCovid/mrqa/prepro_utils.py deleted file mode 100644 index 1d8ac83..0000000 --- a/build/lib/caireCovid/mrqa/prepro_utils.py +++ /dev/null @@ -1,138 +0,0 @@ -# coding=utf-8 -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import unicodedata -import six -from functools import partial - - -SPIECE_UNDERLINE = '▁' - - -def printable_text(text): - """Returns text encoded in a way suitable for print or `tf.logging`.""" - - # These functions want `str` for both Python2 and Python3, but in one case - # it's a Unicode string and in the other it's a byte string. 
- if six.PY3: - if isinstance(text, str): - return text - elif isinstance(text, bytes): - return text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - elif six.PY2: - if isinstance(text, str): - return text - elif isinstance(text, unicode): - return text.encode("utf-8") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - else: - raise ValueError("Not running on Python2 or Python 3?") - - -def print_(*args): - new_args = [] - for arg in args: - if isinstance(arg, list): - s = [printable_text(i) for i in arg] - s = ' '.join(s) - new_args.append(s) - else: - new_args.append(printable_text(arg)) - print(*new_args) - - -def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False): - if remove_space: - outputs = ' '.join(inputs.strip().split()) - else: - outputs = inputs - outputs = outputs.replace("``", '"').replace("''", '"') - - if six.PY2 and isinstance(outputs, str): - outputs = outputs.decode('utf-8') - - if not keep_accents: - outputs = unicodedata.normalize('NFKD', outputs) - outputs = ''.join([c for c in outputs if not unicodedata.combining(c)]) - if lower: - outputs = outputs.lower() - - return outputs - - -def encode_pieces(sp_model, text, return_unicode=True, sample=False): - # return_unicode is used only for py2 - - # note(zhiliny): in some systems, sentencepiece only accepts str for py2 - if six.PY2 and isinstance(text, unicode): - text = text.encode('utf-8') - - if not sample: - pieces = sp_model.EncodeAsPieces(text) - else: - pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1) - new_pieces = [] - for piece in pieces: - if len(piece) > 1 and piece[-1] == ',' and piece[-2].isdigit(): - cur_pieces = sp_model.EncodeAsPieces( - piece[:-1].replace(SPIECE_UNDERLINE, '')) - if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: - if len(cur_pieces[0]) == 1: - cur_pieces = cur_pieces[1:] - else: - cur_pieces[0] = cur_pieces[0][1:] - cur_pieces.append(piece[-1]) - new_pieces.extend(cur_pieces) - else: - new_pieces.append(piece) - - # note(zhiliny): convert back to unicode for py2 - if six.PY2 and return_unicode: - ret_pieces = [] - for piece in new_pieces: - if isinstance(piece, str): - piece = piece.decode('utf-8') - ret_pieces.append(piece) - new_pieces = ret_pieces - - return new_pieces - - -def encode_ids(sp_model, text, sample=False): - pieces = encode_pieces(sp_model, text, return_unicode=False, sample=sample) - ids = [sp_model.PieceToId(piece) for piece in pieces] - return ids - - -if __name__ == '__main__': - import sentencepiece as spm - - sp = spm.SentencePieceProcessor() - sp.load('sp10m.uncased.v3.model') - - print_(u'I was born in 2000, and this is falsé.') - print_(u'ORIGINAL', sp.EncodeAsPieces(u'I was born in 2000, and this is falsé.')) - print_(u'OURS', encode_pieces(sp, u'I was born in 2000, and this is falsé.')) - print(encode_ids(sp, u'I was born in 2000, and this is falsé.')) - print_('') - prepro_func = partial(preprocess_text, lower=True) - print_(prepro_func('I was born in 2000, and this is falsé.')) - print_('ORIGINAL', sp.EncodeAsPieces(prepro_func('I was born in 2000, and this is falsé.'))) - print_('OURS', encode_pieces(sp, prepro_func('I was born in 2000, and this is falsé.'))) - print(encode_ids(sp, prepro_func('I was born in 2000, and this is falsé.'))) - print_('') - print_('I was born in 2000, and this is falsé.') - print_('ORIGINAL', sp.EncodeAsPieces('I was born in 2000, and this is falsé.')) - print_('OURS', encode_pieces(sp, 'I was born in 
2000, and this is falsé.')) - print(encode_ids(sp, 'I was born in 2000, and this is falsé.')) - print_('') - print_('I was born in 92000, and this is falsé.') - print_('ORIGINAL', sp.EncodeAsPieces('I was born in 92000, and this is falsé.')) - print_('OURS', encode_pieces(sp, 'I was born in 92000, and this is falsé.')) - print(encode_ids(sp, 'I was born in 92000, and this is falsé.')) - diff --git a/build/lib/caireCovid/mrqa/tpu_estimator.py b/build/lib/caireCovid/mrqa/tpu_estimator.py deleted file mode 100644 index cc0f801..0000000 --- a/build/lib/caireCovid/mrqa/tpu_estimator.py +++ /dev/null @@ -1,3522 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =================================================================== -"""TPUEstimator class.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import copy -import os -import signal -import sys -import threading -import time - -import numpy as np -import six -from six.moves import queue as Queue # pylint: disable=redefined-builtin -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result -from tensorflow.contrib.tpu.python.tpu import tensor_tracer -from tensorflow.contrib.tpu.python.ops import tpu_ops -from tensorflow.contrib.tpu.python.tpu import error_handling -from tensorflow.contrib.tpu.python.tpu import session_support -from tensorflow.contrib.tpu.python.tpu import tpu -from tensorflow.contrib.tpu.python.tpu import tpu_config -from tensorflow.contrib.tpu.python.tpu import tpu_context -from tensorflow.contrib.tpu.python.tpu import tpu_feed -from tensorflow.contrib.tpu.python.tpu import training_loop -from tensorflow.contrib.tpu.python.tpu import util as util_lib -from tensorflow.contrib.training.python.training import hparam -from tensorflow.core.framework import variable_pb2 -from tensorflow.core.framework.summary_pb2 import Summary -from tensorflow.core.protobuf import config_pb2 -from tensorflow.python.client import session as tf_session -from tensorflow.python.data.ops import dataset_ops -from tensorflow.python.data.util import nest as data_nest -from tensorflow.python.estimator import estimator as estimator_lib -from tensorflow.python.estimator import model_fn as model_fn_lib -from tensorflow.python.estimator.export import export_output as export_output_lib -from tensorflow.python.framework import constant_op -from tensorflow.python.framework import dtypes -from tensorflow.python.framework import errors -from tensorflow.python.framework import ops -from tensorflow.python.ops import array_ops -from tensorflow.python.ops import check_ops -from tensorflow.python.ops import control_flow_ops -from tensorflow.python.ops import init_ops -from tensorflow.python.ops import math_ops -from tensorflow.python.ops import resource_variable_ops -from tensorflow.python.ops import state_ops -from 
tensorflow.python.ops import summary_ops_v2 as contrib_summary -from tensorflow.python.ops import variable_scope -from tensorflow.python.ops import variables -from tensorflow.python.platform import tf_logging as logging -from tensorflow.python.saved_model import tag_constants -from tensorflow.python.summary import summary -from tensorflow.python.training import basic_session_run_hooks -from tensorflow.python.training import evaluation -from tensorflow.python.training import session_run_hook -from tensorflow.python.training import training -from tensorflow.python.training import training_util -from tensorflow.python.util import function_utils -from tensorflow.python.util import nest -from tensorflow.python.util import tf_inspect - -_INITIAL_LOSS = 1e7 -_ZERO_LOSS = 0. -_TPU_ESTIMATOR = 'custom_tpu_estimator' -_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop' -_BATCH_SIZE_KEY = 'batch_size' -_CTX_KEY = 'context' -_USE_TPU_KEY = 'use_tpu' -_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum' -_ONE_GIGABYTE = 1024 * 1024 * 1024 -_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops' -_TPU_TRAIN_OP = '_tpu_train_op' -_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference' - -# Ideally _USE_TPU_KEY should be reserved as well. However there are already -# models that make use of this key, thus it can not be reserved now to prevent -# breakage. In the long run, we would like to mitigate this by migrating models -# off of using _USE_TPU_KEY. -_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY] - -# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is -# only used for per-core based deployments. For per-host based pipelines, if a -# user returns a Dataset instance it will be automatically wrapped in a -# tf.while_loop (This can be disabled by returning features and labels -# explicitly). -_WRAP_INPUT_FN_INTO_WHILE_LOOP = False - -ops.register_proto_function( - '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR), - proto_type=variable_pb2.VariableDef, - to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access - from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access - - -def _is_iterable(obj): - """A Python 2 and 3 compatible util to check whether `obj` is iterable.""" - try: - iter(obj) - return True - except TypeError: - return False - - -def _create_global_step(graph): - graph = graph or ops.get_default_graph() - if training.get_global_step(graph) is not None: - raise ValueError('"global_step" already exists.') - # Create in proper graph and base name_scope. - with graph.as_default() as g, g.name_scope(None): - return variable_scope.get_variable( - ops.GraphKeys.GLOBAL_STEP, - shape=[], - dtype=dtypes.int64, - initializer=init_ops.zeros_initializer(), - trainable=False, - use_resource=True, - collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) - - -def _create_or_get_iterations_per_loop(): - """Creates or gets the iterations_per_loop variable. - - In TPUEstimator, the user provided computation, the model_fn, is wrapped - inside a tf.while_loop for peak performance. The iterations of the loop are - specified by this variable, which adjusts its value on the CPU after each TPU - program execution and before the next TPU execution. - - The purpose of using a variable, rather then a constant, is to allow - TPUEstimator adapt the TPU training iterations according to the final steps - specified by users. 
For example, if the user sets the iterations_per_loop as 4 - in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop - variable will have the following value before each TPU training. - - - 1-th TPU execution: iterations_per_loop = 4 - - 2-th TPU execution: iterations_per_loop = 4 - - 3-th TPU execution: iterations_per_loop = 2 - - As model_fn increases the global step once per train_op invocation, the global - step is 10 after all TPU executions, matching the steps=10 inputs passed in by - users. - - Returns: - A TF non-trainable resource variable. - - Raises: - RuntimeError: If multi iterations_per_loop variables were found. - """ - graph = ops.get_default_graph() - collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR) - iter_vars = graph.get_collection(collection_name) - if len(iter_vars) == 1: - return iter_vars[0] - elif len(iter_vars) > 1: - raise RuntimeError('Multiple iterations_per_loop_var in collection.') - - with ops.colocate_with(training_util.get_global_step()): - with variable_scope.variable_scope( - _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE): - return variable_scope.get_variable( - _ITERATIONS_PER_LOOP_VAR, - initializer=init_ops.zeros_initializer(), - shape=[], - dtype=dtypes.int32, - trainable=False, - collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES], - use_resource=True) - - -def _sync_variables_ops(ctx): - """Create varriables synchronization ops. - - Gets the variables back from TPU nodes. This means the variables updated - by TPU will now be *synced* to host memory. - In BROADCAST mode, we skip this sync since the variables are ususally too - big to transmit via RPC. - - Args: - ctx: A `_InternalTPUContext` instance with mode. - - Returns: - A list of sync ops. - """ - - if not ctx.is_input_broadcast_with_iterators(): - return [ - array_ops.check_numerics(v.read_value(), - 'Gradient for %s is NaN' % v.name).op - for v in variables.trainable_variables() - ] - else: - return [control_flow_ops.no_op()] - - -def _increase_eval_step_op(iterations_per_loop): - """Returns an op to increase the eval step for TPU evaluation. - - Args: - iterations_per_loop: Tensor. The number of eval steps running in TPU system - before returning to CPU host for each `Session.run`. - - Returns: - An operation - """ - eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access - # Estimator evaluate increases 1 by default. So, we increase the difference. - return state_ops.assign_add( - eval_step, - math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype), - use_locking=True) - - -def _extract_key_names(tensor_or_dict): - if isinstance(tensor_or_dict, dict): - return sorted(tensor_or_dict.keys()) - return [] - - -class _SIGNAL(object): - """Signal used to control the thread of infeed/outfeed. - - All preserved signals must be negative numbers. Positive numbers are used to - indicate the number of iterations for next training/evaluation loop. - """ - NEXT_BATCH = -1 - STOP = -2 - - -class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`. - - See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and - `export_outputs`. - - For evaluation, `eval_metrics `is a tuple of `metric_fn` and `tensors`, where - `metric_fn` runs on CPU to generate metrics and `tensors` represents the - `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`. 
- To be precise, TPU evaluation expects a slightly different signature from the - `tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a - dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`. - The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The - `tensors` usually specify the model logits, which are transferred back from - TPU system to CPU host. All tensors must have be batch-major, i.e., the batch - size is the first dimension. Once all tensors are available at CPU host from - all shards, they are concatenated (on CPU) and passed as positional arguments - to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is - a dict. `metric_fn` takes the `tensors` and returns a dict from metric string - name to the result of calling a metric function, namely a `(metric_tensor, - update_op)` tuple. See `TPUEstimator` for MNIST example how to specify the - `eval_metrics`. - - `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This - function should not capture any Tensors in `model_fn`. - - `host_call` is a tuple of a `function` and a list or dictionary of `tensors` - to pass to that function and returns a list of Tensors. `host_call` currently - works for train() and evaluate(). The Tensors returned by the function is - executed on the CPU on every step, so there is communication overhead when - sending tensors from TPU to CPU. To reduce the overhead, try reducing the - size of the tensors. The `tensors` are concatenated along their major (batch) - dimension, and so must be >= rank 1. The `host_call` is useful for writing - summaries with `tf.contrib.summary.create_file_writer`. - """ - - def __new__(cls, - mode, - predictions=None, - loss=None, - train_op=None, - eval_metrics=None, - export_outputs=None, - scaffold_fn=None, - host_call=None, - training_hooks=None, - evaluation_hooks=None, - prediction_hooks=None): - """Creates a validated `TPUEstimatorSpec` instance.""" - host_calls = {} - if eval_metrics is not None: - host_calls['eval_metrics'] = eval_metrics - if host_call is not None: - host_calls['host_call'] = host_call - _OutfeedHostCall.validate(host_calls) - - training_hooks = tuple(training_hooks or []) - evaluation_hooks = tuple(evaluation_hooks or []) - prediction_hooks = tuple(prediction_hooks or []) - - for hook in training_hooks + evaluation_hooks + prediction_hooks: - if not isinstance(hook, session_run_hook.SessionRunHook): - raise TypeError('All hooks must be SessionRunHook instances, given: {}' - .format(hook)) - - return super(TPUEstimatorSpec, cls).__new__( - cls, - mode=mode, - predictions=predictions, - loss=loss, - train_op=train_op, - eval_metrics=eval_metrics, - export_outputs=export_outputs, - scaffold_fn=scaffold_fn, - host_call=host_call, - training_hooks=training_hooks, - evaluation_hooks=evaluation_hooks, - prediction_hooks=prediction_hooks) - - def as_estimator_spec(self): - """Creates an equivalent `EstimatorSpec` used by CPU train/eval.""" - host_calls = {} - if self.eval_metrics is not None: - host_calls['eval_metrics'] = self.eval_metrics - if self.host_call is not None: - host_calls['host_call'] = self.host_call - host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls) - eval_metric_ops = None - if self.eval_metrics is not None: - eval_metric_ops = host_call_ret['eval_metrics'] - hooks = None - if self.host_call is not None: - hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])] - if tensor_tracer.TensorTracer.is_enabled(): - tt = 
tensor_tracer.TensorTracer() - tracing_calls = tt.trace_cpu(ops.get_default_graph()) - tracing_call_ret = _OutfeedHostCall.create_cpu_hostcall(tracing_calls) - tracing_functions = tracing_call_ret.values() - if tracing_functions: - if hooks: - hooks.extend([_OutfeedHostCallHook(tracing_functions)]) - else: - hooks = [_OutfeedHostCallHook(tracing_functions)] - hooks = tuple(hooks or []) - scaffold = self.scaffold_fn() if self.scaffold_fn else None - return model_fn_lib.EstimatorSpec( - mode=self.mode, - predictions=self.predictions, - loss=self.loss, - train_op=self.train_op, - eval_metric_ops=eval_metric_ops, - export_outputs=self.export_outputs, - scaffold=scaffold, - training_hooks=self.training_hooks + hooks, - evaluation_hooks=self.evaluation_hooks + hooks, - prediction_hooks=self.prediction_hooks + hooks) - - -class _OpQueueContext(object): - """Manages work queue and thread for a infeed/outfeed thread.""" - - def __init__(self, name, target, args): - self._name = name - self._queue = Queue.Queue() - args = (self,) + args - self._thread = threading.Thread(name=name, target=target, args=args) - self._thread.daemon = True - self._thread.start() - - def stop(self): - self._queue.put(_SIGNAL.STOP) - - def send_next_batch_signal(self, iterations): - self._queue.put(iterations) - - def read_iteration_counts(self): - while True: - iterations = self._queue.get(block=True) - logging.debug('%s read iterations %s', self._name, iterations) - if iterations == _SIGNAL.STOP: - logging.info('%s received shutdown signal, stopping.', self._name) - return - yield iterations - - def join(self): - logging.info('Shutting down %s thread.', self._name) - self.stop() - self._thread.join() - - -class _OpSignalOnceQueueContext(_OpQueueContext): - """Manages work queue and thread for a infeed/outfeed thread. - - This subclass only signals once. - """ - - def __init__(self, name, target, args): - super(_OpSignalOnceQueueContext, self).__init__(name, target, args) - self._has_signaled = False - - def send_next_batch_signal(self, iterations): - if not self._has_signaled: - self._queue.put(iterations) - self._has_signaled = True - - -class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook): - """A Session hook setting up the TPU initialization, infeed, and outfeed. - - This hook does two major things: - 1. initialize and shutdown TPU system. - 2. launch and join the threads for infeed enqueue and (optional) outfeed - dequeue. 
- """ - - def __init__(self, - ctx, - enqueue_ops, - dequeue_ops, - tpu_compile_op, - run_infeed_loop_on_coordinator=True, - rendezvous=None, - master=None, - session_config=None): - self._master_job = ctx.master_job - self._enqueue_ops = enqueue_ops - self._dequeue_ops = dequeue_ops - self._rendezvous = rendezvous - self._master = master - self._session_config = session_config - self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator - self._initial_infeed_sleep_secs = ( - ctx.config.tpu_config.initial_infeed_sleep_secs) - - self._feed_error = None - self._finished = False - self._should_initialize_tpu = True - self._tpu_compile_op = tpu_compile_op - - def begin(self): - logging.info('TPU job name %s', self._master_job) - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - self._init_ops = [] - if self._should_initialize_tpu: - self._finalize_ops = [tpu.shutdown_system(job=self._master_job)] - else: - self._finalize_ops = [] - - summary_writer_init_ops = contrib_summary.summary_writer_initializer_op() - self._init_ops.extend(summary_writer_init_ops) - # Get all the writer resources from the initializer, so we know what to - # flush. - for op in summary_writer_init_ops: - self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) - - def _run_infeed(self, queue_ctx, session): - logging.info('Starting infeed thread controller.') - if self._initial_infeed_sleep_secs: - logging.info('Infeed thread sleeping for %d seconds.', - self._initial_infeed_sleep_secs) - time.sleep(self._initial_infeed_sleep_secs) - logging.info('Infeed thread starting after sleep') - - with self._rendezvous.catch_errors(source='infeed', session=session): - if self._run_infeed_loop_on_coordinator: - for count, steps in enumerate(queue_ctx.read_iteration_counts()): - for i in xrange(steps): - logging.debug('Infeed enqueue for iteration (%d, %d)', count, i) - session.run(self._enqueue_ops) - else: - for _ in queue_ctx.read_iteration_counts(): - session.run(self._enqueue_ops) - logging.info('Infeed thread finished, shutting down.') - - def _run_outfeed(self, queue_ctx, session): - logging.info('Starting outfeed thread controller.') - with self._rendezvous.catch_errors(source='outfeed', session=session): - for count, steps in enumerate(queue_ctx.read_iteration_counts()): - for i in xrange(steps): - logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i) - session.run(self._dequeue_ops) - logging.info('Outfeed thread finished, shutting down.') - - def _create_infeed_controller(self, name, target, args): - return _OpQueueContext(name=name, target=target, args=args) - - def _assertCompilationSucceeded(self, result, coord): - proto = tpu_compilation_result.CompilationResultProto() - proto.ParseFromString(result) - if proto.status_error_message: - logging.error('Compilation failed: {}'.format(proto.status_error_message)) - coord.request_stop() - else: - logging.info('Compilation succeeded') - - def after_create_session(self, session, coord): - if self._should_initialize_tpu: - logging.info('Init TPU system') - start = time.time() - with ops.Graph().as_default(): - with tf_session.Session( - self._master, config=self._session_config) as sess: - sess.run(tpu.initialize_system(job=self._master_job)) - logging.info('Initialized TPU in %d seconds', time.time() - start) - - session.run(self._init_ops, - options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000)) - - if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1': - logging.info('Compiling user program: this may take 
a while...') - self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord) - - self._infeed_controller = self._create_infeed_controller( - name='InfeedController', target=self._run_infeed, args=(session,)) - - self._outfeed_controller = _OpQueueContext( - name='OutfeedController', target=self._run_outfeed, args=(session,)) - - # Enable the worker watchdog to terminate workers on coordinator exit. - watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0')) - if watchdog_timeout > 0: - session_support.start_worker_watchdog(session, - shutdown_timeout=watchdog_timeout) - - def before_run(self, run_context): - self._feed_error = None - - iterations = run_context.session.run(self._iterations_per_loop_var) - - logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations) - self._infeed_controller.send_next_batch_signal(iterations) - - logging.info('Dequeue next (%d) batch(es) of data from outfeed.', - iterations) - self._outfeed_controller.send_next_batch_signal(iterations) - - def end(self, session): - self._finished = True - logging.info('Stop infeed thread controller') - self._infeed_controller.join() - self._rendezvous.record_done('infeed') - - logging.info('Stop output thread controller') - self._outfeed_controller.join() - self._rendezvous.record_done('outfeed') - - logging.info('Shutdown TPU system.') - session.run(self._finalize_ops) - - -class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook): - - def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, - rendezvous=None, master=None, session_config=None): - super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__( - ctx, - enqueue_ops, - dequeue_ops, - tpu_compile_op=tpu_compile_op, - run_infeed_loop_on_coordinator=False, - rendezvous=rendezvous, - master=master, - session_config=session_config) - - def _create_infeed_controller(self, name, target, args): - return _OpSignalOnceQueueContext(name=name, target=target, args=args) - - -class _TPUStopAtStepHook(session_run_hook.SessionRunHook): - """Hook that requests stop at a specified step. - - This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with - following differences for TPU training: - - 1. This hook sets the variable for iterations_per_loop, which is used by - `TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed. - As the hook execution order is not guaranteed, the variable update is - handled in `after_create_session` and `after_run` as - `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`. - - 2. For each training loop (session.run), the global step could be increased - multiple times on TPU. The global step tensor value will be explicitly read - again in `after_run` to ensure the latest value is retrieved to avoid race - condition. - """ - - def __init__(self, iterations, num_steps=None, last_step=None): - """Initializes a `StopAtStepHook`. - - Args: - iterations: The number of iterations to run optimizer per training loop. - num_steps: Number of steps to execute. - last_step: Step after which to stop. - - Raises: - ValueError: If one of the arguments is invalid. 
- """ - if num_steps is None and last_step is None: - raise ValueError('One of num_steps or last_step must be specified.') - if num_steps is not None and last_step is not None: - raise ValueError('Only one of num_steps or last_step can be specified.') - self._num_steps = num_steps - self._last_step = last_step - self._iterations = iterations - - def _next_iterations(self, global_step, last_step): - gap = last_step - global_step - return min(gap, self._iterations) - - def begin(self): - self._global_step_tensor = training_util.get_global_step() - if self._global_step_tensor is None: - raise RuntimeError('Global step should be created.') - - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - - def after_create_session(self, session, coord): - global_step = session.run(self._global_step_tensor) - if self._last_step is None: - self._last_step = global_step + self._num_steps - - iterations = self._next_iterations(global_step, self._last_step) - - self._iterations_per_loop_var.load(iterations, session=session) - - def after_run(self, run_context, run_values): - # Global step cannot be retrieved via SessionRunArgs and before_run due to - # race condition. - global_step = run_context.session.run(self._global_step_tensor) - if global_step >= self._last_step: - run_context.request_stop() - else: - iterations = self._next_iterations(global_step, self._last_step) - self._iterations_per_loop_var.load( - iterations, session=run_context.session) - - -class _SetEvalIterationsHook(session_run_hook.SessionRunHook): - """Hook that requests stop at a specified step.""" - - def __init__(self, num_steps): - """Initializes a `_SetEvalIterationsHook`. - - Args: - num_steps: Number of steps to execute. - """ - self._num_steps = num_steps - - def begin(self): - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - - def after_create_session(self, session, coord): - self._iterations_per_loop_var.load(self._num_steps, session=session) - - -class _StoppingPredictHook(session_run_hook.SessionRunHook): - """Hook that requests stop according to the stopping signal in prediction.""" - - def __init__(self, scalar_stopping_signal): - self._scalar_stopping_signal = scalar_stopping_signal - - def begin(self): - self._iterations_per_loop_var = _create_or_get_iterations_per_loop() - - def after_create_session(self, session, coord): - # This is not necessary as we do not run infeed enqueue and outfeed dequeue - # in side threads for prediction model. But it makes the - # TPUInfeedOutfeedSessionHook prints nice message. - self._iterations_per_loop_var.load(1, session=session) - - def before_run(self, run_context): - return session_run_hook.SessionRunArgs(self._scalar_stopping_signal) - - def after_run(self, run_context, run_values): - _ = run_context - scalar_stopping_signal = run_values.results - if _StopSignals.should_stop(scalar_stopping_signal): - # NOTE(xiejw): In prediction, stopping signals are inserted for each - # batch. And we append one more batch to signal the system it should stop. - # The data flow might look like - # - # batch 0: images, labels, stop = 0 (user provided) - # batch 1: images, labels, stop = 0 (user provided) - # ... - # batch 99: images, labels, stop = 0 (user provided) - # batch 100: images, labels, stop = 1 (TPUEstimator appended) - # - # where the final batch (id = 100) is appended by TPUEstimator, so we - # should drop it before returning the predictions to user. - # To achieve that, we throw the OutOfRangeError in after_run. 
Once - # Monitored Session sees this error in SessionRunHook.after_run, the - # "current" prediction, i.e., batch with id=100, will be discarded - # immediately - raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.') - - -def generate_per_core_enqueue_ops_fn_for_host( - ctx, input_fn, inputs_structure_recorder, host_device, host_id): - """Generates infeed enqueue ops for per-core input_fn on a single host.""" - captured_infeed_queue = _CapturedObject() - tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) - - def enqueue_ops_fn(): - """A fn returns enqueue_ops.""" - num_cores_per_host = ctx.num_of_cores_per_host - per_host_sharded_inputs = [] - for core_ordinal in range(num_cores_per_host): - with ops.name_scope('ordinal_%d' % (core_ordinal)): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, - input_device=host_device, - invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - if inputs.is_dataset: - raise TypeError( - '`input_fn` returning `Dataset` is not yet supported in ' - 'per-Core input pipeline deployment yet. Please set ' - 'TPUConfig.per_host_input_for_training to True or return ' - '`features` and `labels` from `input_fn`') - features, labels = inputs.features_and_labels() - - inputs_structure_recorder.validate_and_record_structure( - features, labels) - flattened_inputs = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels)) - per_host_sharded_inputs.append(flattened_inputs) - - infeed_queue = tpu_feed.InfeedQueue( - number_of_tuple_elements=len(per_host_sharded_inputs[0])) - captured_infeed_queue.capture(infeed_queue) - - per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( - per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl) - return per_host_enqueue_ops - - return enqueue_ops_fn, captured_infeed_queue - - -def generate_per_host_enqueue_ops_fn_for_host( - ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id): - """Generates infeed enqueue ops for per-host input_fn on a single host.""" - captured_infeed_queue = _CapturedObject() - - dataset_initializer = None - - with ops.device(device): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, input_device=device, invocation_index=host_id) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - - is_dataset = inputs.is_dataset - if ctx.mode == model_fn_lib.ModeKeys.PREDICT: - if not is_dataset: - raise TypeError( - 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' - '`features` and `labels`.') - if batch_axis is not None: - raise TypeError('For mode PREDICT, batch_axis is not supported yet.') - inputs = _InputsWithStoppingSignals( - dataset=inputs.dataset, - batch_size=ctx.batch_size_for_input_fn, - add_padding=True) - - if is_dataset: - dataset_initializer = inputs.dataset_initializer() - - tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) - - def enqueue_ops_fn(): - """A Fn returning the TPU infeed enqueue ops. - - By providing as a Fn, it can be invoked inside the tf.while_loop such that - the input pipeline for multiple iterations can be executed by one - Session.run call. - - Returns: - list of dict of ops. - """ - with ops.device(device): - num_of_replicas_per_host = ctx.num_of_replicas_per_host - # Convert user input to features and labels. 
If the user returns a - # dataset, it is initialized and the features and labels extracted via - # `dataset.iterator.get_next()` - features, labels = inputs.features_and_labels() - signals = inputs.signals() - - inputs_structure_recorder.validate_and_record_structure(features, labels) - unsharded_tensor_list = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels, signals)) - - infeed_queue = tpu_feed.InfeedQueue( - tuple_types=[t.dtype for t in unsharded_tensor_list], - tuple_shapes=[t.shape for t in unsharded_tensor_list], - shard_dimensions=batch_axis) - captured_infeed_queue.capture(infeed_queue) - infeed_queue.set_number_of_shards(num_of_replicas_per_host) - per_host_enqueue_ops = ( - infeed_queue.split_inputs_and_generate_enqueue_ops( - unsharded_tensor_list, - placement_function=lambda x: device, - tpu_ordinal_function=tpu_ordinal_function_impl)) - if signals is None: - return per_host_enqueue_ops - else: - return { - 'ops': per_host_enqueue_ops, - 'signals': signals, - } - - return enqueue_ops_fn, captured_infeed_queue, dataset_initializer - - -def generate_per_host_v2_enqueue_ops_fn_for_host( - ctx, input_fn, inputs_structure_recorder, device, host_id): - """Generates infeed enqueue ops for per-host input_fn on a single host.""" - captured_infeed_queue = _CapturedObject() - dataset_initializer = None - - with ops.device(device): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, input_device=device, invocation_index=host_id) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - - is_dataset = inputs.is_dataset - if not is_dataset: - raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 ' - 'input pipeline configuration.') - - if ctx.mode == model_fn_lib.ModeKeys.PREDICT: - inputs = _InputsWithStoppingSignals( - dataset=inputs.dataset, - batch_size=ctx.batch_size_for_input_fn, - add_padding=True, - num_invocations_per_step=ctx.num_of_replicas_per_host) - - dataset_initializer = inputs.dataset_initializer() - tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id) - - def enqueue_ops_fn(): - """Generates the per_host enqueue ops.""" - control_deps = [] - per_host_sharded_inputs = [] - num_replicas_per_host = ctx.num_of_replicas_per_host - cached_signals = None - with ops.device(device): - if not inputs.is_dataset: - raise TypeError('`input_fn` must return a `Dataset` for this mode.') - for _ in range(num_replicas_per_host): - # Use control dependencies to ensure a deterministic ordering. - with ops.control_dependencies(control_deps): - features, labels = inputs.features_and_labels() # Calls get_next() - signals = inputs.signals() - - # All the replicas share the replica 0's stopping singal. - # This avoids inconsistent state among different model replcias. 
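# [Editor's note] Illustrative sketch only (a plain-Python stand-in for the graph
# code below): every replica on the host reuses replica 0's 'stopping' signal, so
# all replicas agree on when the padded final prediction batch has been reached.
def share_stopping_signal(per_replica_signals):
    """Overwrite each replica's 'stopping' entry with replica 0's value."""
    cached = None
    shared = []
    for signals in per_replica_signals:
        signals = dict(signals)
        if cached is None:
            cached = signals          # replica 0 keeps its own signal
        else:
            signals["stopping"] = cached["stopping"]
        shared.append(signals)
    return shared

# share_stopping_signal([{"stopping": 0}, {"stopping": 1}, {"stopping": 0}])
# -> [{'stopping': 0}, {'stopping': 0}, {'stopping': 0}]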
- if cached_signals: - signals['stopping'] = cached_signals['stopping'] - else: - cached_signals = signals - - inputs_structure_recorder.validate_and_record_structure( - features, labels) - flattened_inputs = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels, signals)) - control_deps.extend(flattened_inputs) - per_host_sharded_inputs.append(flattened_inputs) - - if inputs_structure_recorder.flattened_input_dims: - input_partition_dims = inputs_structure_recorder.flattened_input_dims - if signals: - input_partition_dims += [None] * len(signals) - # pylint: disable=protected-access - infeed_queue = tpu_feed._PartitionedInfeedQueue( - number_of_tuple_elements=len(per_host_sharded_inputs[0]), - host_id=host_id, - input_partition_dims=input_partition_dims, - device_assignment=ctx.device_assignment) - per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( - per_host_sharded_inputs) - else: - infeed_queue = tpu_feed.InfeedQueue( - number_of_tuple_elements=len(per_host_sharded_inputs[0])) - per_host_enqueue_ops = infeed_queue.generate_enqueue_ops( - per_host_sharded_inputs, - tpu_ordinal_function=tpu_ordinal_function_impl) - captured_infeed_queue.capture(infeed_queue) - - if signals is None: - return per_host_enqueue_ops - else: - return { - 'ops': per_host_enqueue_ops, - 'signals': signals, - } - - return enqueue_ops_fn, captured_infeed_queue, dataset_initializer - - -def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder, - num_hosts): - """Generates infeed enqueue ops for one input_fn on all the hosts.""" - captured_infeed_queue = _CapturedObject() - dataset_initializer = None - device_0 = ctx.tpu_host_placement_function(host_id=0) - with ops.device(device_0): - user_context = tpu_context.TPUContext( - internal_ctx=ctx, input_device=device_0, invocation_index=0) - inputs = _Inputs.from_input_fn(input_fn(user_context)) - - is_dataset = inputs.is_dataset - if ctx.mode == model_fn_lib.ModeKeys.PREDICT: - if not is_dataset: - raise TypeError( - 'For mode PREDICT, `input_fn` must return `Dataset` instead of ' - '`features` and `labels`.') - - inputs = _InputsWithStoppingSignals( - dataset=inputs.dataset, - batch_size=ctx.batch_size_for_input_fn, - add_padding=True) - - if is_dataset: - dataset_initializer = inputs.dataset_initializer() - num_replicas_per_host = ctx.num_of_replicas_per_host - - def tpu_ordinal_function_impl(replica_id): - if ctx.device_assignment: - return ctx.device_assignment.tpu_ordinal(replica=replica_id) - else: - return replica_id % num_replicas_per_host - - def device_function_impl(replica_id): - return ctx.tpu_host_placement_function(replica_id=replica_id) - - def enqueue_ops_fn(): - """Generates enqueue ops for all the hosts.""" - broadcasted_inputs = [] - flattened_inputs = None # Cache result from input_fn. - signals = None - for host_id in xrange(num_hosts): - with ops.device(ctx.tpu_host_placement_function(host_id=host_id)): - for _ in xrange(ctx.num_of_replicas_per_host): - # Note: input_fn is only called once at host 0 for the first replica. - # The features and labels returned from that invocation are - # broadcasted to other replicas(including the replicas on other - # hosts). 
- if flattened_inputs is None: - features, labels = inputs.features_and_labels() # Calls get_next() - signals = inputs.signals() - - inputs_structure_recorder.validate_and_record_structure( - features, labels) - flattened_inputs = ( - inputs_structure_recorder.flatten_features_and_labels( - features, labels, signals)) - broadcasted_inputs.append(flattened_inputs) - - infeed_queue = tpu_feed.InfeedQueue( - number_of_tuple_elements=len(broadcasted_inputs[0])) - captured_infeed_queue.capture(infeed_queue) - enqueue_ops = infeed_queue.generate_enqueue_ops( - broadcasted_inputs, - tpu_ordinal_function=tpu_ordinal_function_impl, - placement_function=device_function_impl) - - if signals is None: - return enqueue_ops - else: - return { - 'ops': enqueue_ops, - 'signals': signals, - } - - return enqueue_ops_fn, captured_infeed_queue, dataset_initializer - - -class _InputPipeline(object): - """`_InputPipeline` handles invoking `input_fn` and piping to infeed queue. - - `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from - call site. To be precise, based on the configuration in - `_InternalTPUContext`, it invokes `input_fn` for all cores (usually - multi-host TPU training) or for one host (usually for single-host TPU - evaluation), and sends all `features` and `labels` returned by `input_fn` to - TPU infeed. For per-core invocation, `features` and `labels` are piped to - infeed directly, one tuple for each core. For per-host invocation, `features` - and `labels` are split at host (with respect to `batch_axis`) and piped to all - cores accordingly. - - In addition, flatten/unflatten are handled by `_InputPipeline` also. Model - inputs returned by the `input_fn` can have one of the following forms: - 1. features - 2. (features, labels) - 3. ((arbitrarily nested structure of features), labels) - - Internally, form 1 is reformed to `(features, None)` as features and labels - are passed separately to underlying methods. For TPU training, TPUEstimator - may expect multiple `features` and `labels` tuples one for each core. - - TPUEstimator allows various different structures for inputs (namely `features` - and `labels`). Both `features` and `labels` can be any nested sturcture - supported by TF nest (namely, dict, tuples, namedtuples or any nested - structure of such of Tensors). `labels` could be `None` as well. - - These are flattened before they are passed to the infeed/outfeed library - as that expectes flattend lists. - """ - - class InputsStructureRecorder(object): - """The recorder to record inputs structure.""" - - def __init__(self, input_partition_dims=None): - # Holds the structure of inputs - self._feature_structure = {} - self._flattened_input_dims = None - - if input_partition_dims: - # This should have been validated in TPUConfig. - assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.' - if len(input_partition_dims) == 2: - self._feature_dims, self._label_dims = input_partition_dims - else: - self._feature_dims = input_partition_dims[0] - self._label_dims = None - - assert self._feature_dims is not None, ('input_partition_dims[0] must ' - 'not be None') - else: - self._feature_dims = None - self._label_dims = None - - # Internal state. - self._initialized = False - - @property - def flattened_input_dims(self): - assert self._initialized, 'InputsStructureRecorder is not initialized.' 
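The structure recorder exists so that nested `{features, labels, signals}` inputs can be flattened into a single tensor list for infeed and packed back into the original structure after dequeue. A rough stand-in for that round trip, using a simple dict-only flatten in place of TF's nest utilities (the real code handles arbitrary nested structures):

```
def flatten(structure):
    """Flatten a (possibly nested) dict into a list of leaves, in key order."""
    leaves = []
    for key in sorted(structure):
        value = structure[key]
        leaves.extend(flatten(value) if isinstance(value, dict) else [value])
    return leaves

def pack_like(structure, leaves):
    """Rebuild a nested dict shaped like `structure` from a flat list of leaves."""
    it = iter(leaves)
    def rebuild(node):
        if isinstance(node, dict):
            return {k: rebuild(node[k]) for k in sorted(node)}
        return next(it)
    return rebuild(structure)

recorded = {'features': {'input_ids': None, 'mask': None}, 'labels': None}
flat = flatten({'features': {'input_ids': [1, 2], 'mask': [1, 1]}, 'labels': [0]})
print(flat)                       # [[1, 2], [1, 1], [0]]
print(pack_like(recorded, flat))  # structure restored from the flat list
```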
- return self._flattened_input_dims - - def has_labels(self): - return 'labels' in self._feature_structure - - def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims, - label_dims_names, label_names, has_labels): - """Flatten input dims with the same order as flattened input tensors.""" - flattened_input_dims = [] - if feature_dims_names: - # We need a fixed ordering for matching the tensors in features. - flattened_input_dims.extend( - [feature_dims[name] for name in feature_dims_names]) - else: - flattened_input_dims.append(feature_dims) - - if label_dims_names: - # We need a fixed ordering for matching the tensors in labels. - flattened_input_dims.extend( - [label_dims[name] for name in label_dims_names]) - else: - if label_names: - num_tensors_in_label = len(label_names) - else: - num_tensors_in_label = int(has_labels) - # Setting `None` in input_partition_dims[1] will apply `None` to - # all the tensors in labels, regardless of internal structure. - flattened_input_dims.extend([label_dims] * num_tensors_in_label) - - return flattened_input_dims - - def validate_and_record_structure(self, features, labels): - """Validates and records the structure of `features` and `labels`.""" - # Extract structure. - has_labels = labels is not None - feature_names = _extract_key_names(features) - label_names = _extract_key_names(labels) - - if not self._initialized: - # Record structure. - self._initialized = True - if self._feature_dims is not None: - feature_dims_names = _extract_key_names(self._feature_dims) - if feature_dims_names != feature_names: - raise ValueError( - 'TPUConfig.input_partition_dims[0] mismatched feature' - ' keys. Expected {}, got {}'.format(feature_names, - feature_dims_names)) - - label_dims_names = _extract_key_names(self._label_dims) - if self._label_dims is not None and label_dims_names != label_names: - raise ValueError( - 'TPUConfig.input_partition_dims[1] mismatched label' - ' keys. Expected {}, got {}'.format(label_names, - label_dims_names)) - - self._flattened_input_dims = self._flatten_input_dims( - self._feature_dims, feature_dims_names, self._label_dims, - label_dims_names, label_names, has_labels) - - def flatten_features_and_labels(self, features, labels, signals=None): - """Flattens the `features` and `labels` to a single tensor list.""" - self._feature_structure['features'] = features - if labels is not None: - self._feature_structure['labels'] = labels - if signals is not None: - self._feature_structure['signals'] = signals - return data_nest.flatten(self._feature_structure) - - def unflatten_features_and_labels(self, flattened_inputs): - """Restores the flattened inputs to original features and labels form. - - Args: - flattened_inputs: Flattened inputs for each shard. - - Returns: - A tuple of (`features`, `labels`), where `labels` could be None. - Each one, if present, should have identical structure (single tensor vs - dict) as the one returned by input_fn. - - Raises: - ValueError: If the number of expected tensors from `flattened_inputs` - mismatches the recorded structure. - """ - - unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure, - flattened_inputs) - return _Inputs( - unflattened_inputs['features'], - unflattened_inputs.get('labels'), - signals=unflattened_inputs.get('signals')) - - def __init__(self, input_fn, batch_axis, ctx): - """Constructor. - - Args: - input_fn: input fn for train or eval. 
- batch_axis: A python tuple of int values describing how each tensor - produced by the Estimator `input_fn` should be split across the TPU - compute shards. - ctx: A `_InternalTPUContext` instance with mode. - - Raises: - ValueError: If both `sharded_features` and `num_cores` are `None`. - """ - self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder( - ctx.input_partition_dims) - - self._sharded_per_core = ctx.is_input_sharded_per_core() - self._input_fn = input_fn - self._infeed_queue = None - self._ctx = ctx - self._batch_axis = batch_axis - - def generate_infeed_enqueue_ops_and_dequeue_fn(self): - """Generates infeed enqueue ops and dequeue_fn.""" - # While tf.while_loop is called, the body function, which invokes - # `enqueue_fn` passed in, is called to construct the graph. So, input_fn - # structure is recorded. - enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = ( - self._invoke_input_fn_and_record_structure()) - - self._validate_input_pipeline() - - def dequeue_fn(): - """dequeue_fn is used by TPU to retrieve the tensors.""" - # In the model-parallel case, both the host-side and device-side - # computations must agree on the core on which infeed takes place. We - # choose to perform infeed on logical core 0 of each replica. - values = self._infeed_queue.generate_dequeue_op(tpu_device=0) - # The unflatten process uses the structure information recorded above. - return self._inputs_structure_recorder.unflatten_features_and_labels( - values) - - return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator) - - def _invoke_input_fn_and_record_structure(self): - """Deploys the input pipeline and record input structure.""" - enqueue_ops = [] - infeed_queues = [] - all_dataset_initializers = [] - num_hosts = self._ctx.num_hosts - tpu_host_placement_fn = self._ctx.tpu_host_placement_function - - run_infeed_loop_on_coordinator = True - - if self._sharded_per_core: - # Per-Core input pipeline deployment. - # Invoke input pipeline for each core and placed on the corresponding - # host. - for host_id in range(num_hosts): - host_device = tpu_host_placement_fn(host_id=host_id) - with ops.device(host_device): - with ops.name_scope('input_pipeline_task%d' % (host_id)): - enqueue_ops_fn, captured_infeed_queue = ( - generate_per_core_enqueue_ops_fn_for_host( - self._ctx, self._input_fn, self._inputs_structure_recorder, - host_device, host_id)) - - if _WRAP_INPUT_FN_INTO_WHILE_LOOP: - run_infeed_loop_on_coordinator = False - enqueue_ops.append( - _wrap_computation_in_while_loop( - device=host_device, op_fn=enqueue_ops_fn)) - else: - enqueue_ops.append(enqueue_ops_fn()) - # Infeed_queue_getter must be called after enqueue_ops_fn is called. - infeed_queues.append(captured_infeed_queue.get()) - - elif self._ctx.is_input_broadcast_with_iterators(): - # Only calls input_fn in host 0. 
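The `batch_axis` argument documented above tells the per-host pipeline along which dimension each tensor should be split when the host batch is sharded across cores. A small numpy illustration of that splitting (hypothetical shapes; in the real pipeline the split is performed by `InfeedQueue.split_inputs_and_generate_enqueue_ops`):

```
import numpy as np

def split_for_shards(tensor, num_shards, batch_axis):
    """Split a host-side batch into equal per-core shards along `batch_axis`."""
    return np.split(tensor, num_shards, axis=batch_axis)

images = np.zeros((32, 32, 3, 8))   # HWCN layout: the batch is the last axis
labels = np.zeros((8,))

image_shards = split_for_shards(images, num_shards=4, batch_axis=3)
label_shards = split_for_shards(labels, num_shards=4, batch_axis=0)
print(image_shards[0].shape, label_shards[0].shape)  # (32, 32, 3, 2) (2,)
```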
- host_device = tpu_host_placement_fn(host_id=0) - enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( - generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn, - self._inputs_structure_recorder, - num_hosts)) - if dataset_initializer: - all_dataset_initializers.append(dataset_initializer) - run_infeed_loop_on_coordinator = False - wrap_fn = ( - _wrap_computation_in_while_loop - if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else - _wrap_computation_in_while_loop_with_stopping_signals) - enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) - else: - enqueue_ops.append(enqueue_ops_fn()) - infeed_queues.append(captured_infeed_queue.get()) - else: - for host_id in range(num_hosts): - host_device = tpu_host_placement_fn(host_id=host_id) - with ops.device(host_device): - with ops.name_scope('input_pipeline_task%d' % (host_id)): - if self._ctx.is_input_per_host_with_iterators(): - enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( - generate_per_host_v2_enqueue_ops_fn_for_host( - self._ctx, self._input_fn, - self._inputs_structure_recorder, host_device, host_id)) - else: - enqueue_ops_fn, captured_infeed_queue, dataset_initializer = ( - generate_per_host_enqueue_ops_fn_for_host( - self._ctx, self._input_fn, - self._inputs_structure_recorder, self._batch_axis, - host_device, host_id)) - - # NOTE(xiejw): We dispatch here based on the return type of the - # users `input_fn`. - # - # 1. If input_fn returns a Dataset instance, we initialize the - # iterator outside of tf.while_loop, and call the iterator.get_next - # inside tf.while_loop. This should be always safe. - # - # 2. If input_fn returns (features, labels), it is too late to wrap - # them inside tf.while_loop, as resource initialization cannot be - # handled in TF control flow properly. In this case, we will use - # python loop to enqueue the data into TPU system. This may be - # slow compared to the previous case. - if dataset_initializer: - all_dataset_initializers.append(dataset_initializer) - run_infeed_loop_on_coordinator = False - wrap_fn = ( - _wrap_computation_in_while_loop - if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else - _wrap_computation_in_while_loop_with_stopping_signals) - enqueue_ops.append( - wrap_fn(device=host_device, op_fn=enqueue_ops_fn)) - else: - enqueue_ops.append(enqueue_ops_fn()) - infeed_queues.append(captured_infeed_queue.get()) - # infeed_queue is used to generate dequeue ops. The only thing it uses for - # dequeue is dtypes and types. So, any one can be used. Here, grab the - # first one. - self._infeed_queue = infeed_queues[0] - return enqueue_ops, [ - util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers) - ], run_infeed_loop_on_coordinator - - def _validate_input_pipeline(self): - """Validates the input pipeline. - - Perform some sanity checks to log user friendly information. We should - error out to give users better error message. But, if - _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break - user code, so, log a warning. - - Raises: - RuntimeError: If the validation failed. - """ - if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS): - err_msg = ('Input pipeline contains one or more QueueRunners. ' - 'It could be slow and not scalable. 
Please consider ' - 'converting your input pipeline to use `tf.data` instead (see ' - 'https://www.tensorflow.org/guide/datasets for ' - 'instructions.') - if _WRAP_INPUT_FN_INTO_WHILE_LOOP: - raise RuntimeError(err_msg) - else: - logging.warn(err_msg) - - -class _ModelFnWrapper(object): - """A `model_fn` wrapper. - - This makes calling model_fn on CPU and TPU easier and more consistent and - performs necessary check and mutation required by TPU training and evaluation. - - In addition, this wrapper manages converting the `model_fn` to a single TPU - train and eval step. - """ - - def __init__(self, model_fn, train_cache_fn, eval_cache_fn, config, params, ctx): - self._model_fn = model_fn - self._train_cache_fn = train_cache_fn - self._eval_cache_fn = eval_cache_fn - self._config = config - self._params = params - self._ctx = ctx - - def call_without_tpu(self, features, labels, is_export_mode): - return self._call_model_fn(features, labels, is_export_mode=is_export_mode) - - def convert_to_single_tpu_train_step(self, dequeue_fn): - """Converts user provided model_fn` as a single train step on TPU. - - The user provided `model_fn` takes input tuple - (features, labels) and produces the EstimatorSpec with train_op and loss for - train `mode`. This usually represents a single train computation on CPU. - - For TPU training, a train (computation) step is first wrapped in a - tf.while_loop control flow to repeat for many times and then replicated to - all TPU shards. Besides the input should be taken from TPU infeed rather - than input pipeline (input_fn) directly. To fit TPU loop and replicate - pattern, the original train computation should be reformed, which is the - returned `train_step`. - - Args: - dequeue_fn: The function to retrieve inputs, features and labels, from TPU - infeed dequeue channel. - - Returns: - A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn - representing the train step for TPU. - """ - - host_call = _OutfeedHostCall(self._ctx) - captured_scaffold_fn = _CapturedObject() - captured_training_hooks = _CapturedObject() - - def train_step(loss, *cache): - """Training step function for use inside a while loop.""" - del loss # unused; required in function signature. - inputs = dequeue_fn() - features, labels = inputs.features_and_labels() - - # Consume the current cache - estimator_spec = self._verify_estimator_spec( - self._call_model_fn(features, labels, cache=cache)) - - # Retrieve the new returned cache - """ - `cache` consists of a list of tensors, potentially empty (of length 0) - """ - cache = estimator_spec.cache - loss, train_op = estimator_spec.loss, estimator_spec.train_op - - if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - captured_scaffold_fn.capture(estimator_spec.scaffold_fn) - else: - captured_scaffold_fn.capture(None) - - captured_training_hooks.capture(estimator_spec.training_hooks) - - tracing_ops = [] - if tensor_tracer.TensorTracer.is_enabled(): - tt = tensor_tracer.TensorTracer() - loss, tracing_ops = tt.trace_tpu(ops.get_default_graph(), loss, - self._ctx.num_replicas) - - # We must run train_op to update the variables prior to running the - # outfeed. 
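Each iteration of the TPU training loop is one call to `train_step`, which ignores the incoming loss, consumes and re-emits the `cache` tensors, and returns `[loss] + cache` so the while loop can carry both to the next step. A plain-Python sketch of that carrying pattern, with toy numbers standing in for tensors and for the real model computation:

```
def train_step(loss, *cache):
    """One loop body: recompute the loss and thread the cache through."""
    del loss                                  # unused, as in the TPU loop body
    new_loss = sum(cache) if cache else 0.0   # stand-in for model_fn's loss
    new_cache = [c + 1 for c in cache]        # stand-in for updated cache tensors
    return [new_loss] + new_cache

def repeat(step_fn, initial, iterations):
    """Mimics wrapping the step in a while loop for `iterations` steps."""
    carried = list(initial)
    for _ in range(iterations):
        carried = step_fn(*carried)
    return carried

print(repeat(train_step, [0.0, 1.0, 2.0], iterations=3))  # final loss plus two cache values
```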
- with ops.control_dependencies([train_op]+tracing_ops): - host_call_outfeed_ops = [] - if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access - and estimator_spec.host_call is not None): - host_call.record({'host_call': estimator_spec.host_call}) - host_call_outfeed_ops = host_call.create_enqueue_op() - with ops.control_dependencies(host_call_outfeed_ops): - return [array_ops.identity(loss)] + cache - - return (train_step, host_call, captured_scaffold_fn, - captured_training_hooks) - - def convert_to_single_tpu_eval_step(self, dequeue_fn): - """Converts user provided model_fn` as a single eval step on TPU. - - Similar to training, the user provided `model_fn` takes input tuple - (features, labels) and produces the TPUEstimatorSpec with eval_metrics for - eval `mode`. This usually represents a single evaluation computation on CPU. - - For TPU evaluation, a eval (computation) step is first wrapped in a - tf.while_loop control flow to repeat for many times and then replicated to - all TPU shards. Besides the input and output are slightly different. Input, - features and labels, should be taken from TPU infeed rather than input - pipeline (input_fn) directly. Output is managed in two stages. First, the - model outputs as the result of evaluation computation, usually model logits, - should be transferred from TPU system to CPU. Then, all model outputs are - concatenated first on CPU and sent to the metric_fn for metrics computation. - To fit TPU evaluation pattern, the original eval computation should be - reformed, which is the returned `eval_step`. - - Args: - dequeue_fn: The function to retrieve inputs, features and labels, from TPU - infeed dequeue channel. - - Returns: - A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn - representing the eval step for TPU. - """ - host_calls = _OutfeedHostCall(self._ctx) - captured_scaffold_fn = _CapturedObject() - captured_eval_hooks = _CapturedObject() - - def eval_step(total_loss, *cache): - """Evaluation step function for use inside a while loop.""" - inputs = dequeue_fn() - features, labels = inputs.features_and_labels() - - # Consume the current cache - tpu_estimator_spec = self._call_model_fn(features, labels, cache=cache) - if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - raise RuntimeError( - 'estimator_spec used by TPU evaluation must have type' - '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec))) - - # Retrieve the new returned cache - cache = tpu_estimator_spec.cache - loss = tpu_estimator_spec.loss - - captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) - captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks) - - to_record = {} - if tpu_estimator_spec.eval_metrics: - to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics - if tpu_estimator_spec.host_call is not None: - # We assume that evaluate won't update global step, so we don't wrap - # this host_call. - to_record['host_call'] = tpu_estimator_spec.host_call - host_calls.record(to_record) - - with ops.control_dependencies(host_calls.create_enqueue_op()): - return [math_ops.add(total_loss, loss)] + cache - - return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks - - def convert_to_single_tpu_predict_step(self, dequeue_fn): - """Converts user provided model_fn` as a single predict step on TPU. - - Args: - dequeue_fn: The function to retrieve inputs, features and labels, from TPU - infeed dequeue channel. 
- - Returns: - A tuple of predict_fn, host_calls, and captured scaffold_fn. The - predict_fn representing the predict step for TPU. - """ - host_calls = _OutfeedHostCall(self._ctx) - captured_scaffold_fn = _CapturedObject() - captured_predict_hooks = _CapturedObject() - - def predict_step(unused_scalar_stopping_signal): - """Evaluation step function for use inside a while loop.""" - inputs = dequeue_fn() - features, labels = inputs.features_and_labels() - stopping_signals = inputs.signals() - - assert stopping_signals is not None, ( - 'Internal Error: `signals` is missing.') - - tpu_estimator_spec = self._call_model_fn( - features, labels, is_export_mode=False) - if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - raise RuntimeError( - 'estimator_spec used by TPU prediction must have type' - '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec))) - - self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions) - - captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn) - captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks) - to_record = {} - identity_fn = lambda **kwargs: kwargs - to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions] - to_record['signals'] = [identity_fn, stopping_signals] - if tpu_estimator_spec.host_call is not None: - to_record['host_call'] = tpu_estimator_spec.host_call - host_calls.record(to_record) - - with ops.control_dependencies(host_calls.create_enqueue_op()): - return _StopSignals.as_scalar_stopping_signal(stopping_signals) - - return (predict_step, host_calls, captured_scaffold_fn, - captured_predict_hooks) - - def _verify_tpu_spec_predictions(self, predictions): - """Validates TPUEstimatorSpec.predictions dict.""" - # TODO(xiejw): Adds validation for prediction dictionrary. - # TODO(xiejw): Adds support for single tensor as predictions. - if not isinstance(predictions, dict): - raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.') - - for (key, tensor) in predictions.items(): - if tensor.shape.dims[0].value is None: - raise ValueError( - 'The tensor with key ({}) in TPUEstimatorSpec.predictions has ' - 'dynamic shape (should be static). Tensor: {}'.format(key, tensor)) - return predictions - - def _validate_model_features_and_labels(self, features, labels, - is_export_mode): - """Validates that the features and labels for the model function are valid. - - A valid features/labels object is the one with: - - Type: A tensor or any nested structure of tensors supported by TF nest, - namely nested dictionary, tuple, namedtuple, or sequence of tensors. - - Static shape if is_export_mode is False. - - Args: - features: the features that would be input to the model function. - labels: the labels that would be input to the model function. - is_export_mode: boolean value specifying if in export mode. - - Raises: - TypeError: If features/labels are not of the correct type. - ValueError: If features/labels have dynamic shape. - """ - - def validate(obj, obj_name): - """Helper validate function.""" - if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode): - return - if isinstance(obj, ops.Tensor): - if not obj.get_shape().is_fully_defined(): - raise ValueError( - 'The {} to the model returned by input_fn must have static shape.' 
- ' Tensor: {}'.format(obj_name, obj)) - else: - for tensor in data_nest.flatten(obj): - if not tensor.get_shape().is_fully_defined(): - raise ValueError( - ('The {} to the model returned by input_fn must have static ' - 'shape. Tensor: {}').format(obj_name, tensor)) - - validate(features, 'features') - if labels is not None: - validate(labels, 'labels') - - def _call_model_fn(self, features, labels, cache=None, is_export_mode=False): - """Calls the model_fn with required parameters.""" - self._validate_model_features_and_labels(features, labels, is_export_mode) - model_fn_args = function_utils.fn_args(self._model_fn) - kwargs = {} - - # Makes deep copy with `config` and params` in case user mutates them. - config = copy.deepcopy(self._config) - params = copy.deepcopy(self._params) - - if 'labels' in model_fn_args: - kwargs['labels'] = labels - elif labels is not None: - raise ValueError( - 'model_fn does not take labels, but input_fn returns labels.') - if 'mode' in model_fn_args: - kwargs['mode'] = self._ctx.mode - if 'config' in model_fn_args: - kwargs['config'] = config - if 'params' in model_fn_args: - kwargs['params'] = params - - if cache is not None: - params['cache'] = cache - - if 'params' not in model_fn_args: - raise ValueError('model_fn ({}) does not include params argument, ' - 'required by TPUEstimator to pass batch size as ' - 'params[\'batch_size\']'.format(self._model_fn)) - - if is_export_mode: - batch_size_for_model_fn = None - else: - batch_size_for_model_fn = self._ctx.batch_size_for_model_fn - - if batch_size_for_model_fn is not None: - _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn) - - running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode) - _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu) - - if not running_on_cpu: - user_context = tpu_context.TPUContext( - internal_ctx=self._ctx, call_from_input_fn=False) - _add_item_to_params(params, _CTX_KEY, user_context) - - estimator_spec = self._model_fn(features=features, **kwargs) - if (running_on_cpu and - isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access - # The estimator_spec will be passed to `Estimator` directly, which expects - # type `EstimatorSpec`. - return estimator_spec.as_estimator_spec() - else: - return estimator_spec - - def _verify_estimator_spec(self, estimator_spec): - """Validates the estimator_spec.""" - if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access - return estimator_spec - - err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.' - if estimator_spec.training_chief_hooks: - raise ValueError( - err_msg.format('training_chief_hooks') + 'If you want' + - ' to pass training hooks, please pass via training_hooks.') - - if estimator_spec.scaffold: - logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. ' - 'Please use TPUEstimatorSpec.') - return estimator_spec - - -class _OutfeedHostCall(object): - """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec.""" - - def __init__(self, ctx): - self._ctx = ctx - self._names = [] - # All of these are dictionaries of lists keyed on the name. 
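The validation that follows checks each `host_call` entry: it must be a `(fn, tensors)` pair, and when `tensors` is a list its length has to match the function's positional arguments unless the function takes varargs. A self-contained approximation using `inspect` (this mirrors, but does not reproduce, the Estimator's own checks):

```
import inspect

def validate_host_call(name, host_call):
    """Check that host_call is (callable, list-or-dict) with matching arity."""
    if not isinstance(host_call, (tuple, list)) or len(host_call) != 2:
        raise ValueError('{} should be a (fn, tensors) pair.'.format(name))
    fn, tensors = host_call
    if not callable(fn):
        raise TypeError('{}[0] should be callable.'.format(name))
    if not isinstance(tensors, (tuple, list, dict)):
        raise ValueError('{}[1] should be a tuple, list, or dict.'.format(name))
    if isinstance(tensors, (tuple, list)):
        params = inspect.signature(fn).parameters.values()
        has_varargs = any(p.kind == p.VAR_POSITIONAL for p in params)
        if not has_varargs and len(tensors) != len(params):
            raise RuntimeError('{}: {} tensors for {} arguments.'.format(
                name, len(tensors), len(params)))

def metric_fn(labels, logits):
    return {'n_labels': len(labels), 'n_logits': len(logits)}

validate_host_call('eval_metrics', (metric_fn, [[0, 1], [0.2, 0.8]]))  # passes
print(metric_fn(*[[0, 1], [0.2, 0.8]]))
```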
- self._host_fns = {} - self._tensor_keys = collections.defaultdict(list) - self._tensors = collections.defaultdict(list) - self._tensor_dtypes = collections.defaultdict(list) - self._tensor_shapes = collections.defaultdict(list) - - @staticmethod - def validate(host_calls): - """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.""" - - for name, host_call in host_calls.items(): - if not isinstance(host_call, (tuple, list)): - raise ValueError('{} should be tuple or list'.format(name)) - if len(host_call) != 2: - raise ValueError('{} should have two elements.'.format(name)) - if not callable(host_call[0]): - raise TypeError('{}[0] should be callable.'.format(name)) - if not isinstance(host_call[1], (tuple, list, dict)): - raise ValueError('{}[1] should be tuple or list, or dict.'.format(name)) - - if isinstance(host_call[1], (tuple, list)): - fullargspec = tf_inspect.getfullargspec(host_call[0]) - fn_args = function_utils.fn_args(host_call[0]) - # wrapped_hostcall_with_global_step uses varargs, so we allow that. - if fullargspec.varargs is None and len(host_call[1]) != len(fn_args): - raise RuntimeError( - 'In TPUEstimatorSpec.{}, length of tensors {} does not match ' - 'method args of the function, which takes {}.'.format( - name, len(host_call[1]), len(fn_args))) - - @staticmethod - def create_cpu_hostcall(host_calls): - """Runs on the host_call on CPU instead of TPU when use_tpu=False.""" - - _OutfeedHostCall.validate(host_calls) - ret = {} - for name, host_call in host_calls.items(): - host_fn, tensors = host_call - if isinstance(tensors, (tuple, list)): - ret[name] = host_fn(*tensors) - else: - # Must be dict. - try: - ret[name] = host_fn(**tensors) - except TypeError as e: - logging.warning( - 'Exception while calling %s: %s. It is likely the tensors ' - '(%s[1]) do not match the ' - 'function\'s arguments', name, e, name) - raise e - return ret - - def record(self, host_calls): - """Records the host_call structure.""" - - for name, host_call in host_calls.items(): - host_fn, tensor_list_or_dict = host_call - self._names.append(name) - self._host_fns[name] = host_fn - - if isinstance(tensor_list_or_dict, dict): - for (key, tensor) in six.iteritems(tensor_list_or_dict): - self._tensor_keys[name].append(key) - self._tensors[name].append(tensor) - self._tensor_dtypes[name].append(tensor.dtype) - self._tensor_shapes[name].append(tensor.shape) - else: - # List or tuple. - self._tensor_keys[name] = None - for tensor in tensor_list_or_dict: - self._tensors[name].append(tensor) - self._tensor_dtypes[name].append(tensor.dtype) - self._tensor_shapes[name].append(tensor.shape) - - def create_enqueue_op(self): - """Create the op to enqueue the recorded host_calls. - - Returns: - A list of enqueue ops, which is empty if there are no host calls. - """ - if not self._names: - return [] - - tensors = [] - # TODO(jhseu): Consider deduping tensors. - for name in self._names: - tensors.extend(self._tensors[name]) - - with ops.device(tpu.core(0)): - return [tpu_ops.outfeed_enqueue_tuple(tensors)] - - def create_tpu_hostcall(self): - """Sends the tensors through outfeed and runs the host_fn on CPU. - - The tensors are concatenated along dimension 0 to form a global tensor - across all shards. The concatenated function is passed to the host_fn and - executed on the first host. - - Returns: - A dictionary mapping name to the return type of the host_call by that - name. - - Raises: - RuntimeError: If outfeed tensor is scalar. 
- """ - if not self._names: - return {} - - ret = {} - # For each i, dequeue_ops[i] is a list containing the tensors from all - # shards. This list is concatenated later. - dequeue_ops = [] - tensor_dtypes = [] - tensor_shapes = [] - for name in self._names: - for _ in self._tensors[name]: - dequeue_ops.append([]) - for dtype in self._tensor_dtypes[name]: - tensor_dtypes.append(dtype) - for shape in self._tensor_shapes[name]: - tensor_shapes.append(shape) - - # Outfeed ops execute on each replica's first logical core. Note: we must - # constraint it such that we have at most one outfeed dequeue and enqueue - # per replica. - for i in xrange(self._ctx.num_replicas): - host_device, ordinal_id = self._ctx.device_for_replica(i) - with ops.device(host_device): - outfeed_tensors = tpu_ops.outfeed_dequeue_tuple( - dtypes=tensor_dtypes, - shapes=tensor_shapes, - device_ordinal=ordinal_id) - for j, item in enumerate(outfeed_tensors): - dequeue_ops[j].append(item) - - # Deconstruct dequeue ops. - dequeue_ops_by_name = {} - pos = 0 - for name in self._names: - dequeue_ops_by_name[name] = dequeue_ops[pos:pos + - len(self._tensors[name])] - pos += len(self._tensors[name]) - - # It is assumed evaluation always happens on single host TPU system. So, - # place all ops on tpu host if possible. - # - # TODO(jhseu): Evaluate whether this is right for summaries. - with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)): - for name in self._names: - dequeue_ops = dequeue_ops_by_name[name] - for i, item in enumerate(dequeue_ops): - if dequeue_ops[i][0].shape.ndims == 0: - raise RuntimeError( - 'All tensors outfed from TPU should preserve batch size ' - 'dimension, but got scalar {}'.format(dequeue_ops[i][0])) - # TODO(xiejw): Allow users to specify the axis for batch size - # dimension. - dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0) - - if self._tensor_keys[name] is not None: - # The user-provided eval_metrics[1] is a dict. - dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops)) - try: - ret[name] = self._host_fns[name](**dequeue_ops) - except TypeError as e: - logging.warning( - 'Exception while calling %s: %s. It is likely the tensors ' - '(%s[1]) do not match the ' - 'function\'s arguments', name, e, name) - raise e - else: - ret[name] = self._host_fns[name](*dequeue_ops) - - return ret - - -class _OutfeedHostCallHook(session_run_hook.SessionRunHook): - """Hook to run host calls when use_tpu=False.""" - - def __init__(self, tensors): - self._tensors = tensors - - def begin(self): - # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than - # create a separate hook to guarantee execution order, because summaries - # need to be initialized before the outfeed thread starts. - # TODO(jhseu): Make a wrapper hook instead? - self._init_ops = contrib_summary.summary_writer_initializer_op() - # Get all the writer resources from the initializer, so we know what to - # flush. 
- self._finalize_ops = [] - for op in self._init_ops: - self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) - - def after_create_session(self, session, coord): - session.run(self._init_ops) - - def before_run(self, run_context): - return basic_session_run_hooks.SessionRunArgs(self._tensors) - - def end(self, session): - session.run(self._finalize_ops) - - -class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook): - """Calculate and report global_step/sec and examples/sec during runtime.""" - - def __init__(self, - batch_size, - every_n_steps=100, - every_n_secs=None, - output_dir=None, - summary_writer=None): - self._batch_size = batch_size - super(ExamplesPerSecondHook, self).__init__( - every_n_steps=every_n_steps, - every_n_secs=every_n_secs, - output_dir=output_dir, - summary_writer=summary_writer) - - def _log_and_record(self, elapsed_steps, elapsed_time, global_step): - global_step_per_sec = elapsed_steps / elapsed_time - examples_per_sec = self._batch_size * global_step_per_sec - if self._summary_writer is not None: - global_step_summary = Summary(value=[ - Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec) - ]) - example_summary = Summary(value=[ - Summary.Value(tag='examples/sec', simple_value=examples_per_sec) - ]) - self._summary_writer.add_summary(global_step_summary, global_step) - self._summary_writer.add_summary(example_summary, global_step) - logging.info('global_step/sec: %g', global_step_per_sec) - logging.info('examples/sec: %g', examples_per_sec) - - -class InstallSignalHandlerHook(session_run_hook.SessionRunHook): - """Change SIGINT (CTRL^C) handler to force quit the process. - - The default behavior often results in hanging processes. - The original handler is restored after training/evaluation. - """ - - def __init__(self): - self._signal_fn = signal.getsignal(signal.SIGINT) - - def before_run(self, run_context): - signal.signal(signal.SIGINT, signal.SIG_DFL) - - def end(self, session): - signal.signal(signal.SIGINT, self._signal_fn) - - -class TPUEstimator(estimator_lib.Estimator): - """Estimator with TPU support. - - TPUEstimator also supports training on CPU and GPU. You don't need to define - a separate `tf.estimator.Estimator`. - - TPUEstimator handles many of the details of running on TPU devices, such as - replicating inputs and models for each core, and returning to host - periodically to run hooks. - - TPUEstimator transforms a global batch size in params to a per-shard batch - size when calling the `input_fn` and `model_fn`. Users should specify - global batch size in constructor, and then get the batch size for each shard - in `input_fn` and `model_fn` by `params['batch_size']`. - - - For training, `model_fn` gets per-core batch size; `input_fn` may get - per-core or per-host batch size depending on `per_host_input_for_training` - in `TPUConfig` (See docstring for TPUConfig for details). - - - For evaluation and prediction, `model_fn` gets per-core batch size and - `input_fn` get per-host batch size. - - Evaluation - ========== - - `model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics` - for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return - `EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case - the following discussion on TPU evaluation does not apply. - - `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where - `tensors` could be a list of any nested structure of `Tensor`s (See - `TPUEstimatorSpec` for details). 
`metric_fn` takes the `tensors` and returns - a dict from metric string name to the result of calling a metric function, - namely a `(metric_tensor, update_op)` tuple. - - One can set `use_tpu` to `False` for testing. All training, evaluation, and - predict will be executed on CPU. `input_fn` and `model_fn` will receive - `train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`. - - Current limitations: - -------------------- - - 1. TPU evaluation only works on a single host (one TPU worker) except - BROADCAST mode. - - 2. `input_fn` for evaluation should **NOT** raise an end-of-input exception - (`OutOfRangeError` or `StopIteration`). And all evaluation steps and all - batches should have the same size. - - Example (MNIST): - ---------------- - - ``` - # The metric Fn which runs on CPU. - def metric_fn(labels, logits): - predictions = tf.argmax(logits, 1) - return { - 'accuracy': tf.metrics.precision( - labels=labels, predictions=predictions), - } - - # Your model Fn which runs on TPU (eval_metrics is list in this example) - def model_fn(features, labels, mode, config, params): - ... - logits = ... - - if mode = tf.estimator.ModeKeys.EVAL: - return tpu_estimator.TPUEstimatorSpec( - mode=mode, - loss=loss, - eval_metrics=(metric_fn, [labels, logits])) - - # or specify the eval_metrics tensors as dict. - def model_fn(features, labels, mode, config, params): - ... - final_layer_output = ... - - if mode = tf.estimator.ModeKeys.EVAL: - return tpu_estimator.TPUEstimatorSpec( - mode=mode, - loss=loss, - eval_metrics=(metric_fn, { - 'labels': labels, - 'logits': final_layer_output, - })) - ``` - - Prediction - ========== - - Prediction on TPU is an experimental feature to support large batch inference. - It is not designed for latency-critical system. In addition, due to some - usability issues, for prediction with small dataset, CPU `.predict`, i.e., - creating a new `TPUEstimator` instance with `use_tpu=False`, might be more - convenient. - - Note: In contrast to TPU training/evaluation, the `input_fn` for prediction - *should* raise an end-of-input exception (`OutOfRangeError` or - `StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be - precise, the ops created by `input_fn` produce one batch of the data. - The `predict()` API processes one batch at a time. When reaching the end of - the data source, an end-of-input exception should be raised by one of these - operations. The user usually does not need to do this manually. As long as the - dataset is not repeated forever, the `tf.data` API will raise an end-of-input - exception automatically after the last batch has been produced. - - Note: Estimator.predict returns a Python generator. Please consume all the - data from the generator so that TPUEstimator can shutdown the TPU system - properly for user. - - Current limitations: - -------------------- - 1. TPU prediction only works on a single host (one TPU worker). - - 2. `input_fn` must return a `Dataset` instance rather than `features`. In - fact, .train() and .evaluate() also support Dataset as return value. 
- - Example (MNIST): - ---------------- - ``` - height = 32 - width = 32 - total_examples = 100 - - def predict_input_fn(params): - batch_size = params['batch_size'] - - images = tf.random_uniform( - [total_examples, height, width, 3], minval=-1, maxval=1) - - dataset = tf.data.Dataset.from_tensor_slices(images) - dataset = dataset.map(lambda images: {'image': images}) - - dataset = dataset.batch(batch_size) - return dataset - - def model_fn(features, labels, params, mode): - # Generate predictions, called 'output', from features['image'] - - if mode == tf.estimator.ModeKeys.PREDICT: - return tf.contrib.tpu.TPUEstimatorSpec( - mode=mode, - predictions={ - 'predictions': output, - 'is_padding': features['is_padding'] - }) - - tpu_est = TPUEstimator( - model_fn=model_fn, - ..., - predict_batch_size=16) - - # Fully consume the generator so that TPUEstimator can shutdown the TPU - # system. - for item in tpu_est.predict(input_fn=input_fn): - # Filter out item if the `is_padding` is 1. - # Process the 'predictions' - ``` - - Exporting - ========= - - `export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`, - and another with `tag_constants.SERVING` and `tag_constants.TPU`. - At serving time, these tags are used to select metagraph to load. - - Before running the graph on TPU, TPU system needs to be initialized. If - TensorFlow Serving model-server is used, this is done automatically. If - not, please call `session.run(tpu.initialize_system())`. - - `tpu.outside_compilation` can be used to wrap TPU incompatible ops in - `model_fn`. - - Example: - ---------------- - - ``` - def model_fn(features, labels, mode, config, params): - ... - logits = ... - export_outputs = { - 'logits': export_output_lib.PredictOutput( - {'logits': logits}) - } - - def host_call(logits): - class_ids = math_ops.argmax(logits) - classes = string_ops.as_string(class_ids) - export_outputs['classes'] = - export_output_lib.ClassificationOutput(classes=classes) - - tpu.outside_compilation(host_call, logits) - - ... - ``` - - """ - - def __init__(self, - model_fn=None, - train_cache_fn=None, - eval_cache_fn=None, - model_dir=None, - config=None, - params=None, - use_tpu=True, - train_batch_size=None, - eval_batch_size=None, - predict_batch_size=None, - batch_axis=None, - eval_on_tpu=True, - export_to_tpu=True, - warm_start_from=None): - """Constructs an `TPUEstimator` instance. - - Args: - model_fn: Model function as required by `Estimator` which returns - EstimatorSpec or TPUEstimatorSpec. `training_hooks`, 'evaluation_hooks', - and `prediction_hooks` must not capure any TPU Tensor inside the - model_fn. - model_dir: Directory to save model parameters, graph and etc. This can - also be used to load checkpoints from the directory into a estimator to - continue training a previously saved model. If `None`, the model_dir in - `config` will be used if set. If both are set, they must be same. If - both are `None`, a temporary directory will be used. - config: An `tpu_config.RunConfig` configuration object. Cannot be `None`. - params: An optional `dict` of hyper parameters that will be passed into - `input_fn` and `model_fn`. Keys are names of parameters, values are - basic python types. There are reserved keys for `TPUEstimator`, - including 'batch_size'. - use_tpu: A bool indicating whether TPU support is enabled. Currently, - - TPU training and evaluation respect this bit, but eval_on_tpu can - override execution of eval. See below. - Predict still happens on CPU. 
- train_batch_size: An int representing the global training batch size. - TPUEstimator transforms this global batch size to a per-shard batch - size, as params['batch_size'], when calling `input_fn` and `model_fn`. - Cannot be `None` if `use_tpu` is `True`. Must be divisible by total - number of replicas. - eval_batch_size: An int representing evaluation batch size. Must be - divisible by total number of replicas. - predict_batch_size: An int representing the prediction batch size. Must be - divisible by total number of replicas. - batch_axis: A python tuple of int values describing how each tensor - produced by the Estimator `input_fn` should be split across the TPU - compute shards. For example, if your input_fn produced (images, labels) - where the images tensor is in `HWCN` format, your shard dimensions would - be [3, 0], where 3 corresponds to the `N` dimension of your images - Tensor, and 0 corresponds to the dimension along which to split the - labels to match up with the corresponding images. If None is supplied, - and per_host_input_for_training is True, batches will be sharded based - on the major dimension. If tpu_config.per_host_input_for_training is - False or `PER_HOST_V2`, batch_axis is ignored. - eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the - model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`. - export_to_tpu: If True, `export_savedmodel()` exports a metagraph for - serving on TPU besides the one on CPU. - warm_start_from: Optional string filepath to a checkpoint or SavedModel to - warm-start from, or a `tf.estimator.WarmStartSettings` object to fully - configure warm-starting. If the string filepath is provided instead of - a `WarmStartSettings`, then all variables are warm-started, and it is - assumed that vocabularies and Tensor names are unchanged. - - Raises: - ValueError: `params` has reserved keys already. - """ - if config is None or not isinstance(config, tpu_config.RunConfig): - raise ValueError( - '`config` must be provided with type `tpu_config.RunConfig`') - - if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS): - raise ValueError('{} are reserved keys but existed in params {}.'.format( - _RESERVED_PARAMS_KEYS, params)) - - if use_tpu: - # Perform some very basic validations. More validations will be found in - # _InternalTPUContext. - if train_batch_size is None: - raise ValueError('`train_batch_size` cannot be `None`') - util_lib.check_positive_integer(train_batch_size, 'train_batch_size') - - if (config.tpu_config.per_host_input_for_training is - tpu_config.InputPipelineConfig.PER_SHARD_V1 and - config.tpu_config.num_cores_per_replica): - raise ValueError( - 'Model parallelism only supports per host input for training. ' - 'Please adjust TPURunconfig.per_host_input_for_training.') - - if eval_batch_size is not None: - util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size') - - if predict_batch_size is not None: - util_lib.check_positive_integer(predict_batch_size, - 'predict_batch_size') - - # Verifies the model_fn signature according to Estimator framework. - estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access - # We cannot store config and params in this constructor as parent - # constructor might change them, such as assigning a temp dir for - # config.model_dir. 
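The constructor checks above only enforce positivity; the per-shard split performed later also requires each global batch size to divide evenly across replicas. A compact illustration of both checks and the resulting per-shard size (a hypothetical helper, not the internal `util_lib` one):

```
def per_shard_batch_size(global_batch_size, num_replicas, name):
    """Validate a global batch size and return the per-replica share."""
    if not isinstance(global_batch_size, int) or global_batch_size <= 0:
        raise ValueError('`{}` must be a positive integer.'.format(name))
    if global_batch_size % num_replicas != 0:
        raise ValueError('`{}` ({}) must be divisible by {} replicas.'.format(
            name, global_batch_size, num_replicas))
    return global_batch_size // num_replicas

print(per_shard_batch_size(1024, 8, 'train_batch_size'))  # 128
```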
- model_function = self._augment_model_fn( - model_fn, - train_cache_fn, - eval_cache_fn, - batch_axis) - - # Overwrite log_step_count_steps to disable TensorLoggingHook and - # StepCounterHook from being created in Estimator. TPUEstimator already - # added equivalent hooks in _augment_model_fn above. - self._log_every_n_steps = config.log_step_count_steps - config = config.replace(log_step_count_steps=None) - - # Passing non-None params as wrapped model_fn has it. - params = params or {} - super(TPUEstimator, self).__init__( - model_fn=model_function, - model_dir=model_dir, - config=config, - params=params, - warm_start_from=warm_start_from) - self._iterations_per_training_loop = ( - self._config.tpu_config.iterations_per_loop) - - # All properties passed to _InternalTPUContext are immutable. - # pylint: disable=protected-access - self._ctx = tpu_context._get_tpu_context( - self._config, train_batch_size, eval_batch_size, predict_batch_size, - use_tpu, eval_on_tpu) - - self._export_to_tpu = export_to_tpu - - self._is_input_fn_invoked = None - self._rendezvous = {} - - def _add_meta_graph_for_mode(self, - builder, - input_receiver_fn_map, - checkpoint_path, - save_variables=True, - mode=model_fn_lib.ModeKeys.PREDICT, - export_tags=None, - check_variables=True): - if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT: - raise NotImplementedError( - 'TPUEstimator only handles mode PREDICT for exporting ' - 'when `export_to_tpu` is `True`; ' - 'got {}.'.format(mode)) - - (super(TPUEstimator, self)._add_meta_graph_for_mode( - builder, - input_receiver_fn_map, - checkpoint_path, - save_variables, - mode=mode, - export_tags=export_tags, - check_variables=check_variables)) - - if self._export_to_tpu: - input_receiver_fn_map = { - _REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode] - } - export_tags = [tag_constants.SERVING, tag_constants.TPU] - mode = _REWRITE_FOR_INFERENCE_MODE - # See b/110052256 for why `check_variables` is `False`. - (super(TPUEstimator, self)._add_meta_graph_for_mode( - builder, - input_receiver_fn_map, - checkpoint_path, - save_variables=False, - mode=mode, - export_tags=export_tags, - check_variables=False)) - - def _call_model_fn(self, features, labels, mode, config): - if mode == _REWRITE_FOR_INFERENCE_MODE: - return self._call_model_fn_for_inference(features, labels, mode, config) - else: - return super(TPUEstimator, self)._call_model_fn(features, labels, mode, - config) - - def _call_model_fn_for_inference(self, features, labels, mode, config): - """Wraps `_call_model_fn` for `export_savedmodel`.""" - if mode != _REWRITE_FOR_INFERENCE_MODE: - raise ValueError('mode must be {}; ' - 'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode)) - - capture = _CapturedObject() - - def computation(): - """Compute tpu tensors used in export_outputs. - - Passed to rewrite_for_inference so that model_fn will be called under - the rewriting contexts. Only tpu tensors are returned, but export_outputs - and scaffold are captured. - - Returns: - A list of Tensors used in export_outputs and not marked for - outside_compilation. - """ - # We should only call model fn once and it should be inside `computation` - # so that building the graph will happen under `rewrite_for_inference`. - mode = model_fn_lib.ModeKeys.PREDICT - estimator_spec = self._call_model_fn(features, labels, mode, config) - - # We pick the TPU tensors out from `export_output` and later return them - # from `computation` for rewriting. 
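The inference-rewrite step below flattens the `export_outputs` tensors, hands only the non-`None` ones to the TPU-to-CPU rewrite, and then splices the rewritten tensors back into their original positions. A toy version of that splice, with strings standing in for tensors:

```
def splice_rewritten(tensors, rewritten):
    """Replace every non-None entry of `tensors` with the next rewritten value."""
    rewritten = list(rewritten)
    spliced = []
    for t in tensors:
        spliced.append(None if t is None else rewritten.pop(0))
    return spliced

flat = ['logits_tpu', None, 'probs_tpu']      # flattened export tensors
on_cpu = ['logits_cpu', 'probs_cpu']          # outputs of the inference rewrite
print(splice_rewritten(flat, on_cpu))         # ['logits_cpu', None, 'probs_cpu']
```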
- tensors_dict = collections.OrderedDict( - (k, _export_output_to_tensors(v)) - for k, v in six.iteritems(estimator_spec.export_outputs)) - tensors = nest.flatten(tensors_dict) - tpu_tensors = [t for t in tensors if t is not None] - - # We cannot return anything other than `tpu_tensors` here so we capture - # the rest for later use. - capture.capture((estimator_spec, tensors_dict, tensors)) - return tpu_tensors - - tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation) - estimator_spec, tensors_dict, tensors = capture.get() - - # Reconstruct `tensors`, but with `tpu_tensors` replaced with - # `tpu_tensors_on_cpu`. - new_tensors = [] - for t in tensors: - if t is None: - new_tensors.append(None) - else: - new_tensors.append(tpu_tensors_on_cpu.pop(0)) - - # Reconstruct `tensors_dict`. - new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors) - # Reconstruct `export_outputs`. - export_outputs = estimator_spec.export_outputs - new_export_outputs = collections.OrderedDict( - (k, _clone_export_output_with_tensors(export_outputs[k], v)) - for k, v in six.iteritems(new_tensors_dict)) - - return estimator_spec._replace(export_outputs=new_export_outputs) - - def _create_global_step(self, graph): - """Creates a global step suitable for TPUs. - - Args: - graph: The graph in which to create the global step. - - Returns: - A global step `Tensor`. - - Raises: - ValueError: if the global step tensor is already defined. - """ - return _create_global_step(graph) - - def _convert_train_steps_to_hooks(self, steps, max_steps): - with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx: - if ctx.is_running_on_cpu(): - return super(TPUEstimator, self)._convert_train_steps_to_hooks( - steps, max_steps) - - # On TPU. - if steps is None and max_steps is None: - raise ValueError( - 'For TPU training, one of `steps` or `max_steps` must be set. ' - 'Cannot be both `None`.') - - # Estimator.train has explicit positiveness check. - if steps is not None: - util_lib.check_positive_integer(steps, 'Train steps') - if max_steps is not None: - util_lib.check_positive_integer(max_steps, 'Train max_steps') - - return [ - _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps) - ] - - def _convert_eval_steps_to_hooks(self, steps): - with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx: - if ctx.is_running_on_cpu(): - return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps) - - if steps is None: - raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.') - - util_lib.check_positive_integer(steps, 'Eval steps') - - return [ - evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access - num_evals=steps), - _SetEvalIterationsHook(steps) - ] - - def _call_input_fn(self, input_fn, mode): - """Calls the input function. - - Args: - input_fn: The input function. - mode: ModeKeys - - Returns: - In TPU mode, returns an input_fn to be called later in model_fn. - Otherwise, calls the input_fn and returns either fatures or - (features, labels). - - Raises: - ValueError: if input_fn takes invalid arguments or does not have `params`. - """ - input_fn_args = function_utils.fn_args(input_fn) - config = self.config # a deep copy. - kwargs = {} - if 'params' in input_fn_args: - kwargs['params'] = self.params # a deep copy. 
- else: - raise ValueError('input_fn ({}) does not include params argument, ' - 'required by TPUEstimator to pass batch size as ' - 'params["batch_size"]'.format(input_fn)) - if 'config' in input_fn_args: - kwargs['config'] = config - - if 'mode' in input_fn_args: - kwargs['mode'] = mode - - # Records the fact input_fn has been invoked. - self._is_input_fn_invoked = True - - with self._ctx.with_mode(mode) as ctx: - # Setting the batch size in params first. This helps user to have same - # input_fn for use_tpu=True/False. - batch_size_for_input_fn = ctx.batch_size_for_input_fn - if batch_size_for_input_fn is not None: - _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY, - batch_size_for_input_fn) - - # For export_savedmodel, input_fn is never passed to Estimator. So, - # `is_export_mode` must be False. - if ctx.is_running_on_cpu(is_export_mode=False): - with ops.device('/device:CPU:0'): - return input_fn(**kwargs) - - # For TPU computation, input_fn should be invoked in a tf.while_loop for - # performance. While constructing the tf.while_loop, the structure of - # inputs returned by the `input_fn` needs to be recorded. The structure - # includes whether features or labels is dict or single Tensor, dict keys, - # tensor shapes, and dtypes. The recorded structure is used to create the - # infeed dequeue ops, which must be wrapped and passed as a Fn, called - # inside the TPU computation, as the TPU computation is wrapped inside a - # tf.while_loop also. So, we either pass input_fn to model_fn or pass - # dequeue_fn to model_fn. Here, `input_fn` is passed directly as - # `features` in `model_fn` signature. - def _input_fn(ctx): - _add_item_to_params(kwargs['params'], _CTX_KEY, ctx) - return input_fn(**kwargs) - - return _input_fn - - def _validate_features_in_predict_input(self, result): - """Skip the validation. - - For TPUEstimator, we do not need to check the result type. `_InputPipeline` - has stronger check. Parent class's check generates confusing warning msg. - - Args: - result: `features` returned by input_fn. 
- """ - pass - - def train(self, - input_fn, - hooks=None, - steps=None, - max_steps=None, - saving_listeners=None): - rendezvous = error_handling.ErrorRendezvous(num_sources=3) - self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous - try: - return super(TPUEstimator, self).train( - input_fn=input_fn, - hooks=hooks, - steps=steps, - max_steps=max_steps, - saving_listeners=saving_listeners) - except Exception: # pylint: disable=broad-except - rendezvous.record_error('training_loop', sys.exc_info()) - finally: - rendezvous.record_done('training_loop') - rendezvous.raise_errors() - - def evaluate(self, - input_fn, - steps=None, - hooks=None, - checkpoint_path=None, - name=None): - rendezvous = error_handling.ErrorRendezvous(num_sources=3) - self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous - try: - return super(TPUEstimator, self).evaluate( - input_fn, - steps=steps, - hooks=hooks, - checkpoint_path=checkpoint_path, - name=name) - except Exception: # pylint: disable=broad-except - rendezvous.record_error('evaluation_loop', sys.exc_info()) - finally: - rendezvous.record_done('evaluation_loop') - rendezvous.raise_errors() - - def predict(self, - input_fn, - predict_keys=None, - hooks=None, - checkpoint_path=None, - yield_single_examples=True): - rendezvous = error_handling.ErrorRendezvous(num_sources=3) - self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous - try: - for result in super(TPUEstimator, self).predict( - input_fn=input_fn, - predict_keys=predict_keys, - hooks=hooks, - checkpoint_path=checkpoint_path, - yield_single_examples=yield_single_examples): - yield result - except Exception: # pylint: disable=broad-except - rendezvous.record_error('prediction_loop', sys.exc_info()) - finally: - rendezvous.record_done('prediction_loop') - rendezvous.raise_errors() - - rendezvous.record_done('prediction_loop') - rendezvous.raise_errors() - - def _augment_model_fn(self, model_fn, train_cache_fn, eval_cache_fn, batch_axis): - """Returns a new model_fn, which wraps the TPU support.""" - - def _model_fn(features, labels, mode, config, params): - """A Estimator `model_fn` for TPUEstimator.""" - with self._ctx.with_mode(mode) as ctx: - model_fn_wrapper = _ModelFnWrapper(model_fn, train_cache_fn, - eval_cache_fn, config, params, ctx) - - # `input_fn` is called in `train()`, `evaluate()`, and `predict()`, - # but not in `export_savedmodel()`. - if self._is_input_fn_invoked: - is_export_mode = False - else: - is_export_mode = True - - # Clear the bit. - self._is_input_fn_invoked = None - - # examples_hook is added to training_hooks for both CPU and TPU - # execution. - if self._log_every_n_steps is not None: - examples_hook = ExamplesPerSecondHook( - ctx.global_batch_size, - output_dir=self.model_dir, - every_n_steps=self._log_every_n_steps) - - if ctx.is_running_on_cpu(is_export_mode=is_export_mode): - logging.info('Running %s on CPU', mode) - estimator_spec = model_fn_wrapper.call_without_tpu( - features, labels, is_export_mode=is_export_mode) - if self._log_every_n_steps is not None: - estimator_spec = estimator_spec._replace( - training_hooks=estimator_spec.training_hooks + (examples_hook,)) - return estimator_spec - - assert labels is None, '`labels` passed to `model_fn` must be `None`.' - # TPUEstimator._call_input_fn passes `input_fn` as features to here. - assert callable(features), '`input_fn` is not callable.' 
- input_fn = features - - input_holders = _InputPipeline(input_fn, batch_axis, ctx) - enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = ( - input_holders.generate_infeed_enqueue_ops_and_dequeue_fn()) - - graph = ops.get_default_graph() - for enqueue_op in enqueue_ops: - if isinstance(enqueue_op, list): - graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op) - else: - graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op) - - if mode == model_fn_lib.ModeKeys.TRAIN: - compile_op, loss, host_call, scaffold, training_hooks = ( - _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) - host_ops = host_call.create_tpu_hostcall() - if host_ops is None: - host_ops = [] - - shutdown_hooks = [] - shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE', - 'shutdown_worker') - if shutdown_mode: - if shutdown_mode == 'shutdown_worker': - finalizer_hooks = [ - session_support.ShutdownLameWorkers(timeout_ms=60 * 1000), - ] - elif shutdown_mode == 'shutdown_computation': - finalizer_hooks = [ - session_support.RestartComputation(timeout_ms=60 * 1000), - ] - else: - raise ValueError( - 'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode) - - shutdown_hooks.append( - session_support.GracefulShutdownHook( - checkpoint_prefix=self.model_dir + '/model.ckpt', - on_shutdown_hooks=finalizer_hooks)) - - with ops.control_dependencies([loss]): - global_step = array_ops.identity(training.get_global_step()) - hooks = input_hooks + shutdown_hooks - hooks.extend([ - TPUInfeedOutfeedSessionHook( - ctx, - enqueue_ops, - host_ops, - tpu_compile_op=compile_op, - run_infeed_loop_on_coordinator=( - run_infeed_loop_on_coordinator), - rendezvous=self._rendezvous[mode], - master=self._config.master, - session_config=self._session_config, - ), - InstallSignalHandlerHook() - ]) - if self._log_every_n_steps is not None: - logging_hook_frequency = ( # Divide and round up - (self._log_every_n_steps + - self._config.tpu_config.iterations_per_loop - 1) // - self._config.tpu_config.iterations_per_loop) - hooks.append( - training.LoggingTensorHook({ - 'loss': array_ops.identity(loss), - 'step': global_step, - }, - every_n_iter=logging_hook_frequency)) - examples_hook._set_steps_per_run( # pylint: disable=protected-access - self._config.tpu_config.iterations_per_loop) - hooks.append(examples_hook) - - if training_hooks: - hooks.extend(training_hooks) - - chief_hooks = [] - if (self._config.save_checkpoints_secs or - self._config.save_checkpoints_steps): - checkpoint_hook = training.CheckpointSaverHook( - self.model_dir, - save_secs=self._config.save_checkpoints_secs, - save_steps=self._config.save_checkpoints_steps, - scaffold=scaffold) - checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access - self._config.tpu_config.iterations_per_loop) - chief_hooks.append(checkpoint_hook) - - summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss) - with ops.control_dependencies([loss]): - update_ops = _sync_variables_ops(ctx) - - # Validate the TPU training graph to catch basic errors - _validate_tpu_training_graph() - - train_op = control_flow_ops.group(*update_ops) - graph.add_to_collection(_TPU_TRAIN_OP, train_op) - - return model_fn_lib.EstimatorSpec( - mode, - loss=loss, - training_chief_hooks=chief_hooks, - training_hooks=hooks, - train_op=train_op, - scaffold=scaffold) - - if mode == model_fn_lib.ModeKeys.EVAL: - compile_op, total_loss, host_calls, scaffold, eval_hooks = ( - _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn)) - iterations_per_loop_var = 
_create_or_get_iterations_per_loop() - mean_loss = math_ops.div( - total_loss, - math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype)) - - with ops.control_dependencies([mean_loss]): - # After TPU evaluation computation is done (the mean_loss tensor), - # reads all variables back from TPU and updates the eval step - # counter properly - internal_ops_to_run = _sync_variables_ops(ctx) - internal_ops_to_run.append( - _increase_eval_step_op(iterations_per_loop_var)) - - host_call_ret = host_calls.create_tpu_hostcall() - eval_metric_ops = {} - eval_update_ops = [] - - eval_metrics = host_call_ret.get('eval_metrics', {}) - if eval_metrics: - # Creates a dummy metric update_op for all metrics. Estimator - # expects all metrics in `eval_metric_ops` have update_op and calls - # them one by one. The real metric update_ops are invoked in a - # separated thread. So, here give Estimator the dummy op for all - # metrics. - with ops.control_dependencies(internal_ops_to_run): - dummy_update_op = control_flow_ops.no_op() - - for k, v in eval_metrics.items(): - eval_metric_ops[k] = (v[0], dummy_update_op) - eval_update_ops.append(v[1]) - else: - # If no eval metrics are passed, create an identity node for the - # loss and add `internal_ops_to_run` to its dependencies. So - # `internal_ops_to_run` can be executed. - with ops.control_dependencies(internal_ops_to_run): - mean_loss = array_ops.identity(mean_loss) - - if 'host_call' not in host_call_ret: - host_ops = [] - else: - host_ops = host_call_ret['host_call'] - hooks = [ - TPUInfeedOutfeedSessionHook( - ctx, - enqueue_ops, - eval_update_ops + host_ops, - tpu_compile_op=compile_op, - run_infeed_loop_on_coordinator=( - run_infeed_loop_on_coordinator), - rendezvous=self._rendezvous[mode], - master=self._config.evaluation_master, - session_config=self._session_config, - )] + input_hooks - - if eval_hooks: - hooks.extend(eval_hooks) - - return model_fn_lib.EstimatorSpec( - mode, - loss=mean_loss, - evaluation_hooks=hooks, - eval_metric_ops=eval_metric_ops, - scaffold=scaffold) - - # Predict - assert mode == model_fn_lib.ModeKeys.PREDICT - - (compile_op, dummy_predict_op, host_calls, - scaffold, prediction_hooks) = _predict_on_tpu_system( - ctx, model_fn_wrapper, dequeue_fn) - with ops.control_dependencies([dummy_predict_op]): - internal_ops_to_run = _sync_variables_ops(ctx) - with ops.control_dependencies(internal_ops_to_run): - dummy_predict_op = control_flow_ops.no_op() - - # In train and evaluation, the main TPU program is passed to monitored - # training session to run. Infeed enqueue and outfeed dequeue are - # executed in side threads. This is not the configuration for - # prediction mode. - # - # For prediction, the Estimator executes the EstimatorSpec.predictions - # directly and yield the element (via generator) to call site. So, the - # outfeed based prediction must be passed to MonitoredSession directly. - # Other parts of the TPU execution are organized as follows. - # - # 1. All outfeed based Tensors must be grouped with predictions Tensors - # to form a single invocation. This avoid the issue we might trigger - # multiple outfeeds incorrectly. To achieve this, `host_call` is - # placed in control_dependencies of `stopping_signals`, and - # `stopping_signals` is passed into _StoppingPredictHook, which sets - # the `stopping_signals` as SessionRunArgs. MonitoredSession merges - # all SessionRunArgs with the fetch in session.run together. - # - # 2. 
The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue) - # are grouped together. They will be launched once and only once in - # side threads and they quit naturally according to the SAME stopping - # condition. - enqueue_ops.append(dummy_predict_op) - - host_call_ret = host_calls.create_tpu_hostcall() - if 'host_call' not in host_call_ret: - host_ops = [] - else: - host_ops = host_call_ret['host_call'] - - predictions = host_call_ret['predictions'] - _verify_cross_hosts_transfer_size( - predictions, - message=( - 'The estimated size for TPUEstimatorSpec.predictions is too ' - 'large.')) - signals = host_call_ret['signals'] - - with ops.control_dependencies(host_ops): - host_ops = [] # Empty, we do do not need it anymore. - scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal( - signals) - predictions = _PaddingSignals.slice_tensor_or_dict( - predictions, signals) - - hooks = [ - _StoppingPredictHook(scalar_stopping_signal), - TPUInfeedOutfeedSessionHookForPrediction( - ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode], - tpu_compile_op=compile_op, - master=self._config.master, - session_config=self._session_config), - ] + input_hooks - - if prediction_hooks: - hooks.extend(prediction_hooks) - - return model_fn_lib.EstimatorSpec( - mode, - prediction_hooks=hooks, - predictions=predictions, - scaffold=scaffold) - - return _model_fn - - -def _export_output_to_tensors(export_output): - """Get a list of `Tensors` used in `export_output`. - - Args: - export_output: an `ExportOutput` object such as `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. - - Returns: - a list of tensors used in export_output. - - Raises: - ValueError: if `export_output` is not one of `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. - """ - if isinstance(export_output, export_output_lib.ClassificationOutput): - return [export_output.scores, export_output.classes] - elif isinstance(export_output, export_output_lib.RegressionOutput): - return [export_output.value] - elif isinstance(export_output, export_output_lib.PredictOutput): - return list(export_output.outputs.values()) - else: - raise ValueError( - '`export_output` must be have type `ClassificationOutput`, ' - '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) - - -def _clone_export_output_with_tensors(export_output, tensors): - """Clones `export_output` but with new `tensors`. - - Args: - export_output: an `ExportOutput` object such as `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. - tensors: a list of `Tensors` used to construct a new `export_output`. - - Returns: - A dict similar to `export_output` but with `tensors`. - - Raises: - ValueError: if `export_output` is not one of `ClassificationOutput`, - `RegressionOutput`, or `PredictOutput`. 
- """ - if isinstance(export_output, export_output_lib.ClassificationOutput): - if len(tensors) != 2: - raise ValueError('tensors must be of length 2; ' - 'got {}.'.format(len(tensors))) - return export_output_lib.ClassificationOutput(*tensors) - elif isinstance(export_output, export_output_lib.RegressionOutput): - if len(tensors) != 1: - raise ValueError('tensors must be of length 1; ' - 'got {}'.format(len(tensors))) - return export_output_lib.RegressionOutput(*tensors) - elif isinstance(export_output, export_output_lib.PredictOutput): - return export_output_lib.PredictOutput( - dict(zip(export_output.outputs.keys(), tensors))) - else: - raise ValueError( - '`export_output` must be have type `ClassificationOutput`, ' - '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) - - -def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): - """Executes `model_fn_wrapper` multiple times on all TPU shards.""" - iterations_per_loop_var = _create_or_get_iterations_per_loop() - - (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks - ) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn) - - def multi_tpu_eval_steps_on_single_shard(): - loop_vars = [_ZERO_LOSS] - if model_fn_wrapper._eval_cache_fn is not None: - batch_size = ctx.global_batch_size - num_shards = ctx._config._tpu_config.num_shards - loop_vars += model_fn_wrapper._eval_cache_fn(batch_size // num_shards) - - return training_loop.repeat( - iterations_per_loop_var, - single_tpu_eval_step, - loop_vars) - - compile_op, ret = tpu.split_compile_and_shard( - multi_tpu_eval_steps_on_single_shard, - inputs=[], - num_shards=ctx.num_replicas, - outputs_from_all_shards=False, - device_assignment=ctx.device_assignment) - - loss = ret[0] - scaffold = _get_scaffold(captured_scaffold_fn) - return compile_op, loss, host_calls, scaffold, captured_eval_hooks.get() - - -def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): - """Executes `model_fn_wrapper` multiple times on all TPU shards.""" - iterations_per_loop_var = _create_or_get_iterations_per_loop() - - (single_tpu_train_step, host_call, captured_scaffold_fn, - captured_training_hooks) = ( - model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn)) - - def multi_tpu_train_steps_on_single_shard(): - loop_vars = [_INITIAL_LOSS] - if model_fn_wrapper._train_cache_fn is not None: - batch_size = ctx.global_batch_size - num_shards = ctx._config._tpu_config.num_shards - loop_vars += model_fn_wrapper._train_cache_fn(batch_size // num_shards) - - return training_loop.repeat( - iterations_per_loop_var, - single_tpu_train_step, - loop_vars) - - compile_op, ret = tpu.split_compile_and_shard( - multi_tpu_train_steps_on_single_shard, - inputs=[], - num_shards=ctx.num_replicas, - outputs_from_all_shards=False, - device_assignment=ctx.device_assignment) - - loss = ret[0] - scaffold = _get_scaffold(captured_scaffold_fn) - return compile_op, loss, host_call, scaffold, captured_training_hooks.get() - - -def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): - """Executes `model_fn_wrapper` multiple times on all TPU shards.""" - (single_tpu_predict_step, host_calls, captured_scaffold_fn, - captured_predict_hooks - ) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn) - - def multi_tpu_predict_steps_on_single_shard(): - - def cond(scalar_stopping_signal): - return math_ops.logical_not( - _StopSignals.should_stop(scalar_stopping_signal)) - - inputs = [_StopSignals.NON_STOPPING_SIGNAL] - outputs = training_loop.while_loop( - cond, 
single_tpu_predict_step, inputs=inputs, name=b'loop') - return outputs - - (compile_op, dummy_predict_op,) = tpu.split_compile_and_shard( - multi_tpu_predict_steps_on_single_shard, - inputs=[], - num_shards=ctx.num_replicas, - outputs_from_all_shards=False, - device_assignment=ctx.device_assignment) - - dummy_predict_op = dummy_predict_op[0] - scaffold = _get_scaffold(captured_scaffold_fn) - return (compile_op, dummy_predict_op, host_calls, scaffold, - captured_predict_hooks.get()) - - -def _wrap_computation_in_while_loop(device, op_fn): - """Wraps the ops generated by `op_fn` in tf.while_loop.""" - - def computation(i): - with ops.control_dependencies(op_fn()): - return i + 1 - - iterations_per_loop_var = _create_or_get_iterations_per_loop() - # By setting parallel_iterations=1, the parallel execution in while_loop is - # basically turned off. - with ops.device(device): - iterations = array_ops.identity(iterations_per_loop_var) - return control_flow_ops.while_loop( - lambda i: i < iterations, - computation, [constant_op.constant(0)], - parallel_iterations=1) - - -def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn): - """Wraps the ops generated by `op_fn` in tf.while_loop.""" - - def cond(scalar_stopping_signal): - return math_ops.logical_not( - _StopSignals.should_stop(scalar_stopping_signal)) - - def computation(unused_scalar_stopping_signal): - return_value = op_fn() - execute_ops = return_value['ops'] - signals = return_value['signals'] - with ops.control_dependencies(execute_ops): - return _StopSignals.as_scalar_stopping_signal(signals) - - # By setting parallel_iterations=1, the parallel execution in while_loop is - # basically turned off. - with ops.device(device): - return control_flow_ops.while_loop( - cond, - computation, [_StopSignals.NON_STOPPING_SIGNAL], - parallel_iterations=1) - - -def _validate_tpu_training_graph(): - """Validate graph before running distributed training. - - Raises: - ValueError: If the graph seems invalid for running on device - """ - operations = ops.get_default_graph().get_operations() - - # Check if there is atleast one CrossReplicaSum operation in the graph - # This should be introduced by using the CrossShardOptimizer wrapper - cross_replica_sum_ops = [ - o for o in operations if o.type == _CROSS_REPLICA_SUM_OP - ] - if not cross_replica_sum_ops: - raise ValueError( - 'CrossShardOptimizer must be used for model training on TPUs.') - - -class _CapturedObject(object): - """A placeholder to capture an object. - - This is useful when we need to capture a Python object in the Tensorflow - control flow body function and use it outside the control flow. - """ - - def __init__(self): - self._object = None - self._captured = False - - def capture(self, o): - if self._captured: - raise RuntimeError( - 'InternalError: Object can capture only once. Please file bug.') - - self._captured = True - self._object = o - - def get(self): - if not self._captured: - raise RuntimeError( - 'InternalError: Object is not captured properly before `get`. 
' - 'Please file bug.') - return self._object - - -def _get_scaffold(captured_scaffold_fn): - """Retrieves the Scaffold from `captured_scaffold_fn`.""" - with _CapturingContext(message='Inside scaffold_fn'): - scaffold_fn = captured_scaffold_fn.get() - if scaffold_fn: - scaffold = scaffold_fn() - if scaffold is None: - raise ValueError( - 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed') - else: - scaffold = None - - if scaffold: - wrapped_finalize = scaffold.finalize - - def _finalize(): - with _CapturingContext('Inside Scaffold.finalize'): - wrapped_finalize() - - scaffold.finalize = _finalize - return scaffold - - -class _CapturingContext(control_flow_ops.ControlFlowContext): - """Tracks references to Tensors defined in TPU replication.""" - - def __init__(self, message): - control_flow_ops.ControlFlowContext.__init__(self) - self._message = message - - def to_control_flow_context_def(self, context_def, export_scope=None): - # pylint: disable=useless-super-delegation - # NOTE(slebedev): the method is required by `ControlFlowContext`. - super(_CapturingContext, self).to_control_flow_context_def( - context_def, export_scope) - - def AddOp(self, op): # pylint: disable=invalid-name - for c in op.inputs: - if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access - raise ValueError('{}: Op {} depends on TPU computation {}, ' - 'which is not allowed.'.format(self._message, op, c)) - - def __enter__(self): - # pylint: disable=protected-access - self._g = ops.get_default_graph() - self._old = self._g._get_control_flow_context() - self._g._set_control_flow_context(self) - # pylint: enable=protected-access - - def __exit__(self, _, __, ___): # pylint: disable=invalid-name - self._g._set_control_flow_context(self._old) # pylint: disable=protected-access - - -class _Inputs(object): - """A data structure representing the input_fn returned values. - - This also supports the returned value from input_fn as `Dataset`. - """ - - def __init__(self, features=None, labels=None, dataset=None, signals=None): - if dataset is not None and (features is not None or labels is not None or - signals is not None): - raise RuntimeError('Internal Error: Either (features and labels) or ' - 'dataset should be provided, not both. Please file ' - 'bug') - - self._features = features - self._labels = labels - self._signals = signals - - self._dataset = dataset - self._iterator = None - - @staticmethod - def from_input_fn(return_values): - """Returns an `_Inputs` instance according to `input_fn` return value.""" - if isinstance(return_values, dataset_ops.DatasetV2): - dataset = return_values - return _Inputs(dataset=dataset) - - features, labels = _Inputs._parse_inputs(return_values) - return _Inputs(features, labels) - - @staticmethod - def _parse_inputs(return_values): - if isinstance(return_values, tuple): - features, labels = return_values - else: - features, labels = return_values, None - return features, labels - - @property - def is_dataset(self): - """Returns True if the return value from input_fn is Dataset.""" - return self._dataset is not None - - def dataset_initializer(self): - """Returns the dataset's initializer. - - The initializer must be run before calling `features_and_labels`. 
- """ - self._iterator = dataset_ops.make_initializable_iterator(self._dataset) - return self._iterator.initializer - - def features_and_labels(self): - """Gets `features` and `labels`.""" - if self.is_dataset: - if self._iterator is None: - raise RuntimeError('Internal error: Must run dataset_initializer ' - 'before calling features_and_labels(). Please file ' - 'a bug!') - return _Inputs._parse_inputs(self._iterator.get_next()) - - return (self._features, self._labels) - - def signals(self): - return self._signals - - @property - def dataset(self): - return self._dataset - - -class _InputsWithStoppingSignals(_Inputs): - """Inputs with `_StopSignals` inserted into the dataset.""" - - def __init__(self, - dataset, - batch_size, - add_padding=False, - num_invocations_per_step=1): - - assert dataset is not None - user_provided_dataset = dataset.map( - _InputsWithStoppingSignals.insert_stopping_signal( - stop=False, batch_size=batch_size, add_padding=add_padding)) - if num_invocations_per_step == 1: - final_batch_dataset = dataset.take(1).map( - _InputsWithStoppingSignals.insert_stopping_signal( - stop=True, batch_size=batch_size, add_padding=add_padding)) - else: - # We append (2 * num_invocations_per_step - 1) batches for exhausting the - # user_provided_dataset and stop properly. - # For example, if num_invocations_per_step is 2, we append 3 additional - # padding batches: b1, b2, b3. - # If user_provided_dataset contains two batches: a1, a2 - # Step 1: [a1, a2] - # Step 2: [b1, b2] -> STOP - # If user_provided_dataset contains three batches: a1, a2, a3. - # The training loops: - # Step 1: [a1, a2] - # Step 2: [a3, b1] - # Step 3: [b2, b3] -> STOP. - final_batch_dataset = dataset.take(1).map( - _InputsWithStoppingSignals.insert_stopping_signal( - stop=True, batch_size=batch_size, add_padding=add_padding)) - final_batch_dataset = final_batch_dataset.repeat( - 2 * num_invocations_per_step - 1) - - def _set_mask(data_dict): - signals = data_dict['signals'] - signals['padding_mask'] = array_ops.ones_like(signals['padding_mask']) - data_dict['signals'] = signals - return data_dict - - # Mask out the extra batch. - final_batch_dataset = final_batch_dataset.map(_set_mask) - - dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2) - - super(_InputsWithStoppingSignals, self).__init__(dataset=dataset) - self._current_inputs = None - - def features_and_labels(self): - if self._current_inputs is not None: - raise RuntimeError( - 'Internal Error: The previous inputs have not been properly ' - 'consumed. First call features_and_labels, then call signals.') - - inputs_with_signals = self._iterator.get_next() - features = inputs_with_signals['features'] - labels = inputs_with_signals.get('labels') - - self._current_inputs = inputs_with_signals - return features, labels - - def signals(self): - """Returns the `Signals` from `_Inputs`.""" - if self._current_inputs is None: - raise RuntimeError( - 'Internal Error: The current inputs have not been properly ' - 'generated. First call features_and_labels, then call signals.') - signals = self._current_inputs['signals'] - self._current_inputs = None - return signals - - @staticmethod - def insert_stopping_signal(stop, batch_size, add_padding=False): - """Inserts stopping_signal into dataset via _map_fn. - - Here we change the data structure in the dataset, such that the return value - is a dictionary now and `features`, `labels`, and `signals` are three - distinguished keys in that dict. 
This provides a better structure, which - eases the process to decompose the inputs (see `features_and_labels`). - - Args: - stop: bool, state of current stopping signals. - batch_size: int, batch size. - add_padding: bool, whether to pad the tensor to full batch size. - - Returns: - A map_fn passed to dataset.map API. - """ - - def _map_fn(*args): - """The map fn to insert signals.""" - if len(args) == 1: - # Unpack the single Tensor/dict argument as features. This is required - # for the input_fn returns no labels. - args = args[0] - features, labels = _Inputs._parse_inputs(args) - new_input_dict = {} - - if add_padding: - padding_mask, features, labels = ( - _PaddingSignals.pad_features_and_labels(features, labels, - batch_size)) - - new_input_dict['features'] = features - if labels is not None: - new_input_dict['labels'] = labels - - else: - new_input_dict['features'] = features - if labels is not None: - new_input_dict['labels'] = labels - padding_mask = None - - new_input_dict['signals'] = _StopSignals( - stop=stop, batch_size=batch_size, - padding_mask=padding_mask).as_dict() - - return new_input_dict - - return _map_fn - - -class _StopSignals(object): - """Signals class holding all logic to handle TPU stopping condition.""" - - NON_STOPPING_SIGNAL = False - STOPPING_SIGNAL = True - - def __init__(self, stop, batch_size, padding_mask=None): - self._stop = stop - self._batch_size = batch_size - self._padding_mask = padding_mask - - def as_dict(self): - """Returns the signals as Python dict.""" - shape = [self._batch_size, 1] - dtype = dtypes.bool - - if self._stop: - stopping = array_ops.ones(shape=shape, dtype=dtype) - else: - stopping = array_ops.zeros(shape=shape, dtype=dtype) - - signals = {'stopping': stopping} - if self._padding_mask is not None: - signals['padding_mask'] = self._padding_mask - return signals - - @staticmethod - def as_scalar_stopping_signal(signals): - return array_ops.identity(signals['stopping'][0][0]) - - @staticmethod - def should_stop(scalar_stopping_signal): - """Detects whether scalar_stopping_signal indicates stopping.""" - if isinstance(scalar_stopping_signal, ops.Tensor): - # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF - # way to express the bool check whether scalar_stopping_signal is True. - return math_ops.logical_and(scalar_stopping_signal, - _StopSignals.STOPPING_SIGNAL) - else: - # For non Tensor case, it is used in SessionRunHook. So, we cannot modify - # the graph anymore. Here, we use pure Python. 
- return bool(scalar_stopping_signal) - - -class _PaddingSignals(object): - """Signals class holding all logic to handle padding.""" - - @staticmethod - def pad_features_and_labels(features, labels, batch_size): - """Pads out the batch dimension of features and labels.""" - real_batch_size = array_ops.shape( - _PaddingSignals._find_any_tensor(features))[0] - - batch_size_tensor = constant_op.constant(batch_size, dtypes.int32) - - check_greater = check_ops.assert_greater_equal( - batch_size_tensor, - real_batch_size, - data=(batch_size_tensor, real_batch_size), - message='The real batch size should not be greater than batch_size.') - - with ops.control_dependencies([check_greater]): - missing_count = batch_size_tensor - real_batch_size - - def pad_single_tensor(tensor): - """Pads out the batch dimension of a tensor to the complete batch_size.""" - rank = len(tensor.shape) - assert rank > 0 - padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) - padded_shape = (batch_size,) + tuple(tensor.shape[1:]) - padded_tensor = array_ops.pad(tensor, padding) - padded_tensor.set_shape(padded_shape) - return padded_tensor - - def nest_pad(tensor_or_dict): - return nest.map_structure(pad_single_tensor, tensor_or_dict) - - features = nest_pad(features) - if labels is not None: - labels = nest_pad(labels) - - padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count, - batch_size) - - return padding_mask, features, labels - - @staticmethod - def slice_tensor_or_dict(tensor_or_dict, signals): - """Slice the real Tensors according to padding mask in signals.""" - - padding_mask = signals['padding_mask'] - batch_size = array_ops.shape(padding_mask)[0] - - def verify_batch_size(tensor): - check_batch_size = math_ops.equal(batch_size, tensor.shape[0]) - with ops.control_dependencies([check_batch_size]): - return array_ops.identity(tensor) - - def slice_single_tensor(tensor): - rank = len(tensor.shape) - assert rank > 0 - real_batch_size = batch_size - math_ops.reduce_sum(padding_mask) - return verify_batch_size(tensor)[0:real_batch_size] - - # As we split the Tensors to all TPU cores and concat them back, it is - # important to ensure the real data is placed before padded ones, i.e., - # order is preserved. By that, the sliced padding mask should have all 0's. - # If this assertion failed, # the slice logic here would not hold. - sliced_padding_mask = slice_single_tensor(padding_mask) - assert_padding_mask = math_ops.equal( - math_ops.reduce_sum(sliced_padding_mask), 0) - - with ops.control_dependencies([assert_padding_mask]): - should_stop = _StopSignals.should_stop( - _StopSignals.as_scalar_stopping_signal(signals)) - - is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0) - - def slice_fn(tensor): - # If the current batch is full batch or part of stopping signals, we do - # not need to slice to save performance. 
- return control_flow_ops.cond( - math_ops.logical_or(should_stop, is_full_batch), - (lambda: verify_batch_size(tensor)), - (lambda: slice_single_tensor(tensor))) - - return nest.map_structure(slice_fn, tensor_or_dict) - - @staticmethod - def _find_any_tensor(batch_features): - tensors = [ - x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor) - ] - if not tensors: - raise ValueError('Cannot find any Tensor in features dict.') - return tensors[0] - - @staticmethod - def _padding_mask(real_batch_size, missing_count, batch_size): - padding_mask = array_ops.concat([ - array_ops.zeros((real_batch_size,), dtype=dtypes.int32), - array_ops.ones((missing_count,), dtype=dtypes.int32) - ], - axis=0) - padding_mask.set_shape((batch_size,)) - return padding_mask - - -def _verify_cross_hosts_transfer_size(tensor_dict, message): - total_size = 0 - tensor_structure = {} - for key, tensor in tensor_dict.items(): - shape = tensor.shape - size = np.product(shape) * tensor.dtype.size - tensor_structure[key] = shape - total_size += size - if total_size >= _ONE_GIGABYTE: - raise ValueError( - '{} The transfer size is larger than the protobuf limit. Please ' - 'consider to use Tensors with smaller shapes or reduce batch ' - 'size. Given:\n' - '{}'.format( - message, '\n'.join([ - ' -- Key: {}, Shape: {}'.format(k, v) - for k, v in tensor_structure.items() - ]))) - - -def _add_item_to_params(params, key, value): - """Adds a new item into `params`.""" - if isinstance(params, hparam.HParams): - # For HParams, we need to use special API. - if key in params: - params.set_hparam(key, value) - else: - params.add_hparam(key, value) - else: - # Now params is Python dict. - params[key] = value - - -def export_estimator_savedmodel(estimator, - export_dir_base, - serving_input_receiver_fn, - assets_extra=None, - as_text=False, - checkpoint_path=None, - strip_default_attrs=False): - """Export `Estimator` trained model for TPU inference. - - Args: - estimator: `Estimator` with which model has been trained. - export_dir_base: A string containing a directory in which to create - timestamped subdirectories containing exported SavedModels. - serving_input_receiver_fn: A function that takes no argument and returns a - `ServingInputReceiver` or `TensorServingInputReceiver`. - assets_extra: A dict specifying how to populate the assets.extra directory - within the exported SavedModel, or `None` if no extra assets are needed. - as_text: whether to write the SavedModel proto in text format. - checkpoint_path: The checkpoint path to export. If `None` (the default), - the most recent checkpoint found within the model directory is chosen. - strip_default_attrs: Boolean. If `True`, default-valued attributes will be - removed from the NodeDefs. - - Returns: - The string path to the exported directory. - """ - # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use - # `estimator.config`. - config = tpu_config.RunConfig(model_dir=estimator.model_dir) - est = TPUEstimator( - estimator._model_fn, # pylint: disable=protected-access - config=config, - params=estimator.params, - use_tpu=True, - train_batch_size=2048, # Does not matter. - eval_batch_size=2048, # Does not matter. 
- ) - return est.export_savedmodel(export_dir_base, serving_input_receiver_fn, - assets_extra, as_text, checkpoint_path, - strip_default_attrs) diff --git a/build/lib/caireCovid/mrqa/xlnet.py b/build/lib/caireCovid/mrqa/xlnet.py deleted file mode 100644 index ce14572..0000000 --- a/build/lib/caireCovid/mrqa/xlnet.py +++ /dev/null @@ -1,292 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import json -import os -import tensorflow as tf -from .mrqa.modeling import (transformer_xl, summarize_sequence) - - -def _get_initializer(FLAGS): - """Get variable intializer.""" - if FLAGS.init == "uniform": - initializer = tf.initializers.random_uniform( - minval=-FLAGS.init_range, - maxval=FLAGS.init_range, - seed=None) - elif FLAGS.init == "normal": - initializer = tf.initializers.random_normal( - stddev=FLAGS.init_std, - seed=None) - else: - raise ValueError("Initializer {} not supported".format(FLAGS.init)) - return initializer - - -class XLNetConfig(object): - """XLNetConfig contains hyperparameters that are specific to a model checkpoint; - i.e., these hyperparameters should be the same between - pretraining and finetuning. - - The following hyperparameters are defined: - n_layer: int, the number of layers. - d_model: int, the hidden size. - n_head: int, the number of attention heads. - d_head: int, the dimension size of each attention head. - d_inner: int, the hidden size in feed-forward layers. - ff_activation: str, "relu" or "gelu". - untie_r: bool, whether to untie the biases in attention. - n_token: int, the vocab size. - """ - - def __init__(self, FLAGS=None, json_path=None): - """Constructing an XLNetConfig. - One of FLAGS or json_path should be provided.""" - - assert FLAGS is not None or json_path is not None - - self.keys = ["n_layer", "d_model", "n_head", "d_head", "d_inner", - "ff_activation", "untie_r", "n_token"] - - if FLAGS is not None: - self.init_from_flags(FLAGS) - - if json_path is not None: - self.init_from_json(json_path) - - def init_from_flags(self, FLAGS): - for key in self.keys: - setattr(self, key, getattr(FLAGS, key)) - - def init_from_json(self, json_path): - with tf.gfile.Open(json_path) as f: - json_data = json.load(f) - for key in self.keys: - setattr(self, key, json_data[key]) - - def to_json(self, json_path): - """Save XLNetConfig to a json file.""" - json_data = {} - for key in self.keys: - json_data[key] = getattr(self, key) - - json_dir = os.path.dirname(json_path) - if not tf.gfile.Exists(json_dir): - tf.gfile.MakeDirs(json_dir) - with tf.gfile.Open(json_path, "w") as f: - json.dump(json_data, f, indent=4, sort_keys=True) - - -def create_run_config(is_training, is_finetune, FLAGS): - kwargs = dict( - is_training=is_training, - use_tpu=FLAGS.use_tpu, - use_bfloat16=FLAGS.use_bfloat16, - dropout=FLAGS.dropout, - dropatt=FLAGS.dropatt, - init=FLAGS.init, - init_range=FLAGS.init_range, - init_std=FLAGS.init_std, - clamp_len=FLAGS.clamp_len) - - if not is_finetune: - kwargs.update(dict( - mem_len=FLAGS.mem_len, - reuse_len=FLAGS.reuse_len, - bi_data=FLAGS.bi_data, - clamp_len=FLAGS.clamp_len, - same_length=FLAGS.same_length)) - - return RunConfig(**kwargs) - - -class RunConfig(object): - """RunConfig contains hyperparameters that could be different - between pretraining and finetuning. - These hyperparameters can also be changed from run to run. - We store them separately from XLNetConfig for flexibility. 
- """ - - def __init__(self, is_training, use_tpu, use_bfloat16, dropout, dropatt, - init="normal", init_range=0.1, init_std=0.02, mem_len=None, - reuse_len=None, bi_data=False, clamp_len=-1, same_length=False): - """ - Args: - is_training: bool, whether in training mode. - use_tpu: bool, whether TPUs are used. - use_bfloat16: bool, use bfloat16 instead of float32. - dropout: float, dropout rate. - dropatt: float, dropout rate on attention probabilities. - init: str, the initialization scheme, either "normal" or "uniform". - init_range: float, initialize the parameters with a uniform distribution - in [-init_range, init_range]. Only effective when init="uniform". - init_std: float, initialize the parameters with a normal distribution - with mean 0 and stddev init_std. Only effective when init="normal". - mem_len: int, the number of tokens to cache. - reuse_len: int, the number of tokens in the currect batch to be cached - and reused in the future. - bi_data: bool, whether to use bidirectional input pipeline. - Usually set to True during pretraining and False during finetuning. - clamp_len: int, clamp all relative distances larger than clamp_len. - -1 means no clamping. - same_length: bool, whether to use the same attention length for each token. - """ - - self.init = init - self.init_range = init_range - self.init_std = init_std - self.is_training = is_training - self.dropout = dropout - self.dropatt = dropatt - self.use_tpu = use_tpu - self.use_bfloat16 = use_bfloat16 - self.mem_len = mem_len - self.reuse_len = reuse_len - self.bi_data = bi_data - self.clamp_len = clamp_len - self.same_length = same_length - - -class XLNetModel(object): - """A wrapper of the XLNet model used during both pretraining and finetuning.""" - - def __init__(self, xlnet_config, run_config, input_ids, seg_ids, input_mask, - mems=None, perm_mask=None, target_mapping=None, inp_q=None, - **kwargs): - """ - Args: - xlnet_config: XLNetConfig, - run_config: RunConfig, - input_ids: int32 Tensor in shape [len, bsz], the input token IDs. - seg_ids: int32 Tensor in shape [len, bsz], the input segment IDs. - input_mask: float32 Tensor in shape [len, bsz], the input mask. - 0 for real tokens and 1 for padding. - mems: a list of float32 Tensors in shape [mem_len, bsz, d_model], memory - from previous batches. The length of the list equals n_layer. - If None, no memory is used. - perm_mask: float32 Tensor in shape [len, len, bsz]. - If perm_mask[i, j, k] = 0, i attend to j in batch k; - if perm_mask[i, j, k] = 1, i does not attend to j in batch k. - If None, each position attends to all the others. - target_mapping: float32 Tensor in shape [num_predict, len, bsz]. - If target_mapping[i, j, k] = 1, the i-th predict in batch k is - on the j-th token. - Only used during pretraining for partial prediction. - Set to None during finetuning. - inp_q: float32 Tensor in shape [len, bsz]. - 1 for tokens with losses and 0 for tokens without losses. - Only used during pretraining for two-stream attention. - Set to None during finetuning. 
- """ - - initializer = _get_initializer(run_config) - - tfm_args = dict( - n_token=xlnet_config.n_token, - initializer=initializer, - attn_type="bi", - n_layer=xlnet_config.n_layer, - d_model=xlnet_config.d_model, - n_head=xlnet_config.n_head, - d_head=xlnet_config.d_head, - d_inner=xlnet_config.d_inner, - ff_activation=xlnet_config.ff_activation, - untie_r=xlnet_config.untie_r, - - is_training=run_config.is_training, - use_bfloat16=run_config.use_bfloat16, - use_tpu=run_config.use_tpu, - dropout=run_config.dropout, - dropatt=run_config.dropatt, - - mem_len=run_config.mem_len, - reuse_len=run_config.reuse_len, - bi_data=run_config.bi_data, - clamp_len=run_config.clamp_len, - same_length=run_config.same_length - ) - - input_args = dict( - inp_k=input_ids, - seg_id=seg_ids, - input_mask=input_mask, - mems=mems, - perm_mask=perm_mask, - target_mapping=target_mapping, - inp_q=inp_q) - tfm_args.update(input_args) - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - (self.output, self.new_mems, self.lookup_table - ) = modeling.transformer_xl(**tfm_args) - - self.input_mask = input_mask - self.initializer = initializer - self.xlnet_config = xlnet_config - self.run_config = run_config - - def get_pooled_out(self, summary_type, use_summ_proj=True): - """ - Args: - summary_type: str, "last", "first", "mean", or "attn". The method - to pool the input to get a vector representation. - use_summ_proj: bool, whether to use a linear projection during pooling. - - Returns: - float32 Tensor in shape [bsz, d_model], the pooled representation. - """ - - xlnet_config = self.xlnet_config - run_config = self.run_config - - with tf.variable_scope("model", reuse=tf.AUTO_REUSE): - summary = modeling.summarize_sequence( - summary_type=summary_type, - hidden=self.output, - d_model=xlnet_config.d_model, - n_head=xlnet_config.n_head, - d_head=xlnet_config.d_head, - dropout=run_config.dropout, - dropatt=run_config.dropatt, - is_training=run_config.is_training, - input_mask=self.input_mask, - initializer=self.initializer, - use_proj=use_summ_proj) - - return summary - - def get_sequence_output(self): - """ - Returns: - float32 Tensor in shape [len, bsz, d_model]. The last layer hidden - representation of XLNet. - """ - - return self.output - - def get_new_memory(self): - """ - Returns: - list of float32 Tensors in shape [mem_len, bsz, d_model], the new - memory that concatenates the previous memory with the current input - representations. - The length of the list equals n_layer. - """ - return self.new_mems - - def get_embedding_table(self): - """ - Returns: - float32 Tensor in shape [n_token, d_model]. The embedding lookup table. - Used for tying embeddings between input and output layers. - """ - return self.lookup_table - - def get_initializer(self): - """ - Returns: - A tf initializer. Used to initialize variables in layers on top of XLNet. 
- """ - return self.initializer - diff --git a/build/lib/caireCovid/qa.py b/build/lib/caireCovid/qa.py deleted file mode 100644 index 5933ade..0000000 --- a/build/lib/caireCovid/qa.py +++ /dev/null @@ -1,258 +0,0 @@ -import os -import sys -from collections import namedtuple -import tensorflow as tf -from nltk.tokenize import sent_tokenize - -from .mrqa.predictor_kaggle import mrqa_predictor -from .biobert.predictor_biobert import biobert_predictor - - -class QaModule(): - def __init__(self, model_name, model_path, spiece_model, bert_config, bert_vocab): - # init QA models - self.model_name = model_name - self.model_path = model_path - self.spiece_model = spiece_model - self.bert_config = bert_config - self.bert_vocab = bert_vocab - self.getPredictors() - - def readIR(self, data): - synthetic = [] - - idx = 0 - for data_item in data: - question = data_item["question"] - answer = data_item["data"]["answer"] - contexts = data_item["data"]["context"] - dois = data_item["data"]["doi"] - titles = data_item["data"]["titles"] - - for (context, doi, title) in zip(contexts, dois, titles): - data_sample = { - "context": context, - "qas": [] - } - - qas_item = { - "id": idx, - "question": question, - "answer": answer, - "doi": doi, - "title": title, - } - - data_sample["qas"].append(qas_item) - synthetic.append(data_sample) - - idx += 1 - return synthetic - - def mrqaPredictor(self, data): - return mrqa_predictor(self.mrqaFLAGS, self.mrqa_predict_fn, data) - - def biobertPredictor(self, data): - return biobert_predictor(self.bioFLAGS, self.bio_predict_fn, data) - - def getPredictors(self): - if "mrqa" in self.model_name: - self.mrqa_predict_fn = self.getPredictor("mrqa") - if "biobert" in self.model_name: - self.bio_predict_fn = self.getPredictor("biobert") - - def getPredictor(self, model_name): - modelpath = self.getModelPath(model_name) - if model_name == 'mrqa': - d = { - "uncased": False, - "start_n_top": 5, - "end_n_top": 5, - "use_tpu": False, - "train_batch_size": 1, - "predict_batch_size": 1, - "shuffle_buffer": 2048, - "spiece_model_file": self.spiece_model, - "max_seq_length": 512, - "doc_stride": 128, - "max_query_length": 64, - "n_best_size": 5, - "max_answer_length": 64, - } - self.mrqaFLAGS = namedtuple("FLAGS", d.keys())(*d.values()) - return tf.contrib.predictor.from_saved_model(modelpath) - elif model_name == 'biobert': - d = { - "version_2_with_negative": False, - "null_score_diff_threshold": 0.0, - "verbose_logging": False, - "init_checkpoint": None, - "do_lower_case": False, - "bert_config_file": self.bert_config, - "vocab_file": self.bert_vocab, - "train_batch_size": 1, - "predict_batch_size": 1, - "max_seq_length": 384, - "doc_stride": 128, - "max_query_length": 64, - "n_best_size": 5, - "max_answer_length": 30, - } - self.bioFLAGS = namedtuple("FLAGS", d.keys())(*d.values()) - return tf.contrib.predictor.from_saved_model(modelpath) - else: - raise ValueError("invalid model name") - - def getModelPath(self, model_name): - index = self.model_name.index(model_name) - return self.model_path[index] - - def getAnswers(self, data): - """ - Output: - List [{ - "question": "xxxx", - "data": - { - "answer": ["answer1", "answer2", ...], - "confidence": [1,2, ...], - "context": ["paragraph1", "paragraph2", ...], - } - }] - """ - answers = [] - qas = self.readIR(data) - for qa in qas: - question = qa["qas"][0]["question"] - if len(answers)==0 or answers[-1]["question"]!=question: - answer_sample = {} - answer_sample["question"] = question - answer_sample["data"] = { - "answer": [], - 
"context": [], - "title": [], - "doi": [], - } - answers.append(answer_sample) - - context = qa["context"] - doi = qa["qas"][0]["doi"] - title = qa["qas"][0]["title"] - - answers[-1]["data"]["context"].append(context) - answers[-1]["data"]["doi"].append(doi) - answers[-1]["data"]["title"].append(title) - - sents = sent_tokenize(context) - spans = self.convert_idx(context, sents) - - if "mrqa" in self.model_name: - raw_mrqa = self.mrqaPredictor([qa]) - # get sentence from MRQA - raw = raw_mrqa[qa["qas"][0]["id"]] - # question answering one by one - answer_start = context.find(raw, 0) - answer_end = answer_start + len(raw) - answer_span = [] - for idx, span in enumerate(spans): - if not (answer_end <= span[0] or answer_start >= span[1]): - answer_span.append(idx) - - y1, y2 = answer_span[0], answer_span[-1] - if not y1 == y2: - # context tokens in index y1 and y2 should be merged together - # print("Merge knowledge sentence") - answer_sent_mrqa = " ".join(sents[y1:y2+1]) - else: - answer_sent_mrqa = sents[y1] - assert raw in answer_sent_mrqa - else: - answer_sent_mrqa = "" - - - if "biobert" in self.model_name: - raw_bio = self.biobertPredictor([qa]) - # get sentence from BioBERT - raw = raw_bio[qa["qas"][0]["id"]] - if raw == "empty" or "": - answer_sent_bio = "" - else: - # question answering one by one - answer_start = context.find(raw, 0) - answer_end = answer_start + len(raw) - answer_span = [] - for idx, span in enumerate(spans): - if not (answer_end <= span[0] or answer_start >= span[1]): - answer_span.append(idx) - - y1, y2 = answer_span[0], answer_span[-1] - if not y1 == y2: - # context tokens in index y1 and y2 should be merged together - # print("Merge knowledge sentence") - answer_sent_bio = " ".join(sents[y1:y2+1]) - else: - answer_sent_bio = sents[y1] - - # if raw not in answer_sent_bio: - # print("RAW", raw) - # print("BIO", answer_sent_bio) - assert raw in answer_sent_bio - else: - answer_sent_bio = "" - - if answer_sent_mrqa == answer_sent_bio or answer_sent_mrqa in answer_sent_bio: - # print("SAME OR QA < BIO") - answer_sent = answer_sent_bio - elif answer_sent_bio in answer_sent_mrqa: - # print("BIO < QA") - answer_sent = answer_sent_mrqa - else: - # print("DIFFERENT ANSWERS") - answer_sent= " ".join([answer_sent_mrqa, answer_sent_bio]) - - answers[-1]["data"]["answer"].append(answer_sent) - return answers - - def convert_idx(self, text, tokens): - current = 0 - spans = [] - for token in tokens: - current = text.find(token, current) - if current < 0: - print("Token {} cannot be found".format(token)) - raise Exception() - spans.append((current, current + len(token))) - current += len(token) - return spans - -def print_answers_in_file(answers, filepath="./answers.txt"): - """ - Input: - List [{ - "question": "xxxx", - "data": - { - "answer": ["answer1", "answer2", ...], - "confidence": [1,2, ...], - "context": ["paragraph1", "paragraph2", ...], - } - }] - """ - with open(filepath, "w") as f: - print("WRITE ANSWERS IN FILES ...") - for item in answers: - question = item["question"] - cas = item["data"] - for (answer, context) in zip(cas["answer"], cas["context"]): - f.write("-"*80+"\n") - f.write("context: "+context+"\n") - f.write("-"*80+"\n") - f.write("question: "+question+"\n") - f.write("-"*80+"\n") - f.write("answer: "+answer+"\n") - f.write("="*80+"\n") - - - - - diff --git a/build/lib/caireCovid/retrieval.py b/build/lib/caireCovid/retrieval.py deleted file mode 100644 index 05a4cc8..0000000 --- a/build/lib/caireCovid/retrieval.py +++ /dev/null @@ -1,83 +0,0 @@ - 
-import json -import requests - -def retrieve_paragraph(query): - url = "http://hlt027.ece.ust.hk:5000/query_paragraph" - - payload = "{\n\t\"text\": \""+query+"\"\n}" - headers = { - 'Content-Type': "application/json", - 'cache-control': "no-cache", - 'Postman-Token': "696fa512-5fed-45ca-bbe7-b7a1b4d19fe4" - } - response = requests.request("POST", url, data=payload, headers=headers) - - response = response.json() - return response - - -def information_retrieval(file_name): - """ - Inputs: - file_name: file name - Outputs: - all_results: - List [{ - "question": "xxxx", - "data": retri_result - }] - data_for_qa: - List [{ - "question": "xxxx", - "data": - { - "answer": "", - "context": ['paragraph1', 'paragraph2', ], - } - }] - """ - with open(file_name) as f: - json_file = json.load(f) - subtasks = json_file["sub_task"] - - all_results = [] - data_for_qa = [] - for item in subtasks: - questions = item["questions"] - for query in questions: - result_item = {"question" : query} - retri_result = retrieve_paragraph(query) - result_item["data"] = retri_result - - qa_item = {"question": query} - context = [] - titles = [] - doi = [] - count = 1 - for item in retri_result: - #context.append(item["paragraph"] if "paragraph" in item and len(item["paragraph"]) > 0 else item["abstract"]) - if count>20: - break - if 'abstract' in item and len(item['abstract']) > 0: - context.append(item['abstract']) - doi.append(item["doi"]) - titles.append(item["title"]) - count+=1 - if 'paragraphs' in item: - # for para in item['paragraphs']: - # context.append(para['text']) - # count+=1 - # if count>20: - # break - context.append(item['paragraphs'][0]['text']) - doi.append(item["doi"]) - titles.append(item["title"]) - count+=1 - - qa_item["data"] = {"answer": "", "context": context, "doi": doi, "titles": titles} - - all_results.append(result_item) - data_for_qa.append(qa_item) - - return all_results, data_for_qa \ No newline at end of file diff --git a/build/lib/caireCovid/test_api.py b/build/lib/caireCovid/test_api.py deleted file mode 100644 index b4b6d86..0000000 --- a/build/lib/caireCovid/test_api.py +++ /dev/null @@ -1,51 +0,0 @@ -from flask import Flask, request, jsonify -import json -from retrieval import retrieve_paragraph -from qa import QaModule - -def get_qa_result(query): - temp_json = retrieve_paragraph(query) - qa_item = {'question': query} - contexts = [] - titles = [] - doi = [] - count = 1 - for item in temp_json: - if count>10: - break - if 'abstract' in item and len(item['abstract']) > 0: - contexts.append(item['abstract']) - if 'paragraphs' in item: - contexts.append(item['paragraphs'][0]['text']) - doi.append(item["doi"]) - titles.append(item["title"]) - count+=1 - #print(len(doi), len(titles)) - qa_item['data'] = {'answer': '', 'context':contexts, 'doi': doi, 'titles': titles} - data_for_qa = [qa_item] - qa_model = QaModule(['mrqa', 'biobert']) - answers = qa_model.getAnswers(data_for_qa) - output_list = [] - for i in range(len(answers[0]['data']['answer'])): - outJson = {} - outJson['question'] = answers[0]['question'] - outJson['answer'] = answers[0]['data']['answer'][i] - outJson['context'] = answers[0]['data']['context'][i] - outJson['doi'] = doi[i] - outJson['title'] = titles[i] - output_list.append(outJson) - #print(len(output_list)) - return output_list - -#print(json.dumps(get_qa_result('incubation period of covid-19 in humans'), indent=4)) - -app = Flask(__name__) - -@app.route('/query_qa', methods=['POST']) -def return_matches(): - content = request.json - out = 
get_qa_result(content['text']) - return jsonify(out) - -if __name__ == '__main__': - app.run(host= '0.0.0.0',debug=True) diff --git a/caireCovid.egg-info/PKG-INFO b/caireCovid.egg-info/PKG-INFO deleted file mode 100644 index 2e95fd8..0000000 --- a/caireCovid.egg-info/PKG-INFO +++ /dev/null @@ -1,17 +0,0 @@ -Metadata-Version: 2.1 -Name: caireCovid -Version: 0.1.0 -Summary: system for covid-19. -Home-page: https://github.com/yana-xuyan/caire-covid -Author: yana -Author-email: yxucb1229@gmail.com -License: UNKNOWN -Description: # caire-covid - Kaggle system for covid 19 - -Platform: UNKNOWN -Classifier: Programming Language :: Python :: 3 -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Requires-Python: >=3.6 -Description-Content-Type: text/markdown diff --git a/caireCovid.egg-info/SOURCES.txt b/caireCovid.egg-info/SOURCES.txt deleted file mode 100644 index a83c5a5..0000000 --- a/caireCovid.egg-info/SOURCES.txt +++ /dev/null @@ -1,27 +0,0 @@ -README.md -setup.py -caireCovid/__init__.py -caireCovid/main.py -caireCovid/qa.py -caireCovid/retrieval.py -caireCovid.egg-info/PKG-INFO -caireCovid.egg-info/SOURCES.txt -caireCovid.egg-info/dependency_links.txt -caireCovid.egg-info/top_level.txt -caireCovid/biobert/__init__.py -caireCovid/biobert/modeling.py -caireCovid/biobert/optimization.py -caireCovid/biobert/predictor_biobert.py -caireCovid/biobert/run_factoid.py -caireCovid/biobert/save_biobert.py -caireCovid/biobert/tokenization.py -caireCovid/mrqa/__init__.py -caireCovid/mrqa/data_utils.py -caireCovid/mrqa/function_builder.py -caireCovid/mrqa/model_utils.py -caireCovid/mrqa/modeling.py -caireCovid/mrqa/multiqa_utils.py -caireCovid/mrqa/predictor_kaggle.py -caireCovid/mrqa/prepro_utils.py -caireCovid/mrqa/tpu_estimator.py -caireCovid/mrqa/xlnet.py \ No newline at end of file diff --git a/caireCovid.egg-info/dependency_links.txt b/caireCovid.egg-info/dependency_links.txt deleted file mode 100644 index 8b13789..0000000 --- a/caireCovid.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/caireCovid.egg-info/top_level.txt b/caireCovid.egg-info/top_level.txt deleted file mode 100644 index 1c4c208..0000000 --- a/caireCovid.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -caireCovid diff --git a/dist/caireCovid-0.1.0-py3-none-any.whl b/dist/caireCovid-0.1.0-py3-none-any.whl deleted file mode 100644 index 5221d1452a0e0c88720a71fbbfa8708f8387a18f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 253719 zcmZ^qW2|UFv!=Ig`)u2`ZQHhO+qP}nwr$(qXKT((=Ewc+%w0*ZWc}z=(p9zkt;$pK zQotZ60000G0Hfi7@~6$f6K_g!qJ4r$j;Tmm`+d6!q&oBPmk8#gJ*I8 z(w`m%px>`-9N>_YHya;B8h@VD2Ft}qVE!#9m`y#1DVeS^zs8HWJVLBUneD4#JPX{% zs%$2%=@E!$8EN8lhCu;VgenbO{y*%K@2W=n00ICg0|Njc|2O+K1{SveY`Y}L*agwU zgx-Cis=5{+8Cu4rxUQR&-PAJ)lG7BtfI-SkcUD~bem13b!onKyUiJZ@TtYfd|ViCBzoKf{DQaZ=MqD*=4y77)E zt!`KlEH5=^xx;>>P>xid(2=secm50zzV*B@vMq{nZTJ$O^qZZkNCaJTBpsGbsi|=u znjs~Upw((qVg&6F>{^U99ymUa8jwh`VdjC~IG#ptf^RdE82yxnX&vKisxwYf(exzl-0|Xi#!OJ<0Y5Q$L#cR%zjYPvuh-l_h?=}A=gtmqFgzMyHufy+nSU(X z7BxEYQIC4h1J9j^M=OCqDls*JL#kupnqsFx!MY8yX+EfwN2-{_D?d3Ip<=lOp)F6s zP9yVA45sm{;z8C#F3|R|+w#^abu@6o$q$WAO(GB=`nRD10ripfgrma10uz`7Je7No zZoK^L1leC+Xg5fw&m_D6GGoMtZ}ymwyRK}K1%K+!M zm?($j6Qq?m&@KOGNBP8oGututOtgEb@eU)?91+;`(yxFM7kO_NG$*kd7KA`*)N2KM zu1H;*{a}r4KzJ%xb;PJ(7yhaiFhLvtONSN&;A1X%kk9hUy~7%fBgcd*mEAfk`N`;O zjvan}fBL?*)dz}JE(S}?^@Ce-@br(umcPqZGJw^Lwq$GUAcp%GI@8f5WFb_}{FcCS z&FIej1u>+GX|FC7g8uP+HtA(Tl{r!?)ZCr_j?{T(_md`@kQc^0M7*y?7$z%;0uvRQ 
z4m1Nm#*;*p3g*Nt+B1_R!kK1wa?&oAbVH7Oe7HTTGxL&NIAf@{>pVJo9hw>#$x>c4 z%S2uRDn~H{6^SY}z3K4x4?&mVD)g;Q<_6Z^$duj46r)~>;)cJ+W3f(}Xb}~#6EkA@ zl4s3eHEAjsutssC>ahqtpz>mNaRabl%S2EzVpLnTKBBidI<(dHS7W#2lP4+Uwapr3 z0e1nt4q{GZEgE!3!6F!JWI}*QF+lhC$}4#JVbyc}KJ@{wp-oAHBy-Eaw)vC3qnj6< zvV@>W>)Jn-f~g3b(ox*=?KdNtOJ+E=GmjEH;7KD_Ab+Lg){svD0hdN?E!_8sPCPwa z=>2B;e9QHN)h9qIRzG99?VLj$Qg&2S&3G0w-`Y2(z5638Uj}H>^(y zkZ(r{z<71Z--koc7d;e!jw6%n+x4cZ;Y@ofN!{((F}zaPFDcEYjT^uy-NeOty<*L?3t z(~LK03RdsVg3)|vULE&k=*@EEO~T0O>+ny5dYHQ_9wwbT=wXOr@PlZI%|NXXI6L}R z+$xu)cNPXs;i8i0c}6WfBA=B0u_%(y8k^lVuotZbv@FBE2DKCb|A~d+^bl!ID+X2q zG64wmULl=@pW;6=7B*TfC?d4c1TWj{JiIZcRtJj#&!4Sp0lkqHv_UZomiwmAk&>6M z*oc;X4l??VMjFHVOm0F+2M)Q)229JjAw6IR%Or;9g1Hdz+)boip_Xhf14xs2WzZt`AO$vqD0jAD6Ks5C|FoU!(?(lOZNpP;f6p3|!6@Fo|QArryTiaSS_a8#{r% zx+5?b;j-X@M#zu9qRm~4!DY_RkMrbyJTM@g>g>tcT%o{^6)a+;$l62!bXvvmI} zGIv}J;)ukQ&SrDdAg5}f7j~lt_vz^)Su1Lt^Lb($%$Juih4|y0tXtsAd#YLXhOCPL zd)*kS4YN`@0NM&3={g=fSasUVyjyA{{N4Kr0rnL!o*z#t*JV**ks9JD@4gXD;PXJd zxi@d%W>cGV!~;wZ{tY?jGHGcBhr3g8_PRYg@_fPUEYYM_$Dm6hmSSH>Dmr$>LCwQe zpcSfRSm%?8hVeO+Re#AEI_a(uz=GFx*6y>4Rpd~l19i&PNm`ns)XQ>Sl?hYurb%{l zZ?|>pvS=df6QMM-rhVdU>r*+tOh}XP8VN-jdM@MhWidZ`gs47`-vt2FSr+eaxA2SU z(pvt~7p>i;G7HOBT6tVJL`bMvv3PB60DFs<*VYV~O`+xUX=g3lk|ra=f-jyxJSK;1 ztU|>%Z7WfyaAHQ^#~;wm6zSmYA=3q@i zgTGKBYMfMeOScFbKVC+fD(as!H>E~mS4o|3DR857>eMAVZzdUA+Wik7kB>#p9?XgQsz;%nfZ zg-N&4jNWeW!r+w?J(Ht(M_Pno6+ASV6rs&pD8W3NBvv#44a$X{?*W&Al{YHTZjc6b!!4lLN!1P(+kHF;8c#nbz<2JUE}JRx8P_2DTmAyr3K{zaPV8XuI1~ z&QS$%Q5X`#)IYdO?OB7re8>!5ybq}wwz195B>0js*+m2hHw}1lVdFVxxB+9d7?!7T zt!%j4wlS=F;ut|t;iVye&d#3`a74-M?C_j^8%rv&D3uBBKBinYQ9f^k&XlfR#>K{K ze0GgUeHvdLrw_8_&sc$1I8dpC1>UnPQzPmleqhxgrsm}sCG{4K)j9#$cYQw4qlVV< z2v7nW_1iEZ-~%QJMoM>CF8V3IF%E`cN7FEz)Garq8hz?I&T-%-duG8a3WMqIUvbqb z*#tT$rIs@3awseY?PaL6tfe$GBt-X07%Cnr2;OCiP;{()38GP)fqA**Y%B&w0)99= zaef_??7s;vZEd_r=!HEwhK8;C+HgtJoDVuWD+Q(H`}ed2ye*MXRc&x(%u`Tw6Sqmu z*``km=(%l`Yb){&)O`Ts;jqMsBcK+){4eCHL8Kh;(KeuWBuxP-z^3J$BD&kv{;{6L zmeP*Z&X9FzYwpPxr0wObv|zes#aXCB&aO~R-J%|lmY2iI!W9j(>Q#`%JI%Wxp)56T zx1h!|Z*dz=@h>IFuJeqB z>=KTs#lo&?R+g5s2l`6sIKO2q151`Z1wXNM0}?W$_6P`U1dynd4$i?1Fn^*}Jo!ch+;C8NyHv_!gptE>{G-F?qlg0Ae=pDfE z_Qf;9SJdQ$+dFRscmOjXlV(i<()k4fbU`a``k|8ys_YNguf+CfBc`Da-h# z2zL=(F?Nsql{$)}u`rqLN(JAV00>5tm`LdgK~pGXm4_tbtX@wiezg6b;YbWL3DVR_ z%|8ejs#6w%wOWdzm+`Xw@=ZL{5|rH~N?iom0R%Jp;?1>NY{tL(ChGk2;|!Fe=AI|T za)rn-*hh7n`H?s$AHq8HhHW%*e;hM;sE4G%%Cc2ZV#Zma zC+C9EV9y5`ol-;a+AKE$TZVeYb@0~^GW7i7cCM3=PT`8hLut=ysA#u5Wh(uMd>}3h zS=OJ^XA~W$oE^p>hs{e9eN5k^5J5`=2AUr&!2jl!N8;Jfcg`7xb$3?He6jnDB zv1v-0&)=Jwh~gAEdJEHm2k6VE1#AeMW1XLK#5?mH!UJ+qQMY zE_g*J@MD%{5mPy?yTg8w*AG7CGbBB#IT&wRR6dfs%7V4*(}tUhl1UV~1}6fqtS(Bu zXVRBvN@W%xUK}}}eFPbh8-rwXWh(8yTG*`6@Be5zS9vg0Vub7K9hhz$8a7n{eNg{2 z{i~SO=f7oEwPosM21iyvwJ~c*{nNi|0;{$+4mH#OHt-Ax7%cUHhqqS%K%-Mdu*evb z4a{+!HR?>Mn+@j8bhy5Gvw2CM)h|_2YbY|x$53#YzyyA_D#YIWH2FuN2jdN_4fpcT zk1V>klB|KD+*=Mi0~DX-Wp9+RVN0-RJK#OR3Gzlq5G*tNX|;4;2isl;d`K&Uih~KR zl^WNZbT@bwX^oDVJWZOuc|b~rLjFf`CgcNfXJ9l4@_Cngplf#)xD@7wyxHjYdvjCa;;_`?O^n~K(1+%2SvnS+&N9WKzOylKXe01O=-S*X)#iLsCp48P?556L zh(?{qQlBRJKwg__$@nLg2;Xp;CRx?TLkj2J526(dT3DavYm0Zqiz_*%DizDiaHG!7qc8F%SSbIZB23TGh*hOCdCqmYpi07E4qiVuL z7%zFZ_k;h5X|q}^x9zbPaE2q17D@3R2*K(|jQxSxv_GF-J^|+tS?(R|yR=;H4enj4 zCCQ$E1ydF}+%bb|Ui)x%D0quxf$gC{7QaM)0!|YJg9@5}W+u6^*4iKEpr z++-)z_M=zfg zEE48V4iV8k+*@;*Ee&d8%S&8j!%g8h*9(ktc5{q$Gh+2NVQr{#)(ZNz12NfPKJComVrJa-f73_e z3Xkk|fik^y__eqN`@DtSF3;)ibvbR`K5THkG@Y&88P>>jOc^#Ad2sdFm-+JkopLwT z{NwaVsad<&mHwTDJd#zOj<-}cRTGi6rOw9q!8DKhV@SB8i{GQJa?U~?;yp~swo~p| zr)+O^c@d|t>Z?%~a87mof{XU*`LNSQhQdmn31yPJ^+RfB$F??6sEONpDiwdZhWvAO 
z-&tM@ua?Y)-CZEGfYYh6yeg-6ORG^lcw+{NO!T(=)is8Z%M%NFxeSY&`i+xCYO)9h z6a+^JF_Grzez|)FM=|{nMVWV@-R$PNI|>>Ej{~M4pt9!%ven#f^&Iwb7|+W%xDHMA zxFhIvH=`r_{xHbgA*J~8A$ZfM@Q1;mOShh z9CfEcxKFp8B)q&RdAE)1EkfZIM%tBE?ENA@;;qQDQWv~u(S)bjC>Z6%0i(Iu`)Rq0 zG5lI2jS^V4{2b|CU}b)~hY?i!!2RB%1x_1|0df?gD#?}Hqn-q$xUBz(NNj$OAxckB zt5XS4Pvj+zPkLlQ_aR6tj3K%=u*o3ug2HL<+8C3gb5)Dy*Z~nll^?sK#lncm*l5r1 z+}m*eh7PU>0PHvK?crVEw}zXR>?T8-Q>S*LdR`#MOuw1MS!q+zUWo%yy?S&TMp@^` z>H>45rq4y!1uA#tPxqYU>q^6r25&owayR@8f&d18sQvqCj(-gA+$5Qr;h}(w#X%)&(=(?&95SR#pa(>DtEc)KGmf=P! zU>~}}v$j8Q4^#Z+Vpnsuz?&*eyi4yR__Nfjx@oBP?J1A+{v$ew?OGc5SPEw97tS#1 zw>>ogJFtrGFB%u_=C5U*EPn;T8hw2e%;J7ZVB`X;+ST+#Y{4E>LdjPBnHm^hijOZi zllES2z?>9!H1~Gwk}|7EzT@e^mq3vM5trzR*`OenAeW?bKtm|I9zN`>o)Pm1!nMS_ z-gCY|&LI4*^OT2^H~NVErdu}@V)$}1ajHTOj)~vK&sB2l?e16q4=KO9d!ob$&T>#8=T} zh2IRL__;Vr^2kRLR21W~Wn?JZu8~wFMmXuj=eyB()1ZbzGZcKCfsf_TCF+mTQ_Q&{ zOp4eS4`nAAH6RIF8W#bFt`BW$?On&037uN7^84niz`^R+vZDkwI~ z&Ai_k;|k~X-;eKlJH3J{P)F#UJ#}p=BwAig7-Bl2^8s_IG8i}!=)^bntoF=ZS=R>6jCkJj?YA>_jr zRn=(?>@!ntz5b@=u36fCFRiiE@%nux!~5V^1C))ZdF%luF42{2zO4EIV(6L44nk#m zF~{m$+qf^u_OSS_Vw-1nxn)4CFXc<#0`QynCCUV=7T!Thf6W zWJ=^Wk>$Pu>suX#mc}!n-SxfGdrfJTZDU1VQu~li(`;NP8cWGtODdJp+ELL~vC|Ez z(h00LYN5LJfD7>?!O9{oZ{=UvsHEUCya3bJ@mN798$dj@Li#SNn5+M{7VR_b00`nk3A|Eq=Nna(=f4D#Mr!^H<|m^Z~P0zGe~(1 zR_w_PAx5IwFf_?3Ex-ja+$ZzhE%!?L9=D9{c<;#+h*MQLQi)NH9&UdXOxdhZ<+mbH ztCKfvwFW9gkzf2?Td6%u%vd6_3Ue@V#1twb&JGxoaRP)ib>0y40lN3-DR{azPT!U($m zW?0`GATzwCMiW8i9U1>155Og*r*UxSPQ63d3&Itl=cOS2!27lc>UO~+$QuQRtJkpf zr|tet$RQtMTnyM09|cNX5bSWqAkmiUg(aF+p9F1wHp9aCLh#qjV(F;1BCxgF8zPGFFSi@3mXeh17{1n|2=dzqpoFl z#fs$nR)@h3N=X*!eF%zX)9EYjMEhbJ4?uQ-hjLLNDAi1(jHHs-825SQ7K)znNWycz zbV|a=f#d6Rz$TO{K>ApyE34Jh{$*gPNY1pwy@8R@<%U#3njbNzZ*q$+fTa`wm z<>-D`9hjwB7py$753{y|idly`S5V_bD{$1A6>9Q6H#2;%N2@|$nNDnnPZ&^;g%#_| z5398xIMON)4O??qUMOErZg2KOaugFVjypr$0p^loPaJdVX$+V)4;>s#DU81QimPN z5=-ZSI~dCv+c1jevcRO?RKS z@x6t?-U-!qpob;#j%X}kd(821o$!R`A5!nHJk(^UU~HZS{a3NNqj~|yyuBJqWI!2$ zPK?Gx0X5Tt$x&aK5d$vlZi{ot7u1+U>D=(yWTM$6&77I?V~IS#d`JeVRVMbyGC4nV zK%YPa)S}beg~}r{5mV4o--$%hSnhOM!jB~~*#C;)pjzk@Dj6rzKC z1S}n>2LlF-Lyw=7!%!O6m$pce9M$L@IwzGew6jK$hJiq-#G~jMfK+IO)feQ6XGMmq z6AvK2q)n8_1B$aoUpf9w#Hbsvo_ZuZMHE~bQO-~}kz}nRz9I+}LZk?=mXd=>L7GH% z%LOx3q)AiEO10H{lR%AX_5R}N48yT!gbqdID&uEt*qk#6^Axepy$02kKPHmk4D%{B zdk@OI>iB&Z&53BzhNcoI0Sk7aA7}n2aQz<)^-|z6j@5AJJ&-~7Iq<%Ys6d1dLxo2V-z$Y=nxh+JYV0%WV4G|KnggC~ zE3F;x`a`HBeTcSP`hwgU750!CG)^_dGf;&W6^`b@v7drtdy9DNVP^&nOfXFV9RD@# zj_8^WevOELVCkG0ouy<2QT~WXD+KG6X^8_*$le&b%dH?gNVfq)>ClA=E)gf?;}|@g zp6kG|Z3*Jxxr7b~<4i2(p`yB?RXmmi1(}m&8TLv%u-FoVj7eSQ)%uu3lamyzQT{@O8E5qQ^ z&80ujYQe#)v7%P|zPBS)!!UPk`m`JE`EU$l%&aQlP~zSaEdU~iD}~0b@Ocw{rc=vQ zwQIzaaE1f(?z*gYXF4*Uh^PU~#W)~s6lso~4=7<&Pk2TlV4+^rT6|l6(=ukFyj#c> zm;LODA6kr>alq(7L>}1QEQliy)l|)ZUD*Ias|}s>lE}2ea^*OON?lq{4J=^ZfSfvG zJ!Is;i85(#K56ZxUBeE-X*=<~Q58k&Z80VfOjg23ppzX0}l0geI`}vD|5N4bEcAr<8{)N99C;j z=_l8})UF~u*pEIlu+tfXsf+Z2^BV|C_@w{&F>KAJF#C#}|LLPI4~W6tq5_FgtmAYJm<1)fDd7e;2OAAr|%colBQ zFIQP;^+yy1BeAj7N}R3kUk0XyMadxt;p91JA&rahEn+SLZlN(^F-tWfOK1_A)}X3( zy@`H{Np?Mp6v$is&LGY@GflT%>?0e|?hunFn=yVNQ^0~2%#VxL{FX9belu8Paqb@1 zJI>Zo)B{^aXdc*ze?-bu>bgK0do87O^h?DC9-;u3ZVTD0uorS+PIS+tihRFSF3A|G ze)pW1(`uhmx1-Qpd-@BJY2y1%{piO8eYY0cwuZZK=6YQf zl#gUn?#3HjGq;Fer?eRtuk=PKpOzb2%W{sY326re2QEprRx9BCtIfe@3HstK*sfI zC^h)>wgBT|Q`{xEjkE~n`J$1W64OFUV5$lLfc$?@KlY9$#ui4-c8+@gmxKQ&fmVFJ|MBaw=XZap{vJzeNE(mI zn%I~+^2MQle%5O~_Tc&b3bKlc&D`#K&UPxoiLqit zix$k8v3&5+M6yq-ER^Wtd~tBXpA4VmORKUpyV-d^6ts7U57P?OO(NL45bg@SSA?sC zwMiB1&AmVgP*sWh7z93Nj!^dont9vUz-cU0u+GgskL~D^Jous&jif*?k^oj-_Z1N1DSIZ&BJccLgov?>$$Ru@R`U76Ov-HE7*WqlWw 
z31ErSsxH%cQ#;Jk)Ipm$0I(~{#qjJ>TwD&ttFz0C%L(B4a&h(h51+5Zm$Ofw&fZ^L zytxz~K8f=W?_XbBU3_>CPjAK9`~MPuxOo41A`)N~RFm9qN~9l%Op%RAj6Azc5{H0g zAqlEB$s%2*3uu?>^i^@2lsPmKQ?Et#Yu32rPUKKIRLQ1U|AMx5r)gAZptJKk=CNDMKrI940saC=RUAFy-ks@ zzVDkd&1-m@FOb;5K)qfRS(ebNO27JZ1>e>0byVNz--UrFf%;;tpXO;XPs%!27jcpy zm!eAO-#$#SsexgCA%-CwfA(SP;#-n4SpKYGtMp#~tbyZ2xy*_?0$vWFca~YSs>BqY z0{V&X-o88gVc2RM!Rt3~FW$ciQ9$$6NDZSQ(itv_ ze3`C79I?ZRcwgknM1MyuB0m#X<)~g1C5Ypu$Wi~`ctME)2|cPy7`#wDNegi>5JQU! zMu?Klq@pqeBBF9}lh)9O(A~jsJm?{Lcw4}5?V{5EBdQ6y$)fT>5?$X!HC1Yf6L-jG zMRSpFsav<9XzEQ~K`x;&1ypjCguq#8t8dd zIdtL&8uB}Pa7cvj#SS@4W&fd$z=D7x5O0E2Fpec`Z3i5@gTE?l|BU*@Hve*v&E>XnkdUa5>T~7J+eCP5`cM8WTxi0 z2w`+4Ww?kcZ248$*m}R*B-C>-9tcPv5Osck9go$xN?W#

(<#2{;z-9&K=Q{Enpj*A6JON_m9^5RF{IXNhw48|JjtY!mJfV2uI(X5jkZ4~IZ z#Z8lct0v+Gbnv=a+z5#b`_$4GeM^9<`Yr*KvcSIm0c|cHY>)hNcZcN78q_iwKgjON zOzcp+lAQme`sFg5v9OEtQd7ehNkFf(qnOj^01`?5CXR3eI;%pq;ZS40A4_rzf>-3! zHqZ(n6!kVvDvdXf>cvf1rGLRbe)$5>UXcXTdx7s=Ew>G$)?^OOANv$(Pfc9MbyNlu zdQ^{wSrU;67M9Q#P|NRsgNPTaS!>5 zcqUznLqZnDUIFtEs@+nhmN2l0*UbjHxIohdnhsST2Pa0JKbj9tO|}ZKJ9B8R*Jg

37aP>&L z&0x)x(xeWdxiDuGo4u-CNBM&wDzWZ0DjHadB(S~_INrgQrHLTR_E9)6QlO9)>KDh3 zuBkm)$d3@1i!xHMO7evIC6$BJbV*LAx!}vmTpB=V7woGNqrAci)y`7KojOs?M?h(W z>nbcGExH9fgObyf#(^L^6aJsWVvf(M^JEblR0ew_!{LtsF+!o#YsK7K}n=a-M4pPzsH42Z~hp9sXKE&Lv%2k?eDScF}6 z=Xq8%F(Fs=xK^Gtyp7$V)|AKwL}(e*pn-9R6Xl{zBAO>_j(tLlWU(qY3s`!mn*gR| zT$TpKG63>UfBfNJjv2O6OX(gh`wKM5Z`tg_2KgUl*W#D+Hv&PJL2cWN6IvlBxD=+w zizW4+p+m1Y!0@!DR^H$0|X){TZLhYP8l_Qb%!A zNBgyIwID5tc(yd~qpFEIL{ImoiAPC}eu`KV^#4pFVFZJ=GHW5HNmsO*eVdV5Ln!h2 z)4RR&7EWp)6R^rmDn1<#Kjc6@Nol$iU)5;)iUNSPGzhw<)KEGIiy9X@?fcuLoELyL zD^@FLpBczzX@N5jnEV15shv}Ia{fZ+Ql_me5kuhY^g@HQC@cE39YcQb> zO75Xc5#5poT@F(wv`wzA&~9p^4ZcT<(5Di<2+`{n=E(}m-6s7l!zQU(?xXc41LlD$ zB&S?etkp6)E=c@taB?*Ic(>7zH(3@|*dwrlUM|D>2Kwfv$YL@cjyW+FR2&JKY;tG? zzQyidp?@1L+AtSByH(Qt2JShMokHAVe1@OA&^& z)D1^fk}W3#&tmvBs=_pGzgA0w5Z9Znued;xtBTgAqD)tMI>xe7sjNIGqCeDhcf&x- zc0i;RIRk-*px34(iqso`ADSA-rh&O9f3Od{arm7-TQJ*7fUyr2z;7#n`L8Un+hhQB zUsyoiZUEzt7RYHjKsnF0=Rw1&j#rE+le#H$i6aclWP{(wwt#xWN?_wbnB>oA@LO{8 zR6MDM;)xhJi+NH_ILlDuN30vhV}@elvfSV@9lRSUFYn>DU3DsEPpWG|Jru*>&tP(o zyslMcVJhh8rBg+a#_W9+Ph!H|QSvcZc@Mid`IpCFbff`LmKLS+Y3#^^(Q+5)#d!pP9r{-XpjnmCif#GjC)$gl$nmmPG>{32T`$6eAx$jgy)h5b&KKJM)dJTOZ(g_&~l zpIvom`L0x=ePIEO&h+(Vs}D2`f9Hd{9b!tjubE=h9fV3z|Mov)FPZQ7z|^$uE7Z~U z>Hm@@AK*X&LnRI?v^_#?8H^JrT`eoz_`$czQS|$_{_K z21=o6%T%sOs+(w&U<-+BxXGPJS`si4Bn=J$9Tno2;kDLf(11JKUjl(&B%_5IUeL0I zn1arQ|DjWbe*d4EKa~6-r-!190Rz4{zJGE2;;rV1QpCu2u9(TNK{||82n@T1XY3h} z&hi00E4Dcj9r+YBOUVI*kh7s>^YB_%Y~E=z#=Mvf)d(A2D_F-Wn(4QTo2bN5dD$m@A$&FMBtEtz#3w5N+ayEgA3i^v9E!s+^d`AyfKRt+a;J*XE(f;P zEp4%;eVw@G&&g%825gS9QMu7;<>*6YcevMkDx^A-PwZ<(`hBAvaO^V(}i3vJLLi#mG{Cv3g#klLcfB}I8n zWt&ZFX2;hKR7XF_r`ga^CLE_#8yZx-CvoX6q8z$Y^z4rOO2f+`Nu)O}_1+Kepn!ZsyB>^h1t8ylxbje~8c<*`bxB_uxy_O) zDQ_|Ai9-Ufnkds!YLF-rZqMZ?&a&S<>!E-f2Z|wV{2ju+4tnX+3|J?QAV))h~|K+-EcIWifBF zm;0^d?Y45arL2+eRC>KCZ%>7{Q{6pE+3l_B_EL0vsJUJx*P-K3*}xdR#TZ%r7Ntvj zPIcjGq9=ab<#7Y=_Ly32(3&jFqr@ELxd$#4pt{VpqJ39HaRvI6)NGf6{YpOLR~XXt zwbDLd?svqn75O(%if-h(c{RtzatMK8ijf$GIZCg{R=FNVWLRKqBW-88I|Ku*-Dx4l zkc`F8Eq0yS!O1J*kF(=bztpv?1HKz~ruLL7RZH*S8-jSBqAUx$R5vjyRg%%o8orrv z%EvyhiR&-7E@L}Z&3r3kB?-(=U!l!8yzABd_>fqQPi<92K+kcbC@_F}r;8@tVYdeo+C`7?P|IAdyOFkE`f`BW(?J zBwp$oza+HECs&SMzE^U^jGO;%;Is{V^CGeXO@9HVaFpq@R2c4yXhE-6SsKfjlRUjv!)tYq_$!PIk#homB!p7YxXXgxwO({)3IY6$%>l(%wK&(i zS4`PTF);@6F}(yUF-zP}KXu8nmVL)1)s}aT$c|uD=;sa*v17_$ZUZS8(~`Ef zQK)Ek5XGoUks?}h8?FXQ7A$Ra*dbI*^Y(q^gaiVTx=rD%pP|SoQMs0Og=-SbS~Xl- z=S!=bWFWTJeO4$YyMhCD{k4c0drfm|_FLDnDc$ZTcK~&?e{1KTd~fI5Js!SAZtXn1 zL{W+bItd_j<8)Cu<O!aG)bkFh$~M|4W(HG&#cx=5K&m}wr9 zt2S_`R6F%EFg2c%`Z2^an*KW}izBE{O+B>#+cn{7W$xkub?(l=%qO9NT0@W1*YR=d z;ESpX80}Lr^-Y03XsmKCa7IKI{NLnU_#Sf?yUN|B0xW1JnA1~aPQYVQ>F9ZRjvN0E zFCr5;A(7ot$R?O4X^l(P35F&uXg{Q+O#zIw*XKoKK&N&6%UKBZm#@$q5Dc&E(0HE0 zWOghqo?+&r&s{r^*su;`SNuF>G~uCY;r+Et>Yt{XL}$Cut>4+XCVrc*yiiLDj(fZ& z&ZjP~n=zl-cI-3pVq29ROgUmadB)K96W8KWFN%Ii-XX#l%vYH{-1Z9imKO=k0a`?u=_NxUaXP(7~Fr zsJ}0!k2=k^1%=*^Y0|l;Ge(5&dXGA}rLwd7;gk4O1!sbN`XDP*KhXh_-UoPM3;tWH zd6T0b??sPmK(t7jc{CA8)uTvqYv3g9Aka)zZK}YUcRIj;Zb#`i#!hehR)MgOtAovn zyDMS9ZY_`18M68|ORWQ56(8=XW#=3#YwFo(e)RTr58ZuX&~60u4kVpX&yKTaI|8p~ zp>OrB^d5of&fd9Q6-kdgu*{8sX0wgas?*N!Wk+vLd*hg08^lhGmNL5QO|Y=PP=2D+3=b6gh2lzH*Pw|30pN_LK-~FHjftCZDIC3MoJV6 zxZTQlHU(Qx?&@^9BDwET@`YNZAAt5AZ=?6uayptlZ`@xpXNZ6X#a!rQ2pwR;H%rR+1fWF6NTGIJmdDssyW8w$GV>eTe zEM>TX3QNCN4Z-GH6G70K!$p)2q$?3mPRm#mJacQCGYt3vDdfq)KxzS<&EQmgK^{Ed zxo}L)@*l-bE-u&TfSea){_hI>T+gw{fsBHFeeluAkN^6+qmzIA;pI3`rOuR&!7rw6 z%Gv_mmFDP6?P7%LB7K5y$T35UeqY>SL=z#CJaRdMizKyLwkU`OTzLQ%F&2pDvQSKv zydr!m_G(d^17uh(FvP36C({2bS4|?y?xKfE=us0Ox2ja9by^l#R@_ktWiR{5BaVR# 
zGDlBcZ16CmOwv(+Eu2wun><}qogMNY5lC}VB@(bK5D-RC+*Ax{7On7zojO(y!j{E; zKM@{7<%LQKf>}@|d|+Ud!%YFz4XMBoL)arW0;*A%Bg$Lv8%U>Np4_RmlyM$*D>*VGY$Q6C!?#{3(uNi%2=hc7omk&4)U6k? zhF<17GbK+)x&p_30Sk(unEG6>OFFe_KQag>nqnTe)YH_lo(^_7Cl^iW__&Rwqg4Aj zmM5ZB7h~5S;x39pc8Hb(DSC6fn>bn5vHT$P$aGAWZBdasMcsoHzWL8|lGxa4X)hl^J1yl61NU?f&B z{mR@P>oof&%hMLnStkkx@anV=Edx3~?Ws<}1cB9p?}0mJP#?+?hf>+nUR!-9c?E-t zf@$Ss`-z3}Yp*DMzLpfobmHmYrY6q>TiiFoQWK(~eU2?lCo0uBrwp%*{1uey~^ zAJV_h2i}qF^r6+#u9ii$?-f+)n$nXbA*JLQu?&W8uoOG-$&Yw$J<}<077wAJ;Y|TE z#Co34R!C*^Jk?AVhLZt6B|DTcsxnp3)TEFMUIB^!NyJ)l0CK0CbUp9c22495zqTyw zwl3<_67yk`^sVW-+db`J2N@M;G>12794EPMZLmt}Q19S`a#1#FL5K2;AL9@e!`!W5*$G)Atw@j z8AkEGQ*QvL++OQn9^1be>&B<7yIeM5 ze`m!i)oh2V!HyYqkTdbRiBenU)do|#%;`|9k1NjuE6Vk&h0B z#H3ZnOx3NeSyv^CW|RU9a#>_Ch`b-$>uL)v zsUtM2Rynl;UEy15Q6m>Oan;8v8d2j~+KM?Il)ho7j8P?EN;2J=k+W^DxfBpe zzq*!IbjjoA=9C&X_%2O#mK2wvoG1n2Mf3h`wDszvr{YQrhWL{f4Af0rIo(RH12Np3 zJHjeu`Z!_I6A{}1D$wezztee$kArClR-r~3*3=zM<;^heH*>s;Z)dqBoG-&` zw-VJrr>op~$E$G6>gy_Orztlll?}C{e6PCfh>`KkARUY6c$I@GK}K1DA-MdGgFsD! z>RsYwG_bY!#@{3CJR9xERI!m-XM}+ipfjUP=a_&r;O5jNK)51L1=gU8{_z?Dx1+6}L#YieN9QTa>n`vjQ&NUD-~}^YPQ0*B9qkCY=n+gbQ;x zql6oGr_2E43>k7E1}I0SIh>VOfMUX{V1D~Y3jlBDl>MIkFZXB<8Le)#m2S%?BJ-ot zTtZ$})wfik+!(H!4Q=GvSTp>lt$L*tS*zIEP%m86N>F8vvMWKJN{Y`8jz=yoi%nQ2 z>j=GhWy`6Si?155U8boN5Evax`5?O1OgLflgPBZrUAcumdYj%S@w+5f3#!o!*2OiZ z7pwpIhq0DGwnphLWSh99IIUg2t2|%QJQl%k$Pqtb4r2AW)G@C~G2Y6e9#r0hw+|w< z)(+zYbCQIZ^rXZrUiihs!Vg_-YsskP?GNsn3pf~;jPO8_pxirB20zlol^#aFYL+M z(+%=h^}?k5ldE*AH|v1RGNt&b61FH%OS>YZN=Ih#;~+yIM=_|cCa;mY zyhdZ^rKBer&EX#mNCGxYPmcA`0||pymOZdkt9J4unsBiTjggmGxN}Yxt>>{GFQFuQz;?mi_|lhysF35Nz|pUgEF0Bj}!JWc3W zDHF#UL3b$1Hm|uX=vW`J~Dv27jYVD;9eWbN+J;3ctcCXMx<};)5R5)Nbd+W2Fey{5giwaM===Dr(7+zkD=wY7K15q`CjYTnPf-WScUV{Wu>^~D{n7Nx-+bP5=wmeUG#jc`%C=jvF^?!U z-Zv5=N1}$(zmAP}O>0i~hcd=oU$3$A#tK?qutC{Bw*$-22ANNNC-~qo`5U;*>v$k5 zef`4Y3XL4{8{)Ia19e36KzvJ*4XXLYHy!<=LMPN-b0PD=%k{)F-1#opM*FdHG-6tL z0y8Nq5O)@pD;Z=W3*5@ShYAQdbh;T2e(3NGn}M^!Yb9j_$XW)`WNdd^W~#US&vD2A z1NmCrKIb9JJCGUa;i$0Hk3dA-?6l2z^u2EO>?UcJ&CX$a3RVWFu zdN)#o+jVp>eaW31d*I;80UoHez|u$+ZTPhk?w!UG6z>j)jX}CIm9p9__I8Xr-xnZ=sx~+q}szXYlctX zkAqV}*+p~VU~x`iJ&LBk2R2G-=pb+dIMHW5VogwYo;;F!KXESXENUG07QvCj#%JQh zt;tCs;n;;cD{IfH+C=kA&z9xX&Ri+n;Xiffjt+3vs3~W12dLr2)coqlNZMiArC?Ys z*L~GErJ1rZ6<_OB$i6y#FDCC~u*=+T73{Z(bNJZ*XnETD#QEs!?k+S8(mblYl1{r# z#Tpn)Z6I6Ax8z}}7N7U0Vsy`Y6vo26?IX6>Eb&-Fm8ggvdL6#U8UF8R`h)TBp`?^E zDy(;xL65t1pl>{Px%69;=`Nf+UK^zJ}mhrf%!w5XkJYvz)9I&x}sTSUxhAI{Ln=k4zOfp~2X7umxm>hKp? zsq;kNR10>-GU%#9|1^2^hvMJ>#gt_P-wJM^8DZy`)0drr+KLHxy*H!I+FS+@Fg9ky zc3&myJ#f|^j4m1Fr(kg@U|*ODCKh3jE5)&?Ek-&h><(i-=b9PUyz`-F7TLAfk|EV; zg^79C51#@i&CoUTcv{S#c<8*SMZ0aTV1sSPTw%tPo-5QsUM{)fG)C8HnrBTcPafb! zgb%chT;$qZmN$-K98R`CxU_!Mvl+ z>7KuMmC&iS%3H1Gg=jAnX_-%x%)4aDVAAijgxoTX$MDV zlD#6NXM)IgwwJ7YA!|CScM>1V((aM8KBDPg?GKYtF`*9Q&_OYrYQ;0h73d`wC6t~+ zM>5vr6O~u;Zg(PDKz+-DCO2T|urve`AI(!x>JJ1+jhUs_9vC;FEI#(ugJ(9Dfi~~B za1t+cm&(#T;zf{L##FEFrC~g$tx2URGbbaBwvO5HwR1lPSN{j*PD)T%=~DU*th)}@ zhxV9RArIYHOYCQRwWW5nnfo*KAsgxSIqH%+N7j$TXuFKIM}hBM?qGtY$BrCE}hkT;D(R(#63Is&(t&~G0-O> z%~5o+o6OPBFL(kUX9v>tF@eg=`tb5wf|vYq=Q+$8GKLeg3EZJMsqqrF5zC%#C>rxL zaNP%?W0Mf>QYH?_i#zevu;Hst`c+xySRu27InBZwd7_Pa93;sOm4+WJjZnu4ZeonY%=8456j4ewE@B2xJWLWW0a zGp?$E7*S!m0ri!ewU_jjk`WwM2KyI>6ll7VJ>DRFO?g_o43&)=9#+F(H$Fos#|LeR zGTh*Pkc$j?ya;<%$_2an7c37L#9-s2|FSCNne+{A1RhZ`LkTKbDI@v&5Z~_xgL%a-WfL9}vfCMJb zDnuHXaV4`wRw~o#b_+q991G9fU==u*a9H*1c`jL{l!a51CG&7vAbROt7=dN=DoQRJ zO=yGKZ(AQ7|x#%S8un!lR4c;Q{ojS52nNbY! 
z6fa4jRIPTDnv|k4&i56J=*5-(I&$W?sq;jP?ePF~=BKcUDLOL=+N+NYi?MgjeuM?L zmmu+FBfrAE?*J4tgodBxF$TJW68cf5GNB8SK+x%aB#e~on)=6%)S1_VRyskCaLUZg zT1O|&p_F8g5nl78B^Xx>&giz33{)a^Q&>Ormes5tt9U_k5t~9m_uQ#8x#r#XaeMeq zZgNl(v|lkWOyKv*SL+Q} z-_q@or`*+`pf1R@jx~`n>S_p;WU({npVNJ(YK`n5oEZ#KcMcS9`ZB||hmcIV$Z?vv zqC3kctGw%016*LWil%u>l%4A?x^Gh8^F#~hwG!H{@+w!scZw=qm?lI2Hfq0DV;=U) z7d><*-Br7Z`>?ztBPwS&|IFF#quTmXYadnlYbd z9YUbo_3Z?S13%7PBA01^TCpZw0VZl;_lt@gk$y~l)SIjYdAW|hm6-yhFfP+%#*q7G z)`U_gOefxM+5PO~deYtu;{*zJ{TRy%FH=rC^^UFTPJ8y3?Cuh;!rc7g;+v&-bttj(@(z%tU#AXAMT2ety; z5h}C2t>@WPP!<*KR5TJ{Dz!*Xt^3j3*h5gyEkf3*1PHxJ*-rEs^e;F?m7`*g^Qi7j zdRaN?H|ey-TN8ax*N(F@_95k%2*VLxrry3JkURo-%{rD-=QZ_D`mFC@X9vC6lLf(p zW-M^=nBxxbs4ePbz(ajggLH=~cdRTs69|ZDSF<_-5Pc*py+Ah(je=cB(1k%}9@qy3 z<@NV)I7tb%BjMTpjhot+Zm%PI4)1K^m}fwZTL{}nc($Q)j0Cf5!Y?M&$<=KjV_#l& zr`So0FzOfhwK=#RI!Aw;!DRwpr~Zhj^d}tzRk`ati#E6fbDdmhSH&eA1SgR` z&_hsMKuy$(Cfl}ot^B;2m8~O`!!z76!UI`k0?-Xt64zJc$XAQvt5~D+ZJyB9 zd@!_hxHRah90KJbU7Sl!4IjFWbF#eH7Y;38q+qoZxsLs6@maF)&}O;6 z!68n4`!4bfb36h$&K?p6Y~|oxr}i_KT>7ZB4a--eY{1?_vBPu9ze>4Ty^CeUli2vi zaU8sQ^XW>ZZnFUzL(D5ohxv%xiN2_&jcX{Lm{Oyz22J*9&v+|KewwoP`~pi@+PvJ! z3VHvJgK+dD9-EF?kxo@t(6wq{JfTuhBe})8o7zq0SgWjgS`agogaDaJSs9VZC$z&w z8jGzRD@<-iD@<4y zql7BJ3_DtCI4@yN-pCKiR+g&_-{{c^IeF36`LY%SA~&%dmiEB6$2A6OGed!Fe~bMG(1#s|!_dmMEoi=-~nA51p#yuH-d4=QQ_G*zk7@KV|ibaS$^eSG-|&=aTsyi0KX zo*nBIy*htnmBHw*`nH>Dcsk<+uYcpgXZn^yMa&o!6dnbm>)rL5vcnhkQHl@oLuIGe z*IM`(l_kjUM@C~uosM!AFcOkSup@IQNse<@i7x9LOT7+2=hP~g#DuYLN5Idqvf_;V zR#=xgNb5$w?YJw$ZPm0d%?GD^$}*|xl!Gdwlb^G7ho3w6-IKOXJ^D|Dny@ptY$kHg z+-@n~7ecDc{}OMYq9lPZ^kgP5IqSYJ>I)@6aWAK2mr>2f>5O-zdPkH0070d001HY003iQX>w&PV{dk8WG`ZAZ(?O~ zbT4vsZeM0$V{~t6WG--d)qVYU+c=Wo@A@ln%FDBq$cmlx?Dcw^eJ^p69`9t5OzceF zp3aYiP6Yjj+H_e;SVe4Itc?8?{RARZ%u- zQ>OqTtJN}tmecR^bls?|0AyF|JWGmss_wGphMLhp01fpY8bo>3Bv3km@@sfn%5o}c z2x0uYX`1!PvuAgAchiKBotD-0vz!ao&)%J#zIlKC<`593DxZryt!q`KzizS$x_Wi5 zk~QEnPp$yVJh@Y4rIPC^g>OxX_}*1n1Js?Ux?DDQNtFVWMOHUecC~3dVj1k9&$0lJ zD=F0Q<+(aLAF5X`&(F>$0OQNq#UDR>zEEFYe){zC{l(dvbM@hqI{one_1VSQhxhRG zR=s@xAL>tM?_W<;3Z#N&((mgE@dqR`BxAZjnw_VqhrqIAgz9xV&z9L7`lSc`y1Y%R z0=lEtX|>8~9LyTfTL6?iTV+ks(3@7Lri0;d_-Rw9S8qODV3*GSvU&Mh9Z!$3wtfO4 zPodagu&m0JisR*`*;Hv9Bd%rDsN||HbD&Ag&wa%f*=>f1^?hGgS<%4TVvfiT2Kx29 z%=469)#laTYWS{yuaf4*{4Q;Y3a~F%woJK5bD)kbw{Czae%ErNG-_T7F4 zYL(S8FYi=Rt7Zwr$dl_@Meut{KL>-i?_U0L4sYmjN>78qqetqK;mUPVC9AZ7$;P#( zuiv~qd;cazrYf#S17!T+Ra!OiyeyX4b&PXtI8pD*BAxJ8ob~*XsKB#uURJ<9>#{(3 zf&7gl3>%?% z_(%9s*-f5Q_l$0HlQh(zLrdKuRW-I{(c3QLUT&K8rim9>)g}&q17@qB3nesp2NNt6 z{R3Gt|F$lH*J_Ydd9JQf5(Y5B)GQ3x2bz>SMr%mu?m+^cL_AtJgRvR_n5s9^>*<7_ z4{u@0O|i`Op2MInvU$_N7(pw#AkG@o&?3Ec&7i1h(z@BVVL%zU(vr?^`wTv2i6pHW zZR$;u=P=?p*^?e_9JdGhsw{Ke=Aw*YzNJ+>PipM#MYS<4ez{3$jDXjvh{7T?$XS8I z-=yE0sXD(YH~9hw7^@OpK)NKT3-v4@EJ#wSw@I#LESKO2+FZ99PNrxK4kK z^R&2bZUD&_KmH)N5k6b8tC+Y?y=xQ^+Q49)4(BPGPO97kaE(L&G zd>`sVFROb;|9|Nsf3aDCSS51g9y#)p<3+ffjbQu1opL$w$T_w%@Ca$x;D8DxqmEJA=^ zE|OI*MBoe_oj6^W^P3t8F+J);T%#>`Nz+AfRU8R0zo5z9=9rtn+FWhcFr+1#7SLg6 z^k{ml`SzoI?X}^6gGERjdVFa zI_f42H1$WA)nG@%#05MNLZ`yr(kfUS6&tBBbQ9!$x!$A6RZ`q5!VlYBp#TBXK85vy z!r2VAA&K)m-z7+3EAY(^ zVbi*vq8=()FJ++5&XzPxx_x8lgKcn^vL;Kw;$5wA;iHmGQ^H)y=47!gQndaFIyMMm z>Tmc=hMX~*(k59XO|rXB*K-nd2%)!IA9V{ve509d#}3m17x4>&$o~UfgGmWO$1Xm+ z&Roz!@oi4h2}`NZpWf{hKd6OZ^kE^F)=qN{KNNs8@nN=9U-bm~ibIVCEiiH*n~)C( zhUWXN=Jjn_U6lYbFR!nmS9V@%n;w+~*xL#SXZ$pGtZ>lJk{bgEX@!IYL$k&yVr@!X z&7-iw`pIG zuqzt=_hhxsfm|TX$S+lwE4@Bll*D8=sLQNC-LBV*O`gX!(jS(l%Vpf$z)0Mb`GWNO zkwe23RELT*L;=|2TO8VJls3-7M|?CKBZvtA;WC)#NnNY+UpL9(4R>Z#ULjMDPoTNs z5a^D>f~#&V{+{8o&-9##UT5^vu1g3eI+&R916IIq%gWIU=?qHDQD)MmHp-eb1P$}y 
z@strlaEo-Q;+WQy@u*JoWlPEbn$&T&XusB0dW;La)>mAiuz}zDR94xw(W6*5>RhlB z{;|QkyA1SfM~SQ^PZ-b;S}f z!dI@a-(@K3zipQm9Fvo*j3PPL=Eix zaHxMih2Kog6ZNf>Nt)fX7p%$UekHj#H#{$H}C%J`X2Y5G7SwDd7hU!Gk9@m!y zdZ>oezkzK$3WiqK1+m{TN+%j213CDr9xn)XN6m*ol_TuKlwTeK(c=aHSvip2r?JNq zR?3~B`{XSM#|_BCJ~0RDUQ{tlT?~J-I^S3FY>EsGw|IQ?HYv=W~x- zqSr9yG0o6bvhFDX^ud(?>ZvZtN1i3Xx!nMN0aN5#TiT`9=%4AV_Fo|hv4|9emeMel z?QbD)B)L)I$Y)<1I=X9-=w7&jR%V9svNZ;#jC>as_Xot*@LvnTXa)#{qWK+u#!<50 z@j-}bH&!U4-P8XiNj|`d1cu5YuF>|0jb$)mFcKIC4}p#b@yqbi$TH}_9d6eE z!_U*vTu(3P*<3{+bK!sJ0ioajtC0_td?@IltQG(P-yD5^e)RmUp@|m6NOvii$*@5@ ztW^jEyM$*P8Q{+99-SesI}DCoeTJmut-+GBp(FF~(ll(}88gPdm<{y|8(!+No>eq6 zZ|65jg|qk$`WBbVC29xp8SLrX*drl)wf!VIwm2k2YYMhWj>JEBb}%_m2V)pb@~i-z zZnN}GSEG#u?5|(jLCyLGb3asJ7%U4}fbB}MfF#;9S;L&c@fQU`h_b@$@c0td`g|9T z%qFHt$AFlobC|Qj0D`5_@pvGp_iN&jZ-y41;}x@HAiH`t%odh2f>nbqVBU#hEdh!O z%aPZYn%=h#i`)Ai3R9mZJn}|To(%jkq4Kifj?mw>?A2&7 zUB?WWd6@Vu25h;rwt5;MdDt4X9e>9@0*@4VsglJ)1FnU8l8CGw&}N9MbdJYW(Bp%C zHz*dsGaHBG>7xy{)0giLE`U)`P^hdqsKHYBmZC#%Lpz9fX;v-N=OVKT)j_M9%@Xu& zX}yDg+klZ#f6LZ{p;lf~zg7U_ec9M!3xLilP0Svm*`C3z4&ZO-*S+3 z8xUZiy4ZV0j7z zsDsD#fx4}y^kdkwfz(6nNf%kh0SGicvSK4yt*t9tGyCv(+sqBVc8Dro!RpGI$0FbN z=Ye2_7xr2a=mPZ*gY3a~?nnh!mS-8f<C9zkxREN4^ilkZjC`JVJRzJ7MW#(Scn0%ZaorJY6xefSk*ZRhdpce`B4!&WL z;8T+4ao4hg2$gBh=q8Qy^cl=sElI=JZ_5hSdULgvu#AE*qHP64%hh(0t3?WfMds#` zkBli`p^Y6;`%P;6+GkSx1~Km^Xu5TpfSoN_dD_vE{?^eB>XL`I)f&2)f#DZ5mJ>8= z3V4|PM6BMyBe)py$0+DgKL(){aEY)qC|_xHfbC#CQGL`V#-s%W)VV@BR$%lQt$^-1 zsjK`ZCLKg>!S5jOw$;s!7CJY7_ySb^yxia!P@EOB(|6~W6E!=3^YN0{fPPU0 zlo*Uw3?&gu=#MYxfg)`Qb*NsL7Qduq0@FIj*tRvBlejbHALw%jI)Zb8Q-go07HN{3 zQ}%c;4Z(t3uk&odTd>6f&nd5g=CI(JXnP!r(Gfd7TBo7uo!!kEtL=E+s7j3y3*RBw@%qDh?c0@11IL%Z4(@$NzYWa1PgV-po|@ebJ46Zv%?_jlRvBUh>&I(H)v4wMhZ`NnjWyANO%YZ|z#y65 z6-oLTvW#YxOSUU~lVH|r;nJ;4TEoPL34`HtOflKDbGGZRh0HkUn%{ERyN(^3_BiOj)WcG5W6i+SctYaGA}?_IpJ`PdLVH^E(B?>&!ZX6$ zhXdl=*TKS%ZVI)gp2V-?3%XN;4!VuxloSq!~esJ#BM7xusb@>1m;QB;F5KM`}T8k7FOgj!!xAj zyfdG6a(*$^LMXpNf#wy#vqv2gFA$i*j-|yjXg>Pf<(|imbr`$q=Sbj$;dUzcYnjwP zMFvIBm+rUkZR-%d?N?c;#RW&w4))o=eF_~_h>kU%+IH+y^?X~C9Yi@4GyoIf%vmgQ*pumtc=_Rh%?I@mYJ0tadOlsykosuVDmpj{&mdTE$hmRL0 z`kWG2qxamH{)z66?E4hs4B)?&){+wVcn@5BW1)r4%)^J!sUL)n58t7)6*a4n+W3IY z?h)M<-3HL_gi~Pqg2%G^!xtKFcLSoZY~kVZpo@%d>r_`BZf#)?-Dx)j!@ZT?v}4o4 zxbNJbf8=i~h|WWPp8IB7^+4YZ%?8XKXE42OBDcn2|2mM$KkO$OvXV0DJgMhN!>ISj zo-g#G{w0Xs{Ah1{wB$#$^(T3Ds&Cv*65i%YV3PNFcUk%-Z)80TMEdMBjrSW>6T?rt- zb;JQsSk)j-yO%^vv=DiGrwn%1G$5*DB0-deBR2OO+%*eP`hYafBv4F z78Dn}m$EkHg9#`be~Mp_$^@3oOZ^c6`^<@K!uj=A}W{_WD>!Y>4->xKi=a3rPupMRZ0o4>CmJ zv3m-Lo>xsD)}F_agzqQ5u|SkSHaV#eCA48bh54NNLW$Lk1(6t$9qOO6WC**Yc^U1& zbQG(53@)tQlzc)pbxK>CCAAslNfSel{#q_cwD25<_o%T_`0QRFlpmQfE;j%d;>gZ< z?^DWa{qC3f43#=oR6P&PV*U!2zb(Tww}VQWxP@y z{jE}tAeS#Tk#DvNCKvhC7~HAnS4@yg-ka*3$G}6R&CNnAPD#F?Fda}mo4mpl<8{L; zUnO?AKX!k&?@Vc~-7%FkUI=O$c?uo5;m4i4rgv=ICNV@+3f0F9tx*c6!UTTH_i@8V zcBG*XQP?Coa-6%Ko_W~S9LpMG5u6KIKZ9aYbD^GbYY=tR50oM5FbX9idGv6^t~`lo z#cpHwc_k&a7sEZ*U3X}!=&&l7A4ibr_HhQhLQvly|6s%PmPc)Ea1{^S`{%;v)lB*` zt^HjOCo7boU`j^I7RE(eZ0Zj5LbPrc{X@0QtOVUuz{BDaL80zH#R`p2E;}i2a4%G@N$cclu))tSh$9D$Ws7x7{1c3 zOWFB?kKVV|o#C%Ngo=6*_RSZ0-!fz(Shhs%$l3UmQ(hYL2V#2t7f*Ojfi9tHoC{c=^g0!w8LVG4ARjb@4tn$jE zNjL2UNQ0NZcwZQBxPX|hybO8|ti7YHIm0@fOSmH(YQI2+jV*VkwIs!X*V>5);6j1S zFduz1V}WxEiI7qT!em=bisJKf$`*+>gudNkJ5{)cq)J>64QlH6!LjT4cNW>n5L57N zg#m1uj3U=|v#B8tN)?x(h6f1tjo@x$l{9DUL8SkteVL}pT`{+cET9Bn#%lp zkxe$EN4o2qd~&CuR@E>P9KaF~?=z;K*DnbgZtX-+luUaW2GbI%RGAEA!Usw!8!59r&d+Iv%%obw z(pr`x>Rq0^N@6PJ6I;%B4K&7(Vcx5zql?Ne#%S)2FLDQaT)Etw_d{?IX358w&4m-! 
z-WL+K`O#y~K^=A>oBk+!(5g*8u#^=GpUi3aHfhq0BcKQ{OqT#Fq|~A;TLKQ`9v=kU z^ja)L5z=PZq+DJdT$3ARuHGq>#Ul8#X=_`5GlqbgH32?j#rCAf$a8fD`QN3$CJ=J* zF_j&5!0fa+(JLKu;)-zWL&0$kVh;FXGsvr{Lo@5kJ?A;~a^8YbAocRpMbPgR3G94$ znGrTe?~~5a{YmPQx9l0u{tjp>ply;r0pw}1N(BJx_6+MEVg{g#{eY{hk}y@XjFNXG zJbyjH(R)=v6tbx45LxwIYhjW*Y|Uwp?DtZ`1*+Ea-aB8&fy#ER_YCz^nD3=WkIs5i zw%(>+6Mj|+&umqU>KHQQ;0G8W6w1HaN)d6bP}aWW+}7go+n@9Ynp?R!yTPaB_7YTl z;RmHD|#zR=C$K=)jk zaUx!+gK0{}jsOlni?!2xSdP4vbzwFCjF)~256b=>ZN49cqPblbmp=37igUpbYEE`w zTk1LL?dmYYcdP{6wK(xybQsHXyXyW6lHlQAj+U`^!V0&ek!~hNFte?f0nh0GwVyi- z4nICgP!ht4dDLF_$J3|Okh6n~3;cKUJw1M@jYYS}F2|qw)Zmd2s^`+ij7ss1@+L6d zX!1*<3x_e83AL$$0>Ld$WtHc?V}LfEm!%JA7~2fyi~*ceXQKm)9_g@&fSOM(Ioj7w zZuV+Ke_*c&>{Dyk2nB$eD?sxmF<%Ky_XDWV@cG@$_oJTyT&zOL4b6R8ff#cO<;e$U z3F%%urk2{s=*zB{yzTZM0+v`O_j3So4?eB}UV+Z&U@D!ph8iGx(346=QK|xRK6aS% z)Cm+9wae^~RoD@cWgDWQ-mJNJ`bCA6t7s{~WSZ`rA5zXusJrC1{c3XbE8ogXo@gi> z-0h8`hB(u|v|6Q7#8JNH2GR~mg>)xTIvvh_>LiRD(LnKUzt|US@A+>uKpk3ikIZ#ftkvc z&|3f;FB6~2xUaPCA%r~Ao~DLYHC8e$1D!USGPh2)WBH%4Rb1R*(J~^*qqxE}w8XDv z_{3u2B~k+s>U7^}kCxE-23Y;9lFeQwUwWcjl6v_qJ4BpkNu_qzhQuPv%j9BXimtA{ zh@@x5+bow2+xMlw&?UMft+%utaj)U;FOk^ZNyN~OyY1LSjRJLyJ%zIOE4ZGx-J-A` zTv>mHZ{UO!DKY%JZPU3!KiskGzM9!@SG`Tox1M^Ok|JBh3A+TR9i~cdu}E5(tQ!y- zy4Pu~9SHY}h7^aQd2#uc@*-q4J0A~Rdu2+v@gN5XE_m3Ir@2TCA5|u3!9Ou)vB-fg zW4l2_nDj*t-Yo*qPT&Z9+g-pR+24Esn?-JDHJS0h|2C}WZxIp8a7zsb4z~HeiJvw? zF+1v!alypYE>x}TBtW>0Ij^k-Kbyyk!!PEEO_VW3du!ThX%V=GMbe0{tf)!0e)QtG zv>zY^LK~Qg8{j94;iIDO5j^fa`E}9Kfo?)=4GsXnX2|J@)B_6F4E(>POs!RGe9_bL zI1SSMpum0z!oqubpX>2?bTdSR(kCE?)XNh?TqTE@`M zEM}s7n)dd9yEQKJ+>#~-*2aBE@$iPnPc`VQ@rvUMwj3<3M!yxWh&Sy9EEiy}RVhC# z7jipO3YjU6?M2x&X=|0CY^s7ytrRPtp#p_LS#h9v7O63lHXc+^%(yPgIX4tKX#qEw z#+_0U!qy}Ni5A9%F=NMtg|X)#ZflTC0>ppE%PeCT9Y@KYi0x)CEpV&Yd$MbddI<$` zYVT`d@7@Xf*FS51i>@!tZ@u{7|7YlI43$WLt2Yd z4}f6nLXakV(&B_6z~ByuxN(*+qVxj8)! 
zY!&n{)LWZ<$upBJ48u~aU+U3QQ+P_nUFji_J*;i*WaMIFI6B$P#MAZIJSe~M*Rj_G z3{0inusHZ}X<&4VHOC#Dabh@`K-NoG| z6l}BmhQs{iE$|Z`91okdoP2T2B8Qe*&5Q6G)T8aRG40HEZqa&M4Aw^TUf+~uv>C2$ zv|o-i;?E{vb2FAq&-96i{2uQhK9L=Wr;`XCSs_@VKFDRzie~c$!02F4OSfegv5{|g z?_IVelNDUQ+f(W>QMq#pOg=Pddna}TO3#TMn4v_^Yn<#~#_8&>#rT}&TozE=E~#(+ z)RA3kU3X`YmRUY@hPnp>Ew6gG(wAv46ZWWts?oTw-0fjQ2BCm9&Z|1vFZT3DONCsn zqOBE*04Kq*nQ%1?iRnFGZy-pL-YpUC{8Jz=F2twgvp1i*=>#-UiNz#(RPXraD6321 zKge&L`|DA@S90FnLC~QuCSHs%?qs>N;Rx0V&p=JQI?8?cA`BB{ zr*MDIP|hUI*^@*}X^6;w-Z%=A@ZqEW)Zh;s=rw2!9YY8H8Yks6R@@04DlSI9#*0fT zZsC23xsPu{wupj#^0F)&`(k9DB+_9{qOgRzBRi8b2+a)qePE-6#$3)&U{_Sbr-4+_ zIV*TJ@Ooy=YxSJtzKMJGCxY!r=EkCt$YchY2X7;%ao<{n@sJ?KU`}n*N`+mUw#$e} z4){rdMPJv$WW{3NFb!4-OF0;=T*w8~{N$b#MYXkBPj_!y%XydVo`Q8I4Hh>1RL=e* z>xw>61c#v*$~TlZe+(z1!N3eC@b5YJR*9{|?KwjQWF>ot$Qu>rw}iuve>()UBVSUs zi!$S^;T|^=nAr~JWq_=M%rf^Brh>iNr{?cK51=%TiEt+gE@y&hRdxwm88!a2A1Jez zpJ8tt&oLV4O@d%uRVi~x#3boL=ES`2l#wJ4UPLUh`j+lIQ4OSLfs6iB7K2XsAI^Uv zc4NVgM_0@)zR(O@+dDzxmYpu&YlmDPUUL(t7zA<^4WhRck^oCa|1?foh*j$p=tYAR}&C|B6a55P{2M5J5E107bAgTnF|m{#x%7u=oC zeA06(Fnad6=lNja_ojpl);Z^z&!wj~^MrDMl?R$T3%RE&ExIqYAaxMhD$PWrQ+qzd zI_nw}yb3`dl$dSReRK7GyKhvwr$gY{t6s)C0(j34zqjD#yepf%9|_&Wq%|b%pETpR zLAz2{wcOl5N*~t{LLabSekO9mqSu=dQ_%c1W>;mJ;K*mbscv+Wf*8a7eVoq9mkJ82 zk_0{VAENm8hJTwPXUMGs?b(^!M;0X6cwB9gGw;O8mnUxtL?=u`AHxz59p_USBBs# z>>vBgWpc}^3roATF=il-lQSPEOR3zHeQ9oxFrQGU*+T9|xtkO>;KO>7IPiUjczu6x zIWStsUz&xEUX_Y(D)cQXR)N3>2QV?saBs%I**3yv1kR~Lx4Y$uMIr5Ckk=nWY?cEL zz$#06n&MGANZ8xjKzcfO*$b4-;!tDntX0zo zvw=EjtNpm(e_{CEA4M6&G?!KYa;Dw9bv z7P-}TRZOD5kr!*tDBMc$FklfxWsqMb8k*9++#GS>#8+NeYfkIHU2AV-+p2XR?$|q+-mV<37+gGlUdj;mh)Q|C4ES29b|%~x!>u`K*X_iiowhRC;#YaTUg59lR+yD z1JDRb58eu?Z4aySk0})PyTwrO7|bGb5Zd@YrDnK~MK9vKk4!TT;-i>_31wJo*tg zgIY<5i>ePTT8JzKT8qSP&28OlF=rcQH$Yl(#z@?TyedXv7VvP$ori0m(>T?>RpDH)qAY0F3QW& z+^;~J-S~AZaK2^Z@B8Vp*LeIH`h&j*$9&7B4m3zh-_4Zmak8G{UlE<<29HP?#u`KQ zEPQM7DHVVk1W=v>DdowVDijr2O;KkD+xWI_GDDHb9xciSuiFHQJNfVcwzNe0dTG|% zGNb4f&W?l%yClm4`J$4F<&H7!sHD^NLtJ4Pr!f+QW_(Qu#w{j5k2vOFlqR#>#|1U=egsO<+xU1|#;)bN9xnco(m3hQsT^ZtUYVJJkC7#}$x&UJfxtbY+1L z_B>|1e#Koc0$-u}JWvNvhf;0npR+&n_Ydg&pN2Mkn*tvJ5=RE}oN_T3zrSY;x}`T* zSKnteSFDion)F9ewA)D_3BF_A?~_uPoq~8q71E!n_;bT zyKE!QOV%STbV{&yBq!aJ8}Mxmb#^ak42J9bJ#&9hnLgj|sZVoc;6|a+i67?mA|`W8 z;+rAg8KH~QI#jzsoXEEFC<^Ix5}>BueU&OKdIo#+gk8BI{8L2@MRV z>yW}wfr~V)b8Km*J=5!Yi+!)y_mN9qjUck?7Tw{dw_+Kh1$5V?TMeVJ-YY^$4TfuW z!dI@l{#6ZkB2JX$o+)-4*d%SvxwN`pKe@*w{2N$J@?E3^{aWJU?U$J-HjLC+#b^e0 zk+8Q7=l~%)Aqu?z`NC!BQxx6(JftCn*h}+fT#TBM3V%Z4yOXts_}ex%L=d*FkQbeV z{c=e|kDV5H9c>3Y^)s_d{QXW)ZHJ%aJ(M>XqILHAo)w+A%l3v0+S$&{(;h0VbCm+r(2TQ zpaM9MEEI4PIW@1lvj{WI3?D0GSADy+{Gn>$qbs8L?H)LI0PdCpYWuTtTBvqQ_CSep z4@tKinq`-VAQxUCAUw@1`k*UER_GG~D%EBlwRp-dD7^Cxx1MY9G-MF~->2*jdZrKy z_mZt#-)^o&O7U)6GVDJDK!b@`DWNM}pNmV+N+!B;>7EN?BDEg@37RcD0EO7Nfy!(x z>KKNthe{Lm{)+m&@_*qICA4qM$>4&J?<7XxP)USm=Qhhg<*2s|;1zT{!28rf`uGAG zkkl9eAdR>;GwV!!Voy)Gp(U)pn~epJgpj}0`Cr!6Abw8&6~clA!@OKmLhH$#`W#}A zs^Tm<>N|}Cy)U!>n;DUWWqEZ?2Baz~a86wyu%*TJ@^fEx5aw9ITrfD05!48x``rEi zHuiH50a3etr-kDD|Nj?;oDJPf|6h6iXAI^yScwBvZ=y z+SO+%Cna|-)M-p3u^7&y5cu(pn$MR@yKXF>m|~sl->=Nagn%r+b^X6{0DmGy6N}%d zY%Y#(+KS8%+Sj(y&o4^Rz`Z|qqb9K_{V1YT$%f}MJfhC%w+9!h`0bDI z=ke9oW@dr_)tQ*zc1i?4K_(HVX%I_S!h*1YCPhePtWNQy3*g~GaxaygY388AkXYJf z^T2pDKo3a1$vIceVb+cd7;4PO;k{>aG&}ieI!AHzUsiGZVZZwLnkQrg&H$^T`9_)* zqNp_4?G&acgw@}!KF@Dg-vX)Pw7^QzE1Sqj`1t?n>LaYrOZ@%q>XV(Tn(BQi33hz< z#9d-Pbk(y;_4R6Oa)}Z#=f~BwRRsQD7U6m?dQ#)A`o4??+!E!KiRw;NE7AjEWO84L zMED7Jc531BesdNM?I9cw$Qvs*n-}Rkn3~-lv)ULoT&e|bFE2N{^G9S%<;{voXas-O zR8u^fNWZiSVs*`fYzMs~r$R~RsWPEFUv0mTBG8vjH0~eJ8@4$HhgBP$VT0f(OP5Hf 
zK+OL|5sigtDuP4D2V^yY2MnD39$Xiqu0Gm*3l|5g-^4x@cFDuXJwk20k=7S;1*A6# zl~|iZ_*d%tdMr<+6-6YpT|p1p#!o27!`nd@Sp(giVabtILHIO6)pQoUKH zjQgnW0wmS_{R+FsM=85BgY{Suzz@Y%u_v)IT^1XX-ebIXRwTn$hOFz@Y8u!VVy`sl zev488JL6LN2n)SL0tb(B5Id}}iH4$qzY+?*Xx$o;EbQ=~R7f@wz6soa7?hQBtuRe0A*zJ};*&gh zArLu)r{zf4hYw1Hgh0G|BCMM+=`A}UmQl=sVtCJ->bZwy4K{1DVPz}>H=w~_a&JIC zvh4XcR`u}VIM(7hVH#PLKvBSLOv8{q05ECtzj{t_w$ds@WXDK+L9O-d66ij>y}v>N zYyCo0Vb!>4bo!79A1hyIdxCtGsggN@41>1l+2YTfa8F-??gsrw*-np%~5Cujs|zcCQe zPf;rc%Z#7BOmbGp0t{pP5Ly-7wt?~lZ7^Yy1rkaie`fpNoH`xo&>eq`gzD&-SO8Yy z9X>wwtLc#|-CXOi`ow*Ze}PvZlBGlx;1xvJHvK^T;bi=uIhwcq!!q?ze11d>|XIOeuOYK8}E*}m%5c9M7d{l?D9+kk9>xhq&SVVlIX^aq)FbfKybN17@wf|k_pYH z$TUYKbw=UiEn5f#!x6y3pA=sNhS;;#;cV1TFqkA@h<@P1z@m{t;#^(3`*o7Ey|NNr z2DAyb&k6BJ%h2bQS@k^!eHR=4LOM5(4)*}#^AGHS$LGF8jBTfOv5HdPFL1dn&=h`GSdh|DIEmiT+i$)Y3LKrJDB z6u{d%T%lXg?1;v9P_&OBC<6eIK2H=~C8hJZF*F{DHC3EJtSk>-&~=Y-7PlD0*Nf&~ zH+RiUOkSu?`PJ+P*+1lvR`@AFxwly8@+ia=jO3h>`BV{GNCYdm*xiLs5rUvYlui^i zRu)5+I+cb?`eUslYPsNBQ}YOc+bu`wMKRHLi-Ky}$>fEy8P)^Gy^oLC{c+hhhF6}b zhT;y46|@Bv32Mm(zz4&sAPk%>^JM_)IxpS@||8s zl+Uhw1Yeib{!C%g+l?9CNkzV!Oy>6=+Z1XH1=TUwWLa&X-=Nms-l*}{1L>#Q5)aC3 zRf7i|SsuFV07MG}tlvPXA(ttE8Q(Z94R=30@bR^F{k95jRMqJ+Yam{<;~n(cNb<_k zMJvIxx7oP@u?az&)bK-y!Ar9blnUP~c@0YUu>E9Zgt%WE zLC6U1M_-vyQXBR+mCL{S$nqbGS%dt4IHCsmtwsG+C7T1E0xv=O5$lAf5VbRo9hfzM z?+h|O?;_a;MH48e3%4AJLA2MUBv;4Kr?hRIUl_>|MVNsSEFnA4d@?S ztR@(B%>D-SlhMS{{RdrZ+&F#%`aT!%*qPoLF_ZTwZIKAZnpAi`U_z5Ztc4euR-$UQ z6t&;;S9@%Il`-t>%QJ*TI03`yGAQ}y%WD#!RzAGQ({i~hnOmG&naF0`e|3RN8JbNi zyW=j%b|i2J6=7eJO9n)P67wDI5YEmKq(@f8()s~z9GoFA{0PaRd%6uwpb>+nLVpZF zLi~Qzr}3q}6BdOP$$n+Tl`roaHwP=rd=SR@3@9@uTSRyZRG`7?T?{kumgbHz!w{@^ zV3EQfLP}BV`jh3gL{A@EuGZTaOk;hW*e57#{mafgRTtA8I=cfT^?TeHTBICQE}~6jM#`fx42&~Uu2R$ zxv)NmK!%?Wg;iAUB6mHq&o`_g1UDwTl_}SWhM#+>jirW1?ixqa;@i;bwI*y`%WMGq zfA9g#f{0cekjw{AqveqhT>>;`9w^Z*xFRu^5;t>CeL$mL{n0v|9|i>8-L! 
z0MDvTZJrUPpJD7=0sIHsZ)|Nqt$KeP)`42A1Ofstl;)YCQpTmyGJN-(7YgkTrW`6< zk-3wHNt`C%vOKEqk<^}AX1$_eMhKHvf-r6-9{h4+;vc=&k6r%qYYf|{^fQV2E)ECM zBy4{Ts{I&jolBVJABHF2GeRPuz(UA8>=vwf>Rm@{Qsv_x2ErBt8qL0sG1&$R91>4s z&sgc|u4}G$eQc`PRW*yNP{OORukfI~H}J!UC`B0RQ%o;L?5CAy8P_>bbgn?wwCGQ@ zCXhbjn+Kyny#?z}zc6_dLB<%Y%+1f57+M6csLb!c%KW1>^UGYNP?Oc@XQ?wYQ#F8C zk_Yny+3iSyTFy9S^0-+fHaJJAHnr2Kao=N8*;7fRo!4;Fnv85|le7j*sqNxuWgfQgyVPoor(WHbKQ}Nj)OxN_T znRz{2-ZAW2Zn*%SsHw9bfY^}CcUK5g)e?0~6ZN$NN2FTLnU}}HJ7>;t_c{n`*iA>R zR%PCm(g+1&F0=&}nQZw*5yHPH;$r0&MN}9R$l@x7%UaRkGFA8nYh3@Lh^MT89^PLR zv6xJEc@FM6zW8w=vz;2aul|c7Tx_U3GJM6WcLs^iVBVaUu;v-SY^nFK3@sIU7cKxd z2Y>Zyv2=_kI8?cFpbJg^8M7APptFwpo9>iX#8&7eDgEl8*P3pXHmDYbwEqih`KRc- z!3bq-RMnw7`YN-dtK+}>tp891a0&)g4TKd@8UvpG9I$97l9hTl8}B#GI0za4FN#QI zB6l>Ol0S2X#I51!@7vRl5E;oFKOY@p0fBbvnOG39?LUDu6SWUG*>jj~=7n3P7?vwp zv9$0qZ(Gl2)nXxiwo{TU4__m>C>U^fzi1L7_tK`JGhmv3aM0Wbyg4Fw4Mu}>YwVa6 zc)~Gi^pm0D`Wp+ls#|SVW>+m3Ylt^8(Uuk2FylF&Rt0&@WLUK}^lst(h!nFZ^Desk zblBj~o0*t={g@7e2b3)s)bkx}gs>c6>}xcoEVS``?T?mR|XHgB3uV{;Prh zhM#x%(Oh;q)3?$8LytNcHNk@|#$*C}{WoB)IcEq7UCxFEmYLrGG--i>5taIIa z4n-zg638_zl!Yir3=$Xcp~>G+nqo~8Eseh-Vy|*>u;oF9Yq2;9QhV_WqJW5{7fAb9DXSXa=}5ojcSG|#X_ z|B1%6(C_Apg6TxQQ1g%uc<`Z)x?oM5ZIufe-Z*{v{gq*KbyS4TSkW~$&@e0G^!Un| zgtMyK$ct}_M~9sEmy9kBRX-T?{QO*lrjPNQ0CCOCZUxsogx^rTQCCfQz_ubE@_(Uv zy360!B6JiQ8cL%u&SJ7%O;hfU_KbyJ5wQU@T@}%8-dGJ@YFfsBWNV5Bt1g zbnkt~90mq`3!hdQEB->mM(NRP-|lYy76vz!cFG4qy|?}o5vVP|{}U0NZdcNd^)-`t zg;z{j-6nhCiI)nN2ty3y{C)q4hys;S$wjS4ZpZpj8UXZonFmuw%)cJ@LT4N~0lPN9 zMas)ynl@31zRcSTQ^KQcPdj$jHYGl*y>gP%J|#4sKyF_K z{##6*`Iq+(M+&EufvjG2T#NjSTZw+KcR7HcZ7W(yz<>CF(04V?@|}qmn|JLAJ=S#}0->nFOy%O->Qm^y2ew>`RB!o|GdR|9yX^{H-DIO8)G=n{GX!^<`(nW{4l_e{7A+i7z_16rkc?k9Rd8 zHC@a8PeeE{n9ibm?bkyWvh#Ohg!MrB*>!||S^C*@gaEJm z#qY#WYm_C)BXBiZ5laA0v*8qGO?Y z0BDCFg46cTMJSv$9K0@^7+}DT7YZ$1Lr!<>;C z9-94j5468`{!G#*alk}-{+P+t0hDm>z}WA|-Pi@kJOvCOej_XF`cM-ddk!#7G$T&M z5K;EF)Jl~H>pX}F{z(I{aD>@{@C!rof_&z1j28UNMI&w2LhqCgwh0uz|D&92J|ekt`dBh!XB1h43c$3%v37~+@4 z*8T}Epz?nrf-e5q7Z+jx@CzbaN)1H543oynTMcLv6wPz{MMl&)6`tha zimtaVM3WUzsY+_vlyH)0GKzZw4U2?-xQ!7~ew$Pg9lPL0S|QxQL`R8hG*04(iNK26iN) z2D1rOf%QSxDL(3d9-{U?4>6uT)N$d^PLmriwT~TifQ9^^Y$aiw*!SYn5h0vB6P5jt zJjp@JC=lenOwg>dyTXL*-0cF?rOf&3AzYkVHG#iJbmggrB|-t5Jn70TPyZtNPEEf8vx9~ zjC)1-8I~#(!Qn%(ejD|o)itR_q92T{k-(JC?=!{LGUUpmDZB%Mh3w%{3pZqrJ3pW*IG<)bEyY%of$Q_MwnC}g|w zxQwr1Q1+8$aph1NsKo;WIMxno4&kz+I5u`-!t)}N2IjWDsy9g*ms4?7J>Gl6pcf4P0Vx>?BGsvYFXvbhVdgxQpJKECcVp!AMt^8?(6jTs|Q> zkcPLsLh($qc(sqx^4vFDh^u$L4EOdVPwbx%)D`lSTJ!7g{Xd2OxN_ah5xP^DzAC+j zluraG{a3=ajbGe{OYcFm>Hf%Y>}obh!LAor*Bg`Zo=s^jili4GFN4`rXDq(eu~qZH zCm<>2kRU(#jZjJ*o;*z}$m+5MWwHkZ!)>6Uu423z!sm2eR!HT81pa*mTAa~7Z)=4u zzoSywjONlamttrx^fNwLDu#ti8>(I$O}*LF7!O8h*(-U%qJ6N~DaOQ*pxwXRePTo2 zX^{rT*{4$~{P0N0TVVes5%1qofB>7It&tr*Y~}u7d>AUk_DOs}t=7JbYY~K6W39?tQX4-~Mr9^{KghJLjq;^5$QGB;`4Qiw3Na(>r3Sf8ho#Py7Kgfufs@ zpV_%gwAEF~+cxY-tO?|c{mz&3C(!v2sDJKPxL!o%u7{>TAa7Oblo+K-wK8I=u8ESc zA=Ir_=^&}HLZnX@O#_&0V|MkmG~{p%GFzr9b2Omvk*4n)Znl+pMId^7*APd>(h=8c z=nGC`zY@TW0_ip%jWJ`gy@o(c1p->XsU=4BeSBq$L>olg3B8^vI6zJzE~#pEa{e)I zhn*-wKUiQ0dmU#0n;@ghu~S_Ngs@X6q+9ltxFLfh;$DJ^;lR#w$*VsU%!))ezn!Hk zT6}=_kB$5JcmHqrHFDsVQ%eL4`Y9SJ+y*l&pfrUI_s*&Ut&Lv@XkDAWeVxmMu*-yq z#aKK}_Ws$fyB{+}$Fv6QknA6dT+^#Z1SiM@VKd}4Jq)=Nou8hV}6TJY6S zJ)ZF!B^AgJF?xGf7%|Tht_|Qc?Ik%eR>uz946i`t$#&?|5ucBPSGZW>?Wk} zVF@S~YTaFs!)8!SBM`FH8|`MGTF`s$BiwDL-p0r-@)b{d*O*Adv6g)(Ai4p=Z=>GB zS+8%p0=j&^35KpDp_=`c)d-tu$d_$V1Y$aVC6k3bM)=u#gF1@4oLlgYqMAFyt-%?l zc0hMMIO+88ZG5xoP}s0YDxCJWQTlYj7v(=R|X*3&(0^JfFT za}2wBx$^fE1D`vSDY|soocDCS#y>}jYP{$ufmMd~8 
zX(lhHmvqb{_{$n<4Ngy#o-#~Eb<8zlp|=iMd3?|mc)7*myMgJfdw%BpXEp>X8^6h< z`|^QyYToi)^|v5hi(5I4`Z~^@RpIxypa8mTh?j*dmkmHlSyPVg>A`g0ptyJvG z4kBc$Tp?aZ$+ja^2&h0C`hp>_u^NXIIeJUXv#Wm)%KAE6y%@@-YcR5__;jx$SVj@s^d#oSq3nbPjfI6Xt6kSbP02L8=-JLOJG8dwy5ixz z4`;ZO!U4all$xoc;XNs`rXSb4@p&p>L$9a4O`DQ_dwiOUph*VW?;?Z~bfblu=)%P) z7atx&?H@=mU5h%zc?lO|0|MTe0{P+7^wWtnftyhGU5%9=tC@R%K6c{=<&g(6u-*K= z=)DUPX0}=8)R`YyvcCE@%OvUg6b!@;#09`nXqBifSAYXD-)=#C(o&;J0+J|OC)&-w z#~h6lS{{`~vx8YZn^jGJD4`(q9{_LJ(jDp0_?+LioQwnzQ^<$+2|-|GF#bS?c^eeh zu{cLT6Lhd*dc_A9dGzYTP<8sCI6dd?h9fdcV||(WF#4UL^skp7Gu#-yOX_KomyY?v zIN4&ey)I7wW&I_=nA5o_soe7}?#uq7*zoUugFW_KdP)X#15trrw*X(;NWKvlws6RrtA$xXKfx+6&EZm+t?RjjlO~?cLXy@^r;-&D3zySO zV&;xQa>IWj*FyR4`v=CTlFe}ggqkz6~$ms!-CD#|JA!9zXs zRCbip`?AnsYo0NVuGm;q?IEMIt$pG!6;%M@-7c4e)0e%Nh8oEadFQ){Q&c($rqxg`>Srgm>bJ`@vxj zy=PYC=EfX+vcS0RuR43FV@h{}x5WLd#BYE~ItI-MX9vIvr)}j4a`&MIor>ik@Y(`q zDqqyeM+P%1rDy$nV8K}?!U@040G|`P>p857 z+?~*SmcAO1e)tkxvVb3Y08lQET$!732lgQs4@0&b^-+Q_?7QH(h?A8GvW{(VuVBI5 zAb;W!tr*Y)TMQ+ZK~-atK)KfGOtlwcgFQ7d0n&MIF5@RrE>=k!w@+ZDm_@Ji+N|6? zMdVL_kBc+8UQONaep~H761oE91(SGfo)#7Mz(Ri~i^BMwzw{D}FykTbe-P?DV6-q^ zot@EImMM$ghiwF?laI!AR1B9@7x8~1>-p3N&dyi&PaGNzif{*8`+Eug;0B9&G6!7#eL}KK}l7y_%~q3wvR}1tWh)zg|V%>NbZ5< zADRgWOP>2IX&bLwpq%5+9tbRb`3*tMZ{_(D_Kc8fa|zw6kMOP)559(rQJWtv{H2l& zct`9oDM6N>X8dFSj08Mf%Ub^y14uSZM#D1 zn-z{E+=-3)&2U$Ds%_SporgqMGN%Y$p+*Kqd)p<@U7CqjvwkN=n3rV~HU5Q>c}?#o z-*z&+VwR`h$NnTUF0lp+(scE95;$@fD3g`OiT{lCR@&J1RbKUcQq(en)XGk!$;#JQ z-{fCGeQ}KC75x%Oqg{8Sk_*nMoi*z`-%z{BG0gyNG{V>4F$BPb7bJo$;tw!>6T7#% z9LMiRq;YX+AsDYR?|I)kze3A8TCEL!e!|7VualUk*EXW7_;HvdcG0ZY1*Ut93uozS zV$l}Af@MG3x_#sxD@u=jCGBgd(Xu|2GCBW$=sKt1P=c)s$F@&w+qRPv+qP}nwrv|H zwr$(C{@lk~b^odAdD{=u(>2q(Yprjo3?h1StC=eVyIyn-HtGvGah@MHUuKmVdNdK- z$L_cQeV!Xeb%N7ql}z%Vq6DJ{X91TtKa9C696aL0Z^8DdGzpOJ%r;9Y=T)5P; z{Xy(ZgM2PbT2QLnqwpMSG#@Js(1zeO9Z;At%I}|s+wGPfCR8LeF-D6FFz>}W_JZ-f zl+692hDsid!U^)MiUjc2hVt+Qhmuln;LvT5wosu2Bt{w{g}kJ`9wuC&9`jps&mrZ; zf^HFijB2SD_Je(nBq0#X0A>b%#m*t+#XiWu7|u;!e%rggHmHZ-V89wT=*Yz6Z~nM{ zd|g|6YXqd1Cbw-=A|Haf^Grv*nfl_qntI{cQ7kJav1y z|G38C`4ocF!|CbaVgG&(lOwZwdx`$M|9EMh3n_XQxIV;+8j)2E82OC;59Eew?vg1%c`kfZrC!MkvLL1cOzo zY~hB0T_!@SzKmTE+(X&}hL!hd4c?bq&My+UfG5 zURqJ0HpvroxjuKC+P@BgVP&&hVJ!QjOn_8fTA^J}i-G6NiL*YfPBVwpkD!1-l`z8* z_PdDimF3?U$?a^kF$jABz8`O!SV8yEwtgtX>U@{n73prVzrp}*g28Da6}x!8h*`vApx5-iARjvYhkQ? z;VC)0&AtTGrXitI+f1WfB}c=zt~eryR1!o*CiN1AI%g_Big<=%WBpY@2O@dD5b^Wd zKp>+@FmJGUKrG)#Y>Oyr!e)dJ7D)1%#8pGU8{9&q1~z%TH+ z_SP0(084m0?*8JyyqmZa`MI!>WjwtV!My1G# z5fFM85H@nHRa-UA&uM=B1&Jp~VEx$Eig&2t}r zxn4Y;wSWe;4FcvF8%{J-BRk)_7$zdL4>l~DdNqNEJ5F&}=NNO)0!Aiu@L~Xzr{&X? 
zm;$if%g@>fTmu3by}l-RE(T6@Bubc5teR*rl+e3{sH!CVVbD*b#aHyBQZ&k(+4ZT# zbNtx*gcGHi*aZS|ijNI`^I*YknUs&+@&c(0`#^!tjA`NeUPjek^_Z8090oHGuXiJZ zKZpx2%+xKd<(G3QbBJ0b&tWEYlm37rWcAmk%DtF^tBGaAUT*UNomeZ`=<{F`T!oZ& z6MUmW_6m!lq7^cF1G02dBKH>pC6(=ht#}}nIB*76*J6>ts#LU;1cn^~c*82_AU}%> z0y$_IxUl5jtj!Yu6O^$c3P*8gsmjFj03Jh&7x&t zHEOyTf73${`UF-kIDPg&8%dcAIzOdn1?tU2Q+~%P5V@}zAjPFKKZ4>HCl*T^{J0Lg za;1#Lzg;F^)p*wk6;cvfs&h!PFC+D#1C4>qdt>9O@FlD72;dFX0_o2nSA*`=hYiI@ zX;8fl;sAj(1j>VrDgz@AO3^i{*WX1-t@U8}g|8hQ4t>e@^#&Ylb`dYrT$BExF^VLQ z6O1sa&opX{SCr_=wt!wt0R8dHN_?oNCr*r=V%+-+>FxsZwYdr$%&T)dSu6!QRj|zJ z!9Ibk|0o-8{U}}jGul<|J>ZVo^~6PF2t%Y79OUW56P#&`5FiS5StPB{0;`Q(D+|Y>Wen)| zwQ@z!Nf6uVUp;$Zp+LZ)+xn7=nM^IGUIqXpd;6EhA#q?haimWWjbW`+;5^$?Gm(fl z#v;W-Mj|=!S0Y@~AB82Hyfzv}TJmdRAZ!taRNb21D|bL)v0u5p**gf;$9NHY!Q8D;6g!Ps4P11z(N za%-*YhzA%;GFXaSP?6!ulbONURKjovdBe|4y8g z0S-AnEhE@-?tnNgxc7;>C)T zBu^D)vq+${l@MzX%~wnkPU4}>QTS69Tv>l{BciAZ}9CLf(gg z{!9j}$HZyi!*AT_b|O?mswso2q=QwhD5(s{StbV~M7)9b4Liid*O@7Yp8gQZB zVoec{nHV7IhrJn2wp>exCe+=iQ!*x0*!WA7ikY-d99F2PCY6mEE|C|iXelbDws|On z_nK^HXqSdJg4uO9HH@{uZceU?bzxuXR<9#Di(knV;=7b|U&L2#$!p+KswSti9Tq#c z;11W{Pq^!hBHc3;@8{|)Lbfozn0|P|(c?R8bp9O{R|;&sX5TSm9EItImEt|4G5agz zV|?1g!8%^`s?*)V`lm}$uohVxIL_W3fj+YTAPQI(#>$`6M}O>|~5852>= z!p4q8^62$=Hi5}?GRPl}y*j*yqzvKhSdX6)WI~B6FyW*v!vbr=7N}buM)RAS!(fbjKJ0_l z2ZOq`)d98TR}DSa>*IIn?)tN~jf)tFzH<;cLPkc$gDv*^ot6dlzVU&6|6U-=%!hw| z*4PCf0GDm;g~w~z%jNEB1^pfy_^^Hz``FmU!)e-$&4q>ZL47^6k4#P6LmR@*;X8Z#R@&g zn`(H)1rdo}jde{NBOW##Hl$f8Kb1LJsp#&8;6fSNknA;ZGhxwA<8Irm4q&bBbl z%?BIxSN-)OOeRC~JZn>lRp?(+*h6mN0$VDY{OoCb=rh2rSU6IN{M%u8s72wK<1|di zgRN+LK*UP$iiJZJBOQ1XSFZTiB_iILg>fozt*$UyCf@Yj>ex<^YHhsbTOUJHaQsW~ zpiT`OBsuXqf|k-(9u`B;)y2pr&gc!e;nUuaZ;7MY=bQyEyrFHUi$5r$3x7Iwxa#U& zG-9#|PzTIHEhLXzs*jO#S=iJzc&ja8TBmI&ny{2Rc9sQ_R+d;l^gA0{0y}sVEe8c` zS?b$(&Y;1oEz%^9DlGM$CTEb>pCG_ov~zpy{zV3;P_dvJBLd2wqUq!G1SwWChXVoj zwAIRK6)^HZ2;c!#sa~yE1Ccw-knJa~twFB3KDP&3=wTA=k|u}1P!VUKGsye_b}Uu6 z+MhG?!YVh$szuc(*1{?szd&75c>ylc>sIW{T-&SEGT-W($CXgZvd+YH(vt7FcG9Fd zt{ppGDS3UIxlzIC8Q8wy^~`-b%k$Nhi!U2;OIf=q&?g=xd?Hh2x7CDD6rJy?G?Y^3 z^K5U=q~N-^`{NBCcMXXslzfohoyhvd=!W%q5&`bzRjWCpe$ftrNo0Bfn#=I-RusLb zqFFeDMcFtY+2RoSz4g{I$@U*=oy-aBtK^BHnM?F^=KD2sXry(rPdS`+%7+z62nsut zt$Sym{p<&jaScwemtg*PQEByJGL&Ozx8roglr(9h0Hqy!S7tJE8Ey_*aG4B1i`RsF&``ozFw|_H80D%5;`@OJ#hoQc?gE5Vvt&6!4owb9#KAo0QF7)0J#6F)=1w;U)R~m+{*E{x|_!`5o^SL=Z=cBZB{3a{gyvH0*WAQl+e6U zkY~K;vZ8l%8wg{wx@}ean%0e*LR$G-*hkMt;+>_|dsloSxp5Nscy3{9Vc~Z$xt2`d zud5CT4BdUNVpHLaQ1T>+ER`84qOaA@G5F$Hv`FpzCdMBM zJsBq-!zLYXfN4e-y_RDB8$Bst&8;BbK?N6pL>Prh@=noOQ&Icfo>x-k36nHOOqn{D zjBmgUYXWFEH;g^rr{P(4bD?9x1bw=Z+typ^#8~MUK6I2ltJ?>%OzF(BE8QfhtDiMc z?i(hWG#A25v~<)Qi29n9NS^d+@2*T6Xk1|D**K@4SBsW8LoP<2E7&d&M_z0UlmPfi zJxz%B&Ag5=_Cj0 zv%s~;>1H5li)Z>ekN}-jo}C_;J)Ye3yuH8NU8MeeW7~c2&kYR?osQgm)A+o>{A&r* zfMmFsW2@DHZ^j>7DO{QH#jp?vsuJ7r`SP&CC^6*m`2K3lCly}BWbaw%4cTug(RVIt4TJ$NDcOBbjI6m0e}-En zLc%AZNB3pxkEVVDY9!w~lZr#jwwHS`{ADwTC|CBLR0N#gLvr|C*;=b7#b@An!1tWp zWPN2iGk0b|iV0^JL!eRsa1hV|z!ZRRd%r%-{T~xz;8&c|oumx`GwLjO z{Pw1DnyE#6aE&VpKfo$Ky%zt<3Lu-)3Bpc?DrR zr(Q&u(HQMYyHsPnm!;Jd0H}h3U-?g{jIJ*~6i(t-90(ji4J=2xOOMfC3sd-W%x41u z8eqjr1+K4#;hw^s8u0O`m%I)``iK0Snc@PQqDXBJ$_~T%C4hNuET}{M=vHFoLYF-*ea@%3jrBH7u5y3Dr!zafzz5!sF1WF^3A`>*ifL$k8t@6 z@0qmdiKTe(u!Q_3p#h;5OURbBt?Z-wTpN18fWM+~$C zWH5NjBK`*ara)R3Fpr}T?l}W^$+`N~B%S2f8)2B3cY|oGlE@%K9E0=xEe?hBK>>WL z^S1*LoEYpSjguFsu_W&aw6Z<2JkzYFH@@V6bOYUQv~qgY87bEw$qFP zE{8EK)CrmqTJ91mk;J*gE#FTmFgIjcSz#*y%`#qEi@9F}M>H@ung86d$F{D&!QC%BRhp?WDfPR8t)SE2 z8?=w64~V<;r^?uL5_1()I59}jiXWe5LOHjt`j<+qOs=V+N3KDE?{8m@Wr0xvX5+Aq{L832o00P^~?c5G}=!nx(v$B&K5*h_i$p z4*`J+X||`u 
zvoZSct#LIA3zHddh;c8Jwd04ZaC|VW6EKk7^Py!BI&B}`<4L1-qHR1h%<3Srd2OhP zxjNS=)zJ;d6crcazC*Y`@i@Akm;W&37CkVa-`Pc1q%UU#RJ4_vtT_@XB7VERKu-WtsVKO~|HTZgg5cu2Q-jXQ}1Kk>-W2kH8z0fJ*9!qep$E2%~2JE;_lD$O-QSf@p$)l|b2V(y@ z)z5i@flh=cQP1#+sPWtYN_SV`(#sSkNCT|&ox>W2j*O$AOe@EkSFCmG#9yDo>O_9u zjGd@JAI$o_UK-pPzY?QiHRV+s0Tm_Y%W8J-iK2+5r^>+5eAtZIEDVN}E`RpTYS7q# zqiIC60GbeA&@qS6F62+4hfDc@>Tn^0Qb$i~y>nSq*ff-1P5LL;JcxE-A`tT(1p^IAj4uv?#uuUx82niMBi97TR0 zU^?19cw4e`yLBxtM%Us(@fCAkr7A(arG97@cbR>xg~-5g3~0nEEVynLVyFQ~MQUJ4 zY7c5%BRuG3Xrazic~&fa+R7kF)$FinhHZ^~DFaVV6UJ`%sN{Op5+5~J@VVC1icZ`L zDi@5OihyrEYn^Hr%N}cF;$3eM+ky#y`l<~QvUo0gj(p4?Of}?&T4i~OJaIv;IS{k1 zG#(SJMx#Qe@EZ7jdLt5s5Tq0^0?}#clnjqQww~HWnpZ*xa($vk_i-7;Z5qqHUZ)ih=Rkbu{Yv9Q#OYY$nZr_H16<*RKuL z>`GiX7hpr5Djv@}HD4PtNrys7PM-lc0S6{*(*eQQ!N<5Tu=?0OX>QHubW9_L z=z>S~Y;CLYRqY2llC9CFc`7Q?hIfsXMp-%iNk`iaIMNmX5!tyv@OwN49zxGoP1-W;vMcoFUf7n1< z$ibZW!0+cS3b43Aaj*se)|{-QJ*{)Sjly?}?%f<gm0xQ!71%e=+(Gjnf)vgkE^qVn*WqQfZ0MOqZ-ChqCnfIe&Z&v@h~- z!L8FM&Pn@i9Y<4U#dohbRR-Q?+h$LKQIR9J^z&da!}=@#0yZiCQ}0>-DClaDKEgIY zv|$H?VSpD0$Dsv|tw?Ou^LZ2-L#USIm92{)lFvOk2O_Uv5Ux8US|I=u7{*Z{YzGER zN!tu)5o6iI+hG5#-EEH#8nsHac92SBdBP9c$uK51ZsS(g`b(~~rn@>v3?>La_Hm*= zKvKTfD+X84AY;lSt-lR|?zHlvxIvTp|7p&6#fi;Yxf+0t9xHp*ZTifMA7qcgt z=^Jer?u19;$eI?5K9@^nV9O=z#*iEyQ$%@7z}|L?MQ`<2_p-&dOK`L`ZJ_)YDblt& zr@8%TcBT~0C-U&dngCMDCC$CSx7Ur=7w#2nunsxSUT<$i;CdR*DWmvRP(7Blgo6qm ziS&_igZBgh=`epq!Qds?y`~`Fosx4Q60+95*ewzvAfr7|-NE!fay5F$wsEHdi$&-e z>~yU*n!0D=NQA^*d(UXjk1HdffG>z4k8AeTIc;qnkK{Yh8lIbv&7-q9BT&e;tB|dF zgz7%~j;R@;xPSk8@BwXtx%B4R>_R_rBlPcN$pDb>S?_vHMGlBD zz4>q9ZLLbiQ?s=w@pIKmK3RJT>*Yp2|l zJSAECXrMHBPNN{X;2*bwrs|)vzP*h?6SJq{F4h%pzp{B0fBy^+AM0;|*K9fPY%Ps` zn50q`yh=gY*R9wLg&*8z31zu4&P4XDI9tr*v8Zk*>*U1SaO;Yo#okb32cA195cjj~ zsW_sgi%po1HKtl730ltSwPR#SI=~raID`#q0A^8HYjdNr=a>e=CI{MVqjF5{e-A`H z0Vo8F<6cEu2~Q7>u9dXw@RoeUEg;N#oH$kj&2&}@7nxb zxfdXI@Okbf|BF(Q&Bz<@PTAkH=^PCVud#?RMQOm3-7FfKR@^|Mh z)Qyi~@T{(69-Gg7%G`HW>=O*L8o`HcIdK}XzM^P>nB2A-wFaZ2I%jFi)#o2>Y+Z64 z0S*~8)05O$e?aI?MG0?xM;r2olbXEbSI0@0 z+xh^3$0@b|RkkFG=v`{JhOi3jPnf5rOi=6dF3nDR~=dL#)+>eR#WSe8qq^n z2Wy|0s5pIdDlh2-7%@Qz9^s7IUD1 zca}WJV?;?On`-s5J-kNvYMHuel3Exu=FIJ${J{uKZy#W8XeZV&d|Ip(uz zB1(>(B*-lJdJq+anYx4}Wd2n_ZJs>47k9BpPT0sAz^-Elc*>ng(Y7YGuBCNWICD`I zfFk83ogui!S>-x*O}g6(1Hyq)UR_`j?>F|!Mnc_~Gv5=|EhnO&fa>Y%WoH^<`2q+` zkds0xHFPrab2CmKDA-(w4S>|BDBa~8?XQVjaY8c+7>m?ulX>8!3%)igd$D43Wdvi$ zniM@^O~bTzE0ZV=%kz!SQ5zm5oh@7P^f5Iz)olgpHt<2G1WUp2;*x1g9rRUHovJQ`H&NNDFYc?Rfw zh3QaU+Lc{-9``K|j%D7I zQPaxGBE-rF*cJ&s^nzEyCN!n}zU&bVPm-M)iEXe4LCmWtlkecfZNRoPh+oPK9&;6G zmYgowAupkY?D!dR^uQkX2P*(Lc{B|pN$LZm-36V4#{+4~U`(-< z&$CbAOLLjD9U_$ff`o}Gaj0)K`a=1MS?%W1FvD2#B^L5*tOmK8jRux(B7cFa-(VZ2{x-JO)1h)8lFgjWmCf&X%Ne$r=GimMfHrg!XOy=z6~qiGx9>nBBQH@KyNGD5 zY+%FQt8yaQawlVK+c=K|x4?rYcXn&QTuQ|KcvlIUxFL~IA*5wsYLkvBGD7zdf7?xb z^`&JKvgx^Rpbfw-4jY%gsj|Wde&t;X5FJS{b4}U)oX!9?19-n(7UH08gC#0IT>l&@ zM88~*9I;3qo3ncbxOeS$AOJ^ifatHBM^vxXk?zDr%+;1VUI&@Py=EAJUuWagD9Vhw z58=NrBWVm@lE(%UgJTep&Cn+lzurR#Vqjxp&4`1H880lJ$MC^M0~pF`L>|>~+fVA{ zZw>=Gm+CvS6f5-^n4@>mW0!mtj*?cyDweCn+J7Y9RV^bGj7Nz3T>0mCI^JCSRvK$- z#?nxgB3@YSI)JUmE>g9s(4dsw-s4!(yhTs=)v=^VCk7 zaPEj(BzcvmGr}sBW5l7$$Fa2~DLV9jmZ;x=epC`09Y}mcVNMN85No&AH70H+|mgz<-))_24 zPy9&39sQ@jF)G7ubCQQP5UV&hd)CoJ`ifZ80G*b~bphVlN)FWzhht?hOFqn^10o5< z=cumoIR)oyNruC{&;H|3Gcu*USPyre(Y0w3hvtFH ztR9%Aqv>&(L&Kxe(+slYcmFNY1{{)TLaiX12tK=uxOY)*jI6O^HNj0!{r

#FUtj~+hfCwV2*Y7GO@h>5pe4f7Xw@(+I zBh*dtE4-Tt6vV|a0icc>p4&F=V2P!X6mO-MQ%Tu1@8mjR#7K-aA|<;$gJ4?vdsRl` z?-a^>P&eYX6;T{?D?@ubJu~iGHITr5X6VkZ5gWCp_!$$nHMNDN_>s0YGAL`R!?#cZ z`1NJv7~y8Ap$Rp9@HSrYU4viiDNL7Ei|l331BBy~I=E;HJ_L#?9_baZmo4E-iJZOp zS?$(D41e`E=Glh4uR3n!qRa{fM*k)686c;&wLyw+a1kh8J0Dgsf(xbPz5C*K>ZDf4 zaD$~>xN{6-mpoy-v4~J@GZ(K>r>Fb2c^(*{U9vD{LcMeitEFz+>Q9iq1x9+`YPI`M z18I(!)u6H+J1^hc4*}@bU{eS$ZfUbGE|lYQ+b^+6A}`vQM-p{ndrx%W{tX;@c23B0 zRo*pXrGameZE>%6yd4XqxKjbMh_kpR&1egVrzxp%CGP~3Fw*_$Ru(@ysi`{IGn)rE zZCv!}w1MekxgJEEUTnogV*>R}=Rs+$=I|$yzl5NanL+v9V-jL8x!FnI-|<;F{^z|A zqQ+-FbF;(o#OL-pWcXO_^@E)L9mw(RYVrd90w>l7%!n}yu{{VJ$l@ko+x)%ia~u7l z|Dv6fiz7P{NGCdXW)N>Y1dGX1(kQ4`;2Z$>N(pY7yn$|YTh6g)>PM5UBchM)7`!?z z{xL2%Q^>B8G#GihDLjI%`PR1O`#3%lf!BRM!uvLwvU9PRm*90&4LPo-4zr_AR>b&{ z5t};LoBH@2qJAQA+QpW13|PHABvy*_u88L?=O8sQ8Q&he;2u3%JaeU(Jx5Z&ib3Sx z$j||Kyz#c`oP1JFjuJIfON7tM()7v1L8K<~Coq=dkdP;rmgoEvD{ZTi%64I%o0|%W zh`$79GSIakFg2F%sko%#D=15Wap=)ZF=Qs^X2Nf}(*$xd>5KPSW31Rjvo|1Qpo+U* zUDnHjlG|e-P6Oh6k6$$xKEJMpTSD8=hiF=_b)PrZ4JnQdAJ_u5292PFEC;Ozc|>Ke zR$xM{1r)`!-eIFFWV*TFl{bME{M^kNsLx@UKZ1vgdCrqRRUJHPt8Rhr}XBk}NEryUZSc7dA0U{ zPS@j!YnVriIl;B0w|$#S-?upZtF6&cM1vND6b_4)uvGXpjB=-_ zhHIHqAr{-y1zfj5#a>_9E`yoKDiY^n_GUkQX?3ej&C)^iar#xDc*5LYRnY+PzW(eZ`wUlURuVJXt+#$V{nTX ziKUkDR-M`-+P)RuQ4wM2Zb7<5v|`itb^^}+r)41Y-+Xb9?yeu6Ce8TsdK-6$&1(Z7846I+GE_tQk|t;3bt_<_Fd;7T?2 zMu*y^_*FIfBgK6Rz3Nl7uP;#$(v!S?I`qX9R)L3}6#gxttmuLSqA--4Db76)Tl7!n z&?9iid=`WGPgC;!cheGu-TgaI|A5lRs@NA?Q7&i=Fn>XqG?{6SsZ&%JK1H3xL9ds; zVTH=`f?Mg1Vl>znmz^FpC2}IXz1+V??Sl`nvTMX=iEio`<|qgK*Vp^>G2PTlg8LTJ zf%Dt+_Qt$HvgFG1XIfx4L{&)b={Qxp8zfaP2v2F*K_^tU^*A+Og=0{^;xkw|p_#BA zuMl)9J%YJh#6eUM3p^E9AHhAmGv!d=s!SBfft5XHkx0TV{rm6llw?>{hyk7tl&UZc zuPNB$m2FX-p@|zEFS-_Lvt)H=kk&ySZr(p%c>gVU08w(n8b<^Gn9v3QApT#12NP!- z!~eJvbPb%%t&EHv{^L&2(zHDot@pwGi3s^ba7a<1zQJe5;P=#%W<4=ZxU4bt2MFku zQ`*?iAT(}ZX(3zv@u_&%n93AhGfm{d;8`_j?6BE-a&~Sk8#8L4T{tsh0`rtDt@8-mUMrCek#`ZRDhi{P)pGNd?k`R?c6+v_*|I&RF|b$h-$q zXVKU>Gc}gFJiU$OYU7+B-DZMirg4AT(rX-=c6xfc`dkp@PRFSC%WBG2T?4JT?72Mb zf_W^FayzLxY|j!mi{aI3f)LplwYE`7xC3;s*SVP16}4w7=QuIEqw3Wo_x`md?WdaB z?G+buEty*pwPabKUt5m~#^>9_Te?gf zQ%sv7$^sTVzqP>i!&udcFC5|O1pCzuD4Nc@qB;AMW zu)>I?(Nd+{PZ*G~Fd``2keW3AUVOP^D$Oi3?L$h(Z&087!k!^@C}Q`?=x%!b9Zw`f zjLcNEZfuMBN>$z@*#DkmTuMB*Lp05QIV65ExYry+lo~h3C|eF&uEf`C-nybOB#kr# z+!^#7p^{_Cfg^hiH)ygoP*?teqwR)=OIQHc3d+$ok~+YB+H>C$vwZ{WFcm+2kFwH0 zf0*g=h>1*tk;8EV`je@xBkBZl@=Er&bRJjC47(_7x+C5yqVW^PA!?c;KSVGadEGbG zJU?R(i8(GKT&yO--UiAkgkqI|1HW4t6?2Az&LHi~(bzEze;M4NG#mq~Z>lb7z!G;fEMY66Zg>!A^I<`8qLj@zd`A z21g2jv5Yv8h&mYqL2SEct1#|t0&&OOUJTYl_n-*SCZe9gw0Uu3ON7*NL0bm`X2@|P!z zI$Yo7=yi*cLrW1L4C9z@8Y}9%00jhadM#kv*I-{}(9h)7TG>~g93Q_$|K28X3of)? z^-pe1K4Ne@nvgj?T+2TavZs-r@jt**c?$&Pu%;TK^WHQz;9V9^l{cDRR~ZwB=TR%I zYdBI7ER~5fTGbIA#(r=K0S!6VNcHL8 z3MRql^dyxt!iq#^#8&0%SI~t;(#A%r7=2;p+SLsTs5>n^ z28jZ87V}v;!Wq3|nVq%AB@uO2+B3$l878QLPg-9u;r5%yq*;?RAR+BR6kB>irZzq4 zn%|#UAF0ABpZG)E;-*QSfc?>UNvg!BE1&u^&}t;8B7Or}wQ?heI>SDOY2JjdC_-pOd!M*)}|7pgSI=jIvU> z&JajzYa&M8OoFbb_M^n3_6<$V5PBKo8+#)J*;3u+nKStCMjW_*l&@vqMAxx>D|p!mF-G zBuumnjRwf?Ur!G~PhNUREdO95A?wkvD>{mSr%#zg7nhpqZaaKHU1o_baF<_PvPvN{ z=L$PTYUi7A;d~-yM@-yzcu_aWdQSrYKY3d>@Q;cZ_?bhbMpSZ?ChVJ#f=Tro{n>vO zF;)s?WRsA6=q6HVrHS(DB$C(xBVkR1D%rfY#nd8mUyv-cQA;3k!|&;>!XLiWmq7+! 
z5@-n}I|4#_`pz!zJr(9RKDPK+u1C5aXFRdgDs&DpcJbJEH>FPAi@J)VOYP~>>rT?p zh!cO%a4tM+pCre6f#q$3s{^zEIqdPtmPU+0G7KeAA(v%)D16o99_KdBNR3USJ>_AV z7W+=Ja@Fe+JnHJ22x@(N2>_D-Zt59hq~0^5{h5>IXUxQMI@4MPU%*}wS?6ulv13Cq z7#>Gx@3%DcRQX3E84u<=Kt?lJCE+e+-+jTmF8v#4!XmY)*-Y%O*LDoGEm7|V;%;o;!>eFm(+ zhs-R{Qe36#O%ZQj4QTDgzyuBo?@%ABPbW?2mv*xIQ~+Mh4|aX#jc?d$ zXzab&?DSuU&&tvQB$C49=||l|Nw4=={X0yY`m3 zU&LI)rtf7_a-pOq0rPT1+IGPDq_biZIK1pRPxJ+ZVUGAObG0*G7RxAhnLI5Bg1!*4 ztt2AzyNK14D}XL0I8h3>@zZC3bYad6=0m9a1IPpfs|V%U?=j_~c|tAg9A(d|O|w6Y z;EYzq83KI#kqNE_*)A>YRm=E5mJ_WV5f=*oaXN`rR6uZTVtiwhj=wvfSA#qGxgT;> zd+re_Og349L17N@44FdxtHXS0k2^-iRou@$5F@%p)CEdCYR?3e1R~bgjOcqvLuNF6 zqMX8kN%ydGKV0%Q5YG63Kir-t$e|i8aNI&d0f3kbDVKXByt@4amDVU6@SvoLA$R$F zG~QRiCPy{lyf_f%;$Vs=rxq+k^lJCC)FLc*%??gDg37u+@hMo`0^&X{!TvXZ=x*I8 zN?>mwp>9{~s^`OiB70AR@Rt%%4!NIeHB`wlG{|%G!-A09-?>B2nAnN5{b5WU$#H<6X8flnNt zDaEEoO+NFPo;mMU)uFl1cHgqH-5#qRulT%pO=}%Lqr7xH0Ja{jXAqkd+kX(=yMWa& zM`lBcPVm<^ZLmc$kB{}6WVhL4hBJxaik5``*b{qCcfb|bKRv}1$8YTRH4%lOt~~ok zV$W)KQcg`f&bERTZZ{6K-p~>~+l+zA3=m?|@GmA* z6>k@{C3%W>TCtr3oAx2N8W`jKpxF)+e|S#2IsgMqnl4DOL`2gpwCgX}-77CId?>m&LKqb1Ik&8mD(VJ#9E zz{fH7i}Q|U_XfVemKs;YNWzBD8Zlb4!d1RQTVXP)$uo4z+2^Y;c0b0~9QtRk_dc5X?-ms=ujinzxM{~j zjRMxvE9G9Qy19txn5Mya@bkj9isyc)nzc54A9O-O9UNMcMi#zjjPt!sk-1$A|4;Z5 zeS#@yFMdP4ElGm70Kq>My~cf?)X~)B2w z>~&-~9n}cg94HsR0z*`bX+&Krwq2KOuBx(`hNtTlAya8Sr{Sh}%l@}1tStc#09}Rs zW&r8WLf*8#7vIh~THY|{=w^^u?!X^7^BDcQlNX4fioh%PX)C$PJtba9^v2?@4~g!u zaxDVWSSR4l*Xiv9+g1)AZdKa@wbMleF27!jg~Cu;9TLK=L3R*b{WmIV-{7MiL~6@> ztUSk_I(iyc!VN+8S_0>x2bCNp20s9fI1N20Xb*SstEA-{azI z-&t#457Ue$8;jaf*AQ8*)%1ZBA5Bd~lYC)@CC?Cz!F)Jl5=$+C%a}6 zi+e&gZ%HPlIkqMa%jHK3RCwj5b~hGh?;2xKI=H3hr*JIIt(9X(4R77R83|M$9N_(> zS6J9k-J_LODwSMhsxPeV!iR0d_=kQqCU z|D)?1yEF^B1pB0I+p4rHZQHhO+qP|^(zYvY+qOA1^I>|uy}EzIz3W88*?U8Rr(BPMUVqO%B!0m(k@ogG+OXLz9Qv7Y`$`*my+M?-Yh_c4nVzV`o zP9a9xGWpNn4l04;gpn0J9WfbWYhk&uCEypq=L5J)Iqk=S_ul&!x`r!DxtheSS&v>wQ2ouqbO4Dl~@*brzd87bu>|cNc7b_DT*H9Jpvs z?HR$R#ny1)>I!#IBUE%LSt7*f(Es6O$&RhzTZCYuP$PVPywRfWM&?@araZ%x zB^g;J#-Du)w;Vb^j=A;}#sC>5#o&R*anKZ|%1Vj2mThFK*P7TqAA)zrDVO6m+d_-H z9_2_(e)$ZYPoVFgSWk^|Rx+isOC$Mq{@VM%_!5Qqg$?sCq0C0Yp5sjAW})~t5+Qd$ z!D!5p;M3k{P2jknZ4^cA;lD=A+=Yg;Uu!m}^(^}`ix0v1vi;)^2RC?Tbw1eL#WXph z^)ouA6k?<2XL!i&WB0@U5rXt%a;Xc+!iVIT|Ls}F&uksR&^UH&jcw4j7_!XS9r0wB z?MW5)o`C!;>tOd?48d!ATeVdEesvY4ND_?s1lF{bgU-!jq#s%L17}6Fsc7U z72j+C0Kxxs7i42+Y-0WY>6a^bP zVDTK>qCG%`;wh-`e_pQX@IgJq)?}G&Of$0_1c9TrhOZ6v^3j_$nWr&q7M2s8QvXn^ zuuE%Os5HeK9{V(`!lfQ%X`fW$X8k$uves|PW`|_0A9PYdO$=?qQT(n*y{@ocS)xVB zOi96+e~T$7an_75J%aoycANm^FScqT8Johg=J}6q0&vc$eL({ z{*RhVg4u1olT;HB5x8q1slNI*|yarR1H z5&j{AbRoHTe41d7n_Q*lLPY`p#2WIq8uwKw$$oQ-{iB3f@eXBdHk$SjxseboUma4k zp6IyHp6 zdXt*kEVP0%rvy3!luBYNNjMO2^i`qxB!!T(QuG+@?NF^SI5f+4(AxPf?ef=e99pc? 
zI)3o%d;Xq9b~42jd|P9InkA#jw_Ujs1>vTYOU;BIu{a^E7c4DBG<^%DL%Q)wqCOp6 zeXWaqJ}-s~jF$Pod)%A83huvP(USy&{}jgiw#Enwsz_%eWN6duJ@b(zOnVVeukgdJ z<5LK;y6vr@Jy@B@n<@TkX~CIdQ6ku9OWyT6e5*D1&~a&nGY{6|Ynj$BP;ZM}o1n9(QS^-Vsx(^Ri&7$IKQiH4^mV{)7s9rLZ?tWxS?RM0)F z1z!!xvszPrSuYnHeBHIZE8Uv;_x5BEmhMN|8N8$T=0$1gNVjoWd9{@C+iw^@r^q&> z^hPrOehCD>$|9q2bA_m=dI+?I<@QOHR_Kf2-vX8%ZNdEu5YOzv)hq>;lZ&qEI=cCI zw)EmTp0p&J%zNjh(|$lYYU6lZH|9bT@}Mp9s@ zS`j&U;Q)7@%jqn)vaJHDPjgu{zC|&K;h<5?Lod%FR{!?nSXpoqvAk1rZPjBy?Sfhm zLuKVLHO-O+=xPn03lt;4Ua`kcZ77g6F*Xm?+OP#WR%_e(i_1L&{UePLTkx=c@VQZz z|C^C5gdWz;cB(FBn<{1SC+Bjy+q($xCnz-v1+QQ^aVj8$-T7G^y8OY`oxR26hhywQ+>isvKUFD+?d>L%eV5o8fN)(%O1OT4rqB3{pzQLwyc zPHT{u{6cPz2yY^Dise?GZsLEdZYd@$MN`qI6uoENL(}VFfBBWodYJ~(5~|c#jAX+H zN-7(vvpvqw>4wauTf@>?mLlq1p1ew8D&`Yg&iD*9$57$ktEQui$}Yy}?v5{V2YXxr zZqEB5_(-#q>f=tsT zpo%HAXv1G&cs0XMyt3sJ=M8MY~xR|nUW#+j>kie<4#{v0|wHs8!4;O0%h&p5F? z=`jj`&LIE06!-*U06$CFVF%n!+b>4tV{Uv=u6-B;z##U3FD{dUx&|z(f&6pcuU_t3 z2r86b-nt0Jy&}P#4=;1#=IDL$Ifma!T?$q`wbybr8hM=PqoQTg~kMQ(gRgr})YdS<%eb-u8{F1fe0x%~Dg z{XyndZq9B9>3O__lwJSpv_<-paYKTYSdLJcS2k|J(VzABCa2 zU6znJ^9RJaU?;l@5{ulw!kQ)>jLHxis4ztqO2S7e{#&w6V3 zNDR|+X=_fS^hSLXm~K4zCE10?oXmpWR6&L0mZ!SPd*3lYAJ50u2QrLn4tK@`$*sH5 zfy0P$*hECjFP|LkYcD^0HDWNZR|N5?y=#mHL<ow!COMPmx!sQH8%dTFQ#m}{fmSn-pTVEK-xoyuSig!J35%kV56xH zj2ZN#no*Rhh?iHqxK9kVwC%uyz$IC7MjroAX1-xe0Zb z`gT}Nj(+7|dC3zCML@W{QPPxP`I}a&T#7u(-`qgnA*Go9ON>F6yPq}*D@QC);@dCw zMK&Yn`)X8K8OB>5dPiskbpvl@5LLnBt`5o`^4+))gZkVv&$}LwmDbU1 z>&X9dZxPqXx>$>z6z`A!R>Ficy>Q+(n2@0;-6QO&*lk$_x&P=bSR|`Y0Q5se=`|}+ z2n*={0j+pRE*_YvTnW7e!t*lqsf_zd>mEYNBkyTyXjNyY&^FX4vbszzHlgb3>WfHvR=Uk{*|2+G3JhIhIMRMg+mY}Z{{9k;?VUsp?YP^HP1G#V zz}izRd%r^HiQ6p-`@xsS~XiGRBbG$i|*A85144WlMA{@>q*&HODg zav6T9(ZInr|2OH=MksbiJt{tgxcY^fwY?-5j|um+_26gocyai}JgKP)wpedXJ3T!T z&#-73363=_+18JKJV56GS}?SMm9znIvKTQc`X0&S-cvviBOT->)Xwk#2x5kkkwhb) zaLv&FTiVP542nJGS4k(a$s%Thn#?5c>Gk8!G@qXzF^DI@@n*3 z>56pIe!yx0{#uRt!)hV7Go_G~>exYyQ;WV<1;(~2*vwj~@);&j1e_fYhIf$`J89!V z70n!QSTm(CITtozS4(hfB#WX1s6=zR&t#L20U{39QE&Sa(asT>f&A-YY-)k-|`KWE5`}zy?@>kIj+WZ2_(2uPj`bGLk>3)PZA^8rYH# z-ZYZAnP%xi%vS%PvI9FseJqXEW?#z8WJ{y46q}cN%+wU#QVCZ^Xw-i$xxK7>Yz$W? 
zhq*+$K8FYOH{m+&nxLVXj2jM@06ra@Ua{7=6Xy)r$GYB;Ij`+=Ef=cA;7#5jL+yjd zmtt(|gQYh{i2X4F375~-6f)O$b>z19Y;HDZ7qq=vS)IPS#?=L#({tYvrb&lg{^sh) z=U5|r*AmiPu_w6>A~@^dCSQc*DPvisr9_5fMGqxP8-Y4eCHOu zx8-1MG~e}2Sw@@D>PGwJNF(8F0xl18$@EO0sOUd<2lBT!~e{JUK$E^QVsDQv13)gS^b@p)=Gy7-V_X!edTTs7di+H zym4Obm&0ODf3$SS)aoY^1YJt?hZyCQ+vS~oZtCd9s5FjIX7Vz$fHh(#qYVJ$Zx1aQM@%<5plR9+IIB!X-Z4m+U)7Qgrd+#BnFfr42{$ zzX%M~C90#`hcCjg(RK>=_l)FCQ4=SBY1O4R4j$N#33_RX|sAc8I>w zV1G+G?)bMu!8-9LWxJ>_&l>IVut1paa9;+=b%BZ67`^Xfe>1IxXU$Ct__9yKYm+2GtoVb4uNM>xbJCe(S**)wui> zHA=wUlbHM-lib5@@lnFfpGxMnJ!o{oNB$c{tLg}D9HgeAhJ|{Cv-|+!|A4cRM(=b8QfJl6WJi$T`2my9#e#2T zv-cyRo0PnUto@U296xAR`l^#r)epu{!Ghzx>pvL^FObZhA%s17I zVNwWdxWA9vS>;kuaaD?_r~X5X@ZRW8Q{)V#O`roOi~Goe6bG-XZF1(Fco}Pfi{c&G z+R7(CPj61MJ3V=j^p+dV!e8eZhq~IVs)NxAI0~Kl2o3byh4KmgfDs?;P{U~?S z;s!!^PcldT&k(Qg4}c@HP5h;K=;&3c#HM23qEZzYoJar*%M8zE41!%Fd`94$25h@q zo_G|>E*53|G1O)`=m5Nml(!j5s@Dlnlu${~^NllpvG8eO)!vP>*SSHwe1oKeoh`Jd zqnCqV*(@F{?#@~@V=xD}qmKHI3*i@*@BLAfVN5=kmDf@4&=t|bJmbr>oYs(*6)BdB zzg!Uq9b^P3hm2i7mfzW70>@f#d?F?Nf+AVWDp(nAv(kz~5K*ohW1hA|^O$K0daT$v z#^667nXawF0hc*YVqmFI6DtZX&bEQo=LLb(4$3rT^Y_!5eZO_-e9ak;cBev`do?2u zt3Y{ql@9)?m@Jc z2_-I4>xc0iEmq7Osl)=`iQ33>Zmhc_)h?=8>MR3F-a_aslM#Yy#^WsDVT0Jw%xCKc z_kVn4?O|io`Bj;0l8NZ8fvZvy6`q24YewN#f`=iS5IU2>D#_54&gJHaBR8SS!di1$ z2mV^?1F54IhQ*gZTyZLm36gD*5G@-L$Da=08vIu6`*5e;!Sr^OaHZhl@$*uqut#+2 z`(@zQO7%05F6nd8A-1)6rk3i=J;%KSdsQ%RG2GvtTmi9x({diz@5}wa&J0DJTBn%I zE%EY?p0-7#{XLnq)3AVzQS=e5QQG!!I)9tN;J#Z91&_fkvIb#{?^By~me95Lhw@(B z4|Fys?i%#GxfCQHbvQGLOzp`cmx>-Q>E11dv8%@0)hKC%Q+zyHbf=ODh#mBFzoM#> zF}fuM-OBw1iX&hgaWkxyg1V^s(58pVQlz&`?AF@WyB2r0Wpx9lmtc;>Z^)}+mcXld z?{g%KM6|fw^_!!`kP$*RW#KjJRA#{53L>ptT#K{@*rOR^DyQYv?fX-6lYt{(z~VV%ZE5}WfkA(^z8 zf;{3`Ku~LZBCJ4l1gy#>Si>&F5<>HUb91M zuzy?u{rlw*OH@w|^kC0p#_OMF*Nenos4)-H0n(vdTl)L#_x$|>rog9>E&rzAM}Xv! 
z;XJo|4A#Hz8G~u%4bj#284ZXPR#}tzD2jGF3525}2!#$3W#-mbqIF1MC?9q>f_#w5NO@t+a9{91=;H;fbARvAU(JTa{j{ut!o$>DIcEEyV?%|gTQ zQUh5G5Wc%s?dQGY(inp4hr5EuN6iPLI$1G_>5$Vfa}_{09`_3JF-S)z7=cDheR7Gd z<=Hs1tw3f!Y9XV8<8~cV94c^;p?8ig&2(USU2k#d75_eR>8lY!b=_h(-1Jr|L$-wN zx^$~yHqn1YDyhM8%})5rbvL-G;Yq}cvf4AlZG)Jk&pDUT@aw1am_&Sos7bzylw@2> zT)h1<7sG{2ri|px^Y&NI>0W1p*(o|mE>+USVTr0!J8r9Xnel35fTIA@8B!0UG0TGC& z<$%`VtehUE-HJ0%vfM+;Er)K|#L~TFE7!Lhut+V@ZAXFon+Rku5ho>drR#HX=~>xSPd?pqVNA64 zBOpPmg%_w07e7#iqeTPDsP#~JqTXN0pjY7!VxpwZjRgfl5bB-e2m(5p$n4x^8Mr*{ zmLZ~|t_NhFdPpCCKm&?86A-j9&t_(wnNRHLDG#iq&3Chj(2+3ew}!yWx;oU)>EA+l zh+w#vYid}1*;AiGEOIrxMJIz_;~?+LoPTFVq~O_JU6X-niVB=l7l`cWalQQ9R~<#T zmarEL4`hWjgBU({|9_YHKaNE1AuxIuDG*Sc|Nl8k;cVn)_CJ!$(|k70TO3Kdry6@B z#|trhh2-2_>C2%VNoLgZwX4t4zm(kp=+oH7;xXJuA&BD}HJ>k+_T4x>F~z#q{~no- ziGlx9WBxM-^gB{4vG|R~_Tu=at;ph_eQhiK{Gt>C()(jKY7&>)k19%yVt78&1M{?s zLSM>>`mlw|`XalqI%uBN@`XWftUD>v@~>!GVVIjlwg>RfSyJY!RGOOh(E-sKDrz;; zirC@#AK;LWLgdR={Ub5@ENPI4W|T=ZuQUV9UnViXzo_NBHNZk0b#F* z@P!$x%u8_v-w=6e-FXokP-6<-KwRVR~z^J6O zm(2hU*72GUJXoxCKu1x|%>1257MLdYCuav&#t=+?{9T{-gJm-J5A$zt^B!{sEKx|Sh1Fubby0AZC>5s#)>2;CB*r4g*(}pG z9@&gY>+_O-^wG2^&ehEHzm$bKzI);>aUZ(s*`@n>H8%lbB&_*yHEk7v|7}IM-iw{o zxU0P{V}Q0q{mMjlC#ez-hhICMnpWXQ_I;Q$&%_2NPIBTXQ5lv!HS_QSb=0UN8*^yJBtou}%P@b>8Uq}_` z`>zQ57vv4s0*lMKjlrlvXq2r>G*mF=znzH2LJU=*q2mLJn!p1l?tTxzg_x_4PT#`C z!Rj}uPlbK*@NtiDTW_Sz#asdTO+qEkCJEt{#=bt=Q)xvJ8GYA~l3AZEIb1;i!>wVU z6bF9-c{F1-h(`^GRH~UVgA`UDoWLA?wXz}j6-U;n+UyYTTHASDl!H;Ei9;U&p(Ge@ zAFc_>3@KD?TJ-^Z3g#Ld5ZD2kCOGR;w>K;-Zy#V25{@pVIJz?&UZ>|Qs$E=8BC^|c zJKwO%OiiHGL=bx=AeZXTEi09*34fyTf~s7OS)T!w_5twDZ?{u-eYDIQR%#N>?Zy}k zYkS7Yk;m7E*kbOWR53L$^#Ut7=bSrjzZy^{ClxlWpv7ppNCu)wr zw{8td7quX#FHwrdd+h z3hC8l4aX?v+e5(X-C;tnExDlpRi4Kc2t+q_xjlXx5xzZ`!i!`_&Eu)!% zMDw3I*7FX_8*J8Q!OK|$Y`{XmQFe zXUt$h@Vf=<3GE0hwW;-Y&jg(?{TCK8=3n$m;WDFVPvh(r3V%SfFH)o!QPur)SZ zl2CjJ^!IH4n`4JP1E#|dkx(5ymGH+&xFsZ{eKk39VVG+jR-d>F^egZTK(UaD1ipj{ z-DVu9Kb(xqo1=T%KP*!pB@{%)GMaeVAOoVC=oh=l^cmHhCj(OmdWL5-vCuvg>h$Ka z1v5LH&qucp+xV#vrzdMq17^p9(5oDqfO@&M!ScoD&EO(UfXeZGKc;)HH8o70|J?n< zhxkdcKOck`$1_{~0o14g=;`Pe5|N(eik{TqKCZ3B77L%aZ6MY`ycKMkaL-SkLAI&k zSsnf@i40?gvSjZ#VEDXX%ulcaNyKJT6q=*bI-`hj7A-`=VMq{RPf9OBL!6oG2-fN+ zSS(U-B;Sak5K$<>u`VuM{W^)-o|y^G16l`*Z=I0Ch?d0ZKK8gK$83LJMG>S>2}@<~ zG#T?}xt7i6KuFt+QSX8K=e&EDyLwWNyMW>~8?^e%FFTev_-SD4+rW`##6qr<2l~r@ znZgkjqobg}O~a&xw@@>?`VUAfK7|)p?B_E96C#`d`O{l)ALs8I$W7!37nLf7-hz_$ zGf6lj0rF`lj$t;oCJK=E71{pP-FYSfw+m$|p;gnYY~cQaF>J(LGEvL7UA^!~GEoY7 zgp6};k3PpukH{jCmFm6WX44UGpp_Cm^5^dvuFx%LcEAugDB8yomIH!Hn-@R*T+TQ7(sc08k!p@PT&?y1h@qUkN_OJq9|yV+@}G!x4p(p zo-RA5O!*;!PRO%Hf!0S#)g>Z!2%#>y-KpZFmn$oxql!Woh1}19Z3;b_ zisl$%vaB}1cTj6@Z`A1Xf&8D^5+B-ZRf9VNMJ}fN08|SUyzf9MfX9T$RA8K*j<+8X z^!Q4}Uvg+?LdjLU{!!7LENYcvEc`MPgm+6@zsWDNT%a4o+o*uX@X>T;+LZ^iT@1K$k73Km$t17 zilP{zGeFUo0H`;2<@W3yi-z`Rd$^pPS4rl(?a^zdtS2XD@X~qy>2`Jq61RqYXF^M` z3R9s?-?%c*B+`#UQj~7%6hyl!d`l*J2g5?crI1z5J%>k|Fe!}{naN)KMWokC5?20|7*pb2`R)T*?Dj5(DOvtytMfw>5E<3U+ zk=hS*ZSMqy=#MI(e^(V_~2_D`yJgqm;*hcy~F;6hK z`WGF!s?H`m47UFz+T;Hu+Na7cAyi}iz=$zY;^Ykn6ntL11qoW9O)}Ago34$FtMDA^ zDGG5l|6gqtU5nG^M!=&NZLW%o1>@d$l}zxdA#{DVqTKruJW&Ask{mdzNnB zILq$`#CYgg=`{SPh8;5o(=4D+Iz^vn;Yv5FYoxpATHdkh(UU^1Ps?2LbY{(65W4+c zM$0@PXy~b=b&TxYjRaAfw05vRFv9XArkD@!%}J_rrQbj5V)j=jE(w_5D8&E32-$sv zqwGjO=z!~^LHRl0XL#0L!M;C<2b8z-$I_Y@$3wKky?4~GUDGPYKQ2OSZ)_L>yRp2< zt6`SU25}jIL4by5uVTbn(y7JmYWX~a^2wR~IT$+ZY$&v%au>Dpk#oLb4JoKG$+b+m zPCV?)Q*A6IEMnIvik{G#UavKN>q>3|)bB$8WEM=k;(%=4pB5vRjN}5SIpg3r0~0p{ zsQ{K+kR`pl>0vyHPjkPk_oMG$6fAJ?5UGnC7`hNf6i`q2E25q8X1Sps)psarPc5^aQE(%~Nh^U^ z*AoxEIni;CUhBusy@DFU)++rh;y#PR0d(=(pMz@O23u!RCi#b9Nq5Z9NNDg-au2%& 
zYaV)6;hQx1gogp}#lVKM?_(@B0YZnQ)3`I1y1MI{>zyB)YPMC)k}A}QYMd*481D^& zh{33lcEP*p8) zhg5MNTS#P@RmRF5`HbTdD~GS%>}k`)VcPqVi5e0D81@@L!M(&b@mWak9~_V4FS zB9xxmbPNV8^AGl#`@q*nlrBLS(5{W`vqDdJhK;^*G(5dAz^l5|re(I(!Z84Xk%_j< zh=v)D`P3@tD;7Z2+R(eX*CR^wqTIXq?$cf4zR#@w#XT?Nky^d&qYq-#A9fRBH`<0i z^qBE6CDBD_7u@?9GaSA0uLet?HvLxv{S9BQu%o%GG?p*J{{ayYgnj8Gwg*o1Zv z_w}L{XMDT^)To6uLc02JUxd9LAvxil^e;h&0&=n|%=C`9Y6doPG5obuHHk-)8%Zf} zt!1WdgSMn0fhWMlSZdUUU|)w5O6+r8d-g@fTT-YsE!2f*C`?l4h#^T|Fq#rg6D^HD zAY!j_aj@k^o=?E-}AcjZiqTfX0TF6)PdBJpoK!{mzJ0j#zdtHzw-nPm) z9e=F8!v4xIraC%OM~wIy2Y9HZQCeK(O#ErpO~m<^`J;XI`%8K!m#QxuW`2H-LDR>0 zw!friMwg;XF48w5qhV)Fx&OA30BSm=MGV8mPq7FSjgF4mFqFHPVpr3IxBXB0!Viep z0GX}|Y?tImsUv5~z?Yr*0TDFUu(OAKou<((3blW$(n=8bg zo4g9gBN@Nl$y1(0PEkej4~~v3TTP+&vsA{-q9J_pIYt6lC5?@P)t4 z2L9K!qLm1mCjf%Et8tp|bi6Nms-hq=8g?_c9+{~srR&{i`VwwiWea+WI6z%L8aUb! z7dFebGl*Y5(G^xU86?2cQKP)=-qjK&dHSo9cx^}fO_!GgKRWFG>tAwj4UuQkzpmTq z))RUkP7YkAxUv4n)`;x5qJs+|8o~8A7h`ghwXFX@ggukVET-pvJ!~PTU>38Vkb{JqW>!1UVpSl7n3kl zXB6{%wLw*9Ohg*7hOK2>DQEo{X+$Ig8_fgIAH*R9ZND6(!fC+ZRpG<{6K zzXNY$Cj#pfD3Ii}yr|1VO<2qs&@{=6Bn?w|+1pYpO)k9CAU5PbI-rFk>=vYFxT}I! zYzzCiGPFCK06|R7_CFd%Tn>84Z@m}iqY_ah;4Bk%R_b||ejjCmm)GBCS*wdeZ9EGs z5VF?0&MSnawA0Ki87t>?pBh{HC;Y(5|A7dGxMv@Hr~%+lD1VYtkbNDr z%;r1wp+Hgw*Qw9K_R$kO-F zp_pnD{E~yr-3D<*1x0SH*pnmgh7@c7GJ`Pc5f@}ys!)PN48g&aKNGL6NhuQlU~Z)> zenLB3y4lMOhiDB-1%>wX`kg?vKlFy{6Rmbr4QD+urxwja-%WL^Sg?tmxt|M*Z;G`Z zjw_93Odem(6mUUNrToZhL+UafrsZiDHC(VGJrPj#umQ1V$>$i_deLQmyHTl8E%aHL zbaKJ=QMj=fI-|r0rM5r4kEEe?_G+Kct4ax#jpmKG)DYz<*|MfIkm@>f&MQu8yF?$vJ?B5?n_q?$r|#lY;R3omImn^9ikV7^cizRr${o%SAV8W@UUS@H4_59X z!5udkf`FxvG@W}{j?c4tz)FT^ndj>VM2v97t_lRQ{(y)@xtN|i9BO)LC8IiG!Eb4VBM z#W67x6CURobZ|HIRXvHa_}ogX>cP*AjeGF@r+gS=kd0SrAeGvHw?%x#x6(2CaANVEhW#nV2hC02h)%#GUVNl zGx*Q&13vG_y<;c>I~gJDEH3=j-ZtqL3)@i0$+9?u@)6AAet)F7p{U3A&>zy0S{D&*c+if@pXtC>GB&OICaTUY`5n2zK$xm*d@@ z+cTzUtd#gHe*wX4}6 z1HWEiRc}PWe>$bLD3(@yybNJSo4)u~$5G9P7>}ZqO@{j9J3=jUc=9x@D6h*An8E2E zgs_2uzKZn6Yiorszok*xjN;KVlVNHu^ffwJDuzc$9jaa& zO}XCG7!N{f*(-U%p?@&nDaOWP-WQTH@#=Qg zVkXho*Md3vB>tusFfV=fAtXRsG}4pY&)i<4cKCz*dl)Lj^-g#}uhzbZZ4ri9WDT!;ly#!UC?s>92+x~WC_pZ5kJL9P( z@e*8tCg(drhyt$orFXI@G zXu3>O=3qeOEz8(3+-xKDibV4Gt|5tvqa&%+&=-`-c`1Y+3D#vc8g0sAa|MN*0s^vr zT}z7Y^Z3dUfiZ}&6LK|EaDbXjT2j^Q==5#&2Y#Xm^I(A~^mUvGVuFGp+g5cc0LoVJ zXB3o=)HMYH3GWh2G#75Jb8h{iaApLi+086N(c%N5Urg-x{oVcWYsA0}x0V{Ap> zm^F5&e`zuY{;g#NMw_4r$htP;Px`7cai=i}n~`L!{N1x{S3h>Lj!6x~A;mq~uclX< z46U2@mR@nQi#?nAtapl|(qDnL;!^5*=nkLneGY8SacS<;ISnCpx0%;2|CTu|Twgqz zV6-n2yHnWqr1tu`2}#w}*e|5r=ooeW)hh)@ztC+?1s23Lp~gf zVo=j@D;aE*(W1{@8?=$U<-EeTRMouct_@CbwFA2AL5Y74-^Mqa4rRTZKspw)t^@-a3R{zFhiwNI=e=$`xHWZ_azTToIn3 zL^htcmmn;U*e_d~lg=K;0WuoSTNI6p40{S3bebv48KoU^iF(;XtRNXl(vpWMXpVVC z%=OlxD~}JF0xq`Ld^WJ1bk9zm@@9i!vIv`uyDlE+r{*o*RVi+aYw;_`(O-wzcZ!w2 z@3JcPg-(hdIUPl}7P7t5c9(NI*%ZKUaeo*4aDs{0C|5|WT)i7H;@$tbqff!4Har(Hd^H{ui6t&VI&3xHp+>dLW5lSb= znscz|`?x~EP(7LpYFtBab_&)=;{m3AYRVS_7CrObr%fTT2mch$!dUvXjdlVZN>+9J zh8DnIU+Zfh5#28KTyUYH%$^6Ch}Jwx1)yk_-ms9I>B_+eqXKOq2M}S`Cs8)Kwll|o ztpK>gT4rly`2My1H7LvwbkF!HA$U0-chsPMAU|srOmtf$Ub4%QA=pjpFrfU)KD@W$% z;g^2`Kb;pM?XI!rKay%E1Tu&&Z)>0U z4E`OL;7*KXIA5i)QmSe9TFF;BzzzdW& zjN=6QR2F=CZR5(DWnp5c@67e5PKHYhb}iF5Ho@&47>_ zxAQsOL++&KO)iBeeIo??tJQ}640rC5`7pzleB6!LYnHJZnQ{06Qo2A8b^!QSE~PRr z^A7w&4gr>YIr^g%ap+gUa}hT?3v?aF;9kLkn?e4>BStZ>JFWy;43nzHB$0Bh4NJH3oj%y8qu?|De|?r>UIuTD-FEz8tJ??cx-Q>P{9E&9L+@kp?>$E|$=SHmT<7>9C9YHC>bzl}qMtTLnvEQW01@==7*m2c zc}Z`^d2n)79Kj9vvF#(WY%4U)LQ!0+PO>{tg@Z78s|v(+468A3*@P z*^Po={GK67Z4R+p^%239(!u9&F?#c(xt~mu0sn|CHZ|Dt(~MtC?^v6KrbYhsVNzwL 
z{41hQ(sA>6qpY&|P|1h2X}O(N3nLSLiY6lqEdq9q%Oj<~D;ptN$*XtE15);IZ8(4HUTct*Vh&}r9QtK>j(YiG_n z%{SC;a!oVA8jc9`w+{ia5Cn?hiunPJU&rjNF2@S`l4zV?ma~sNDt#O#N}MDOfN z)}$mY5TN7$h!TEs{_obG#En7+MnoW>ea8Q9>yNXYm5Hr|C&1al&h{rC^$N$rZIk1^ z(-)ke+b5+J#fL0wP}?o3@phX|_+wP6DkH#6>pVMi8@P|L^tf{yZ-L{1cNWA3^0G&6N@O_An`Sb<9s@B!%7-Er#93%uL^Wf13 zyS>DGK6t#e$b(iywXGrHryf~GXxloL+qLuMMT4BOa9y$&_)0_G1YJNq67%X- z_wVtXk8)uOO}XFg20G0AXU;qg>Gj&V6#m48%<3eWPVisFB(Ll}C%r_x7T902V&ET_FMgl=*`2Tr*>J!dn!&w zZ(Z@EP-*1I%B-5D%=Ip`{?rM~B_@D15l2!b|4^y(+aM6*DF`2k1YlghC_Kw(I+7No zP&g(|R1@HhfO@$_@~999UedDqBWtum%S7no%vAh2I5=I!YR^JjP}2}u(}NbolU<#= zgXD+IVEG1jiA?=UeR=&+2dBq-d%lm}^}UdOp+TRJ_3dq~en3`;gnR=fLHRfFe@!Zr z*!utcQZLY8$|cVLqO@fM{z7Ll8LMMy0c?5}Oj`vNnDcjRvMJ4nkFF3JtfJ=90MNBB z5OvB5Sez}I9Z6rKzc#@bBY)3<5L&5BksZJCDArr}aF(}`@!m53A%4p)=m6{4&5gvfsRqc;seayNrO62 z1d?372eImjQtHSPQd5UZ4zE_(S{f971mpw%TMzYFN(97F$Iz8M?`D007?il216eGZ zFI)YWolA&$D_lbB5;4=GI4tVd?tZVtA9`DSoI3l1Ple!jr!Bq|d*dCM(|;Y0yaNkM z-0!j)V)_LsEXTtOj}3c^D0-vN?etx(nn`Dh*)@c)x=9Sk#ricPr znSLn05-eX2q5nPq0V<;kB)~XNuNMal=}yIiR-z-v)DoK^uYO!$+4xPz9&z z8#e&l<)J-X!9KUvKtlxdZl_9Q!KVvXI6S!~Q2~z%2{w;%72jiB6+VL= z=v_~|q(*S01|h*-&io-+CP;x2Fqg%0%Dfgx;_9&iLVWCEK#TCrcu<$l$k#?9J{7#$ zis(vWC7MgBBsq^6gqqVLs72$Y%wX`kcy$U0>^dgE{-3K?#GS-(odGp-hnByIx%Jy# z@^Dh<6gA6%KooA}=^c{>N0LVSMKGAx%Y@H!ytI=@1!65zJ%353B>gl9*A7JEilwZN zg;SLN5C)P~F=+Menf(e!GR>M`{F77?c zE02b);%zun(aqsIN~pTfrih=1Sq%-2I=zuM>EcvfA@iU(?6ljw+%Y7_}zR-JL5ZcXoY5@ptOGA>Okg?7kVirpas+F5h|YGzz@k}cpU zZli^s)}+ zNKMV44-TdHhltfg7>$T11GOclw=3HP9NW z;A$BV)vGFMgNjxuAxM#Lko_Z$u?h9&s^OSd9!`e5=(o7j#FVCn$bj%Sqp8+wxv<3g z8%-LP#7f%$=`u;vw#lPPHTC53F{5RwA~hXl<+MMZs*ruAJDIv=5ls;G-OY{Tt?*k@ z8qGBT_0sZP#48<}71y-SDykXY}UsqP`}l&D@vr%Cv%CcZky-!ZkQCTp166gz|0^+&?=zw7Ia6=dv9{$y0Ua6h7#<&sYe-0g#?v#mvU~}bqx7)QtJwQ ze~0*UpBasTk7M1mUg-O=x)5~Y038LivK_2Li6C@2e`0)PzI}O^&Nj$_ z>#X(De?G3i>*#J4gH?+#YB;J^Wq{;Nsia zyW&gYnGY-z1(_r1f`J;EoJY)yD)#_KPHXu}v zvy=x(>+8^KZqMx@mIgSadlV_5aI|EZn9RShL7mEeUmeVwd*fD^;MSpQmFVD>OO)x2nj!XY)g0MBFYxs|}|t!xp?WLGr_OtCydeQ&$9O1A4k zumANI{#E9$k-2NkOxF7~TUeA$if;viZt90Mc_Kbo4fg~l5j^IcJF8@{>w?SNoy)%;o0 zy=HLdqn1+d6%9!g7^UQvPmaVe8WiDDrqE99%C-BK9n-JPD#jj;J zJ!5|hIr1vWcF`e)p^?Vml6_Kj*3~qAe$K0C^To(pBB#wBU+DiXJ-3pEVtV*r$I2b5l|nsz?MlEYx@_>#eIUcx)fTOe>uc zvcdJP=u%r|fDhEtDt0V`HPVFE%Nd?HiLl7H%fI9`<~2L zfmjK@udl2+Kf+$$x0_kuzl;RBIy%w5|E_lr(q{LpqRLZ{nh1Oz&Yu)vMZEbvKmUal zV2F)*K3=WXL52Z%R*L@a3gTLd1lI&S@OG;+Lye~OM!oO|Q^)L3t}Cr3fh+1*P>lmN z%;Zyw-!4Pg_7OC-IAyh8P$u<+kwG6dyv5Ld=&IEd14O!jskAe?XW4Qy#w$O+(g>&2kqX{ zZ}Z;L)Sz={BS~rFt;&Np$xgFJb1BM$|Lyh8jxn3c8{^2h74n$J+QJU?J|oOmrPenK zY`#s-n!sFiP700JS{~&?C&FhShD4Gb4e`;-^u1*!s$29zi9UbAi<96Y&xlphsR!Hh zuP5S)N7+O2eLp7svXfx(-*tGGZ^u_L{XmF-r7$~hay)k$9E(ljSGX??{pp!cO3)Th z#cH~2MW>^bk1ukDOfT{9us&e1sP%ZusZbYhY|4DgR($|QbAxDck$yj9@}hqdGl*@keNdmMCZwrazlS$3&)V0`#ktXJDW!-*b+nk$PsqTVKqG7}Rat>uJqwI+z^1 zUPL1t(YZ(DWTuDFg}{RzlSQ4V)pg6u(lbH2+Fa|-XbH!%iC<-%s+#&9Rv zOHg6%eq!#YN^$$V>9|)+i!wV$%*EZ1HEy|ifa#VchjddnhL1BdN%uP?%T|-Ns(w)J z3>#{t7`_Bs0Q8NErIT1)LFo88P88vosS>2A<2alwi6T9bM6_N|vS2);2?#CKHnMB| z3tnGs(6+*)7&KHh7YFn^zLF_B_MzCp8sH|bn5ZF-1?XnE7}r6_&_1r>kd(;^MoYgw zvopOTf(fQba1|03mkb|o`&eC5x^3MlVlY#pblUCd)2i-axuQ)7Ioz|xGZ$3WuVkG6r3 zgHxAQO}E+FvXJ~bBaEwk(1Oym)S7T3K;I>YkGDvB>Y3x-l0lv@YcM!V4!T9aFyrMBp(8gRC=mH z+QyaiUey7Ut`cI3Eoz6*A6hAkR)RvmlCIN#+({SM(%hSv_qfS!1FE?38OeQV!b925}A+T_SAiO5B(PUNm=> zm*v4Q;a=p;syQ&f8jJvOm)$^(uq%VaK7AS&v_b=|C~(88*I#ohO}905_LiqR1a2e_ zm>7oPp&aj2RGjn5S_uv)LEvb1h4qkf!4Sh}S$iOo?%W5U=Zzf;@OfbAr(ae(O13aS z4lY1c?6^v4P%l3%OJ&09S@cy_(U_NykS-Qs?Zr*N87Mqh@fO6s{@UZoo6qL~{j&VJ z8^1t1Jke1)F%V(CoufRQY8wP>Q(H}7_LA{2xQEi*J 
zuo2}vS#2PaBd#cz10Ev|v5(UW0@#T^w9r$)BFUOj<^4yZ){REkWjT0~PTz^(MAcA? zr~np7hDTin6%}cBt*hc4%K~f&=+0V7D5?EgA3g;=6=PqTP5C1Be-Xc>`gx;iZw^rqxxBCWo zKgYfJKCC*hwSOCU3V8i_{pBS1Hh3%-W{}230*D1ephLl z8lmN-bkKyV;F&rD0@a4Jl_RJ0lMy};7Guf*dBhL!ZqM@co>LPUk5VvpVRl)S+D zBc~j^3;<%UP?goz1zeiq(R=Ll#0&H>`?T5lzP-Zg{eGh0)cGN?XO&C4EaDRin^ zi~LA^k74w)rj`U-4CWHjVaHS?Y?XohI;v>1s>qlrT%sED&}dBMu5vL5k9D$ zQ}sA;EX#XhOd>780+>N#2VG3@kzNo%%1%Jh8`)F5ouAq1x2S;9B=>O)V`#0<7;d;> zh72q34EGD$CKxQFDrxbVnu;0KZn!v#yGC@VK2Sh^XQPDI_+s|Zb^*59&+n;6VM81Q%1w2{{!||&^_fMDI|K80$l3xU$TJ& z@?|x?^t>S1=$4felMl5i*L2EyRVlja8UXxDYdA`+AMWrMSE^&csby~EmWyHly`4Pe z85!<1b?T=Sq}d}lIUKU~UMiypb(ahuv9FK>iWdglAie(13=OHlRN<^vokGWCiUqXT zcKWOPLj>?w64=O(>Nkk|&=H~u;>lB>gF}S;zA)=I zTwW75Z%Lq#ujeoelK+P2Y z^q5kD!+a!}emnn298S*(g+;g|=kZqV-wA~rWC}aBane0<(3;x$$p}*Y4zVPK?R=R!Ww%*` z3U?xOsO=^oMK5O4jpobMST>V=y|h-naI6t1m#mkqY1NFO{k@cN@i8>Tdg-7K2gzc= z4ce7X_ta^3cKQeaxP$;=`3Ch)YOn$shDS{1d`sEx;b z(zZz#?uof5-I5mT+NKI(Yh%}{=EO#QZS8~wYuS!uKc^%KBW$a^eW_m%(3OKv1{mKV zAv~IWnOVJ=w^cq3SwKdBtL7??Sr1|eTKa|6IiE^q_M|kRqDQgWhG%F9S4<-_+$_c`IeHj%9p5$L8M_DP+eLIyf9eTHdt# zi-Kq^PObm@7~jqyM8LmM{f0bt!|8(zQr6^{m0Z=>eihEY90t^9+VgY6m&t!vyr1L( zd8*t7V5A`n**I9qQ3?cP(IgCd?LDlv~ys+hf#^MuE@wGGGNo3ZJ*&vFD z_cIMn6t~TJ`Jj7^0Jqv0k%OyzepSVdvUt9p7x@4p#2l}6fU}HzmGF({hzZ5GYT8?- zx5ycLe#8bbNffyM$XEt`%)jDVVpD659hV?D_M}xdy=KO-3{6Cg)is3WtG#+CPbT%w z>3|@BD8{v|C&sjS+0pSA5zn+qM|U{ zs<$0jFH4WGR!Voje?4~z)fh0FdTuO@bnQqR1xx+HO&kL#!dOE#!h_sdnUh{9V}9L> z1KUXuvosY#ZO#lfYnwzqQh=+oq#FFL{=6~&R$&=9{uVH$Yb z#`Iy@d6uVA;q;l}bEQ+5!g);Y^Q7jp{k@xS!#{y(eTqBvHK7vf(q)uxIGoq>4LL=wwhU;}6|MRwD|S^L^_dT5yjeo} z^#_P=GI7#n6e(c?vb|46mZ~LzMC>19NhL6n!QXTPyIKoQf}p-pMYrMKo=h<{&m&I3 zEk^mur2ANK-r0V3scBqN#2J!%EXuNQ6&tp9W~86_yp1!u*VYe$o2Ipe_U(!VRIt=x zJ}B^2)lNYrgzKP7_^G_LpWB6M#aksJ!+OrFq5M^g?jC~!9&(x2(Pl*qtI$D7AewNc z4+E7c9HikyQsIR`y65xky>;8>gE&)FT7K$Xqo<779g~nj3WA}^r!txX`~&*x2INt6 zLBFHsp)LfZp+$t3zyT%iJhPc>OpixzkQN>=(v9L?V6pmcIamrO>L!O@sZ;+Zg8c|f zkdx4aKLrTF&yo|Bic#8^eUHn?Dck>U`+Q$U>UPoU5*8-ZJGzZ_j?Tbgk;DSkD;-t5B**>d7?(N@uW+T0_h$`6+NJb(E7Eq;`R#>{6a@xNsT#lQ;OjQi^`h zySsB%IJKPaZvd`Q-{qn2&QLUPr^7bA>BF{F=++J%@p|&@yNJE1!Sy2>?=Uiv>3RgV5^>ce;5uuAZ&uk@;$bR+vdErkXfS(+|yaN9DJ+M85VP zTD2|(_Z3IjZmDR5aZdXU&c)pn(W~$&(BmN63w7XYQlcJS(owcvma|&G_4HF@V_mt( zWM7I~^FmM|N_Wbo7<-{M%=coQuyiL=!T{6P4}k&meyB$y{ww1Ar@~B`d3N0C1{)At z-}x1ygj$-)`A*RpxNEf31)%br^2OJUgo#JPc`Rdy@-1@xax*q0-#-40*xniuAnkc_ za_|(A_7rLoO+gwt`BWDxU1ttDkd`l^l4omu@e9@QBa>@3cb$lFP>ei8_AEnZ5&M^A zcZMwY=%;ZlE8GOlv2F&S&(a|OOZOM2Afdz)My}7dhy`H?leJ5+ziIZ^6sR!r0hl zGC~&6Z_1E=;k%{$16I=vD4b@_aurp!F(Bpi&wI;a^Azeq^rg2&Db}_*ofpT zhg1a17ThT^jBS1-xoD$uC4K@xY6}UjP$_o-pCFyIAp^aX9lJHI>?hOG9p-Kq(E;d} zA!b*u#dZ2Zyf(PRtNg5Ru8EJCv9u=;8%Hi@Bc*OL56Sk{WrqWARRrlQNR4IW2E9~e zfgq)#`5EOz>x^}CPRdo=7+PbP zCmFco^rP9L5+FfiRo-R(x(i$$TRUd4LJ4ds%ux@#9Z<5QM zUoPqFI}lQ*qXXq*F3mU#MPdWS=)t3Smmd>(bI-lxT7JUYJ2G5h8sffkEtL9PQsCPA z=V!bCm*I-faG1i8(=wBTMzHFT!PR;5aeQ^o6}`-a`inaerDs{BO|t|>T+5%}QLxz#`uD;u^4jf_ zGb8pMDWm+@)bFr9Uz$d~%~eD5HAL* zk2rv@>zJ(0>0BlzVBogMb~l-(X+R(SqHz$L*qCGN2|(wX&6kw4$toynX%9)({h7Zt z1G#kB&4!5{ncVX??UcHifYuhjN-~9>_ie@#C`OP`XF3;@ zx&CIFvCk}ba4T_dLT(yNB5|H2aoo!EFYoKOx32q?)a{wTlWTS=#~Na@D0b{`Zt?>^J@8&&Gu%t9 zVvYNi{6I$-5F;a~?B)&!+5m>vKgRaJInneAoByKMkpH;+VS@LFT&_};fV@5czW%8E z7~!N2OW;k0D101E!`w=HC}Er%o7xT+lf0YVIxxKsM&5vS>xv#1 z?L%}%Fh|&Vb*Jih5R#%nTt!1$UVHMbh9mnFPl{b5vn>pNZ=1z0 zwRbOYFE*Lz8gzfkHa$n5S(e&CV{klZcL^L}Gr!kTc=yXl%gd>+o}%^_rh5sLHUxH%x0` zlnYRF5PRJ~3k(zwYEpQmF{QkZpVJ6R^u8K*$B!n3^W~MiY&joB)ZCZB-IemH&|^@z zNm&)LTTe$}M;?mK-FuMQCE#U3HF)~UqWHRWvASAkvkHAoc>4ItQuq!C(cu=&8Gq)B 
zJLOB=_c3wyvAwoMTG^sTYCRc`a@1J6kp@&aF(KUG3?W>5*OifpW*e&7x_5E1VTotAd=tV@bn z{}ik7tbCcH^0`Qev3%{4fukH#*-FoaO&+?za~7m@sCji2hxHV{wn=r$4xIK7GQ*!e z)ADqr%;S5UgZSOzIEkC~&Yra#s`<9TyNIPlWFEkP%#Kr#q1&Amqov}O9WL1S^1%tb zryw-Jm++%tO=NgU6{4NLKb*IDM<8*y}QP43jUy~iB z_^lun!)o+$@!Ac0(rJ<0E46xUP?Mx--V8=<%04-clJPiLQrus_23r=Cix2Pk_=*U9 zKA!N8<%n$$)|u++K9mf|4TSNh1=ucQ?DLc!N`Aq;} z3Qg$89Sy#MD&ejXo5Wk-EDd&RrSb*0!zs6X7u|nTO?7NW#l@azPfv-Tf2w5X56_jP@z%XtSD1 z;DmjbG|BL+o>;8}^c&TtJxe3r2YUOf1pRaQ-}=}%&jmM&g$pnn(;@;V%%X#f``V4H z4D1`?lFyC8t}N*=b=GMo567EAZY1`*-2|IuFvzUm04fTd0!rhK~wu z6FOUj5LYz#k?!4DT#tgsjC-;ut8K3{sFxh(Ygz0Unv}6`<~;h0(i0^88v&dZbB)>GTrJU+K~9mfpdXZdxV3UEho-%u!0 zmzvQw)htH0zQBEniEMlyqu0C;09}}6mG&JJ#+nBy35;sV$&C#uCO(DbXiqcZo0mkBOFN0&c}NBVgo zgP8)TSx7R$@#Hl4_RwF041d=wLl5`VKcgKNeM(2lTM_Wb=vo|Q3?=2d(!M|(kRzRx zG`8;Y7o$;y8P4BSl8phMEVkODEqnHj`=H@+ zQzX`gAfJ1>`YaJHf*9Pz(uGO1Yx~o?hcbesL5%Aq)~QMH*f5L*!wd;ju9L~my`D4G=@)PrIe3>wDtpi3!{=^c{lrMbJb2=tN z{fg6eV()DikE8d*(^hFDpT{G;zP3?YwGE=*Xprp-5A9pt(DlBwI#BBTxEgxapYT^s zQxdb4+!)wTnEU=8oJ7-^t9<(8&Hj zxde5!7#uca@8uf8H98ubn56{FcimhJO!nz+b~OS6LSI5ha+co=<}57I=EeuT-(D52 zEbsGrWak(C?y2$(5l(ey6+~s^;r4Z@!t%`O%KDP^mTfBU;YL`yKW76q&w(PmOFzvG zpJ?KhCq*++=-LI14lObnCP*P>e$ZIXAg{jZTpiqcK;h8U3%(O;e~CD6fXR{qJ(>W0<5B zBweVPB)1JE%Q1ola>f|6iD#H(nu!M!`TAQJ%t0rjgCZ1RIbZ>(9l~8uL?wCc{0Qfy z*(DVD_;ogdgTZ=VAB|$l?oo+$qxizYoxekQ{t0kgqpsYd>de#Yz;A;Uf|F)(;zQ0* zZW0wq|JKZo9P^$UOIe7mr^rOz1Z}ZHVN<1YrXDgNs+Vf#QEo^~*>Xldoh2&<25HCQ zF{nLCTlJG}$-SZLfFX*TsG#EWw9+={n)t}HUro957(qNvy5;JW+*N65@J|Gc(KJFS zXC}!_>a+ihB7aPw_}@X&K~X(xDErmho$UB#BX(D+B-@JH+6_GK5aRLf5ItNZ&SZK_ zhM?JpZrw*+PTKLa85*v)M{YKp#U`oM@-tdPk{rCPHjrrcp?*hZm_Lp(rVo?Oj%_!t zBUktcnu`+8lfUW(;V!kvhIVUrq~1Is`g+9_vxnc!B5LFmr} z09dA$h;(1Kk5fHcLY)CnzD)pUuq+`*5<)i^pSDR{WcC|Z5K4AiesorH{K2H%h}2<; zNo9MDxxfA_h|?-Po!P1@uD}F$c^h*>;8GixNx|_|tjOG`c?YABc_TLj?VQd(T2GfQ zMS7m~)J1u63o??3ESiNs4l}HA&X$7Nr)Yf~E+t1BK@Pg{h82;=0}Ye!OC*VnMaXEv zQF?|W~weMV@;!;n{P{wCJNCg3TZkJW&i&YNz?BVt3BWM2t9cnbeWseDDVj)>vw*?;lT~rB%qPv|b^Jl$!b`WvKpp7C+J@i1~+wd=)5%yzKi(o27uk ziWj`To{Q#4rbFNY4H5zY^lKK8Zgn&J#{~r^5-F)sA2PYOa1EcL?P#n{DUY{zTY~h2 zrQZM%SbXpQ2&uo$G2tLLCpU;L+`8AeL!h?E^xM^9R4b@EgfmhqHwIPCoQUUlzqT`- z8Z;;yf%SJ*m&fDoXGPsW6lWg4k^(_(RYOvL$^y%bgL3gOP8b2Jw#+Y#^EyTaeUQc| zsoDS*C2LsozpH)el=Lic-OHePE(14o@7Kds zN%oE2Y$)+WQqW-1g3^{6Jf^-IsCWM+@DqAkOg}P`MSQEX?U;p@z8Fqa&-w;UWaoE& z@;R{QHS_7XTF5CXwQDHP!$-+BYFR}&+xjf#X9T^~Po^`&ONNdiT4tsmcpunV;Z1B9 z2t|)(2~54d8-JG11P0=OIEC63CkHc6J=ZVM75zK`%Vp0PDjo}# z8F*^4ZR=3-wYV^KR#_WI(s<6(qFJ5n@HDkAmoVY^OY=hg`LV|8D|Y9kb&p$l0z5!Z z%vBHXp9;(ZH+j?WvX=Bb8#y|I5Dl_y#0i4jy0W@0eVASsan>%noqaokm971P^r5f< zRV~)==2gR)ir+93UB=0rkf&9fVj8LL;jA2w!^?EA)W)JdTEkUQAcs~HrdTbPEeh`D z3$Z1@)K*y#@H;1IH)>4p;91+b_urtLviveOz*_KZIZ>4IxxHA3Zz}!ST;?r#JW)Go zp~NnYo%%8SRFRLVO;PC6P74v-OAesCia8;TD-_YCDo9u2OxX#-88aR8;4B z=&20k{mopQofylZar;-nQ099ss2gfkOGnq39}C-s$5fNT0*S?96`4`T5 zL@I{sWH%Seug=DWKeGsgIWdShfuU1Sk-bK9G@<~6V9z(AUv!gEHy5dQm{QBu87S=J zgRv8_Cy6EL{ENfoz1SgQ-Y<|OUZSEn$$dK*{_6azAT}VXHyr#JfPM-c7hW$@*S7Qa z`ENy4M<+hve$Mdfw|bM#0pmZf&6Cl=RVOdRwPnPN)hR(gOz-|R!v}`MU(gn&ibIYz( zbytPK5s!((qop2ci?K^LMuc??-j?;7(`=+?b>2!Hcbo+SC>Cl_NDXLtzw_LxJuFxL znhW>A=su(1jt)X7&vDSTa^kLLVRAY+>}UqN{Co z@)XHYf3*e&Q6cPO+=PB2fC3ynAudko(Q#cUdL7G0L5N`>AsJL+t&$P6(ZokP{o{1z zEQZH*9>;JCB3RqIYYzA_D`%cLF=>fcx#JPO=5 z2z(oD%Zq0TmH5E<`XEUIThtu;z`fOMkNobOb;!M&RAIVxHx~|E*ywb;=8DiM(xvEJ`#=$f~#s1G&X8))+6y7B7*9G1BxC6k9Ivwt^w;8ujKB?lz`t@gdsZ zU_V3QXa)xMxAz;c$xf?K9vlwR?k8+H<0}e{4@xhHa zf4|C-ugWZ!7imz^lag@eU!(Gh9M!{&4<%qq;FM=%2*cGGASyYZMU6-k=Ok zMpEv>*W&`^DuW8P?Gk8>2Bw4~1&hG7Zr&*IS^wuOo%;v)GJ4Y 
zivSTFsMBC;qv_Kw*IOFboGTNpdR$0zs94|{Oi9!S37&Ln))UeFhJI9TvMiz;!yZ~H zpTtpW7@^y)RYAz9Gpef2K+8LIh@;g*DJQa!fCB+XUlE#5kPkX7Mvv0m3f2gLLo;s! zt)B1HEPeUHp}{(d&^n%yXfnOSPJ%WWsFA(KDRZaPZSBQPl?GGSq1dcljl}Z;pL1}f zCYd-O&5=YDA;p6$X#+^>n&CN|Hgs0|1zl_aJEI40_)R`o2Cf=vA_^6fpgoBgb1a^I zmXV=+br!~|(o_)Tz)ao8pRe0z=LX~2VOH{wcWL?c&y%r#FxHT)g)_809IjG*ZARQL zf4m#?%C&fPz^e3^OEwIP_oY5ee$3oP+S9rwFDo;CZ2~>O*(~`u@jDj7_WHtbKk?As zq=10Xu5JM+TNjgDIdk(%|wps4L|^8% zffT>^B?TGz1^$EYkNU)=%i)qNKkJ=a%*wRT3;rt}ix7eYLSj z%gj|K;{ek*t5QxG>D=>_xz zRNc-vqhrdy*GezT+{UHK_jkKhHF zmi1_IVm-hVbwWrwX#o|I0l`1)e+8t&9`c1St@}$PDlZ^UrI=gh-;mYGc(q8EHP!y& zD%-{EpOt9zlPG=I1jNvN-xX9We@{(j9}byjFvs4FbSokPwKM<1{Y>KH&kah$D|#O0 z_2~o64eM);4NAY8ra)nafk?V?MLQgS|CgLM?b6s@;(jJR8qGPKRpckBNs+Ad3jQ5 zl-1@cYHk4WeHXd9TQG-XCk-N}j#pefb+t-;^6sv#Pgg=79-qye0^RZ8lE@Y4m}i@L zWa(`xqb+8l!vio@w2UB)r3&YEZGoZT#9SFsVtZBTnKu{7+tNRj^JddKFLHU+Mh}I! z3Dq=ZOQ2|G33nxJ1iI8TCr(uoeb}?rl>w0bGfnFF|FtDc}f4#&`_54b`=NL4+KIB%D|NMCKzg z3gpdL!>}D9x7q|Wf-@1F<%ZyMs6~b@zy=>@OGAG$LqRG;3m)k8eA0Knr${A1emH|;UXEghCh`v}L5EdI;Uy zqG3z5C5T}A4}+&2n=t`Y$X%kILe!z-Zy3(Ta}>)E7PnOiuDM$zP}? zU^GNq6*Q?OkpBd4bbU9l1%)*F$Cmd(kQ-=ggXhbT6J6a({Y5IVid@$uk>GE_wH4qo znVIY8Lo};J7E{t~>K)7C1)S9wI?OspbtD0u*lEnEIe815ynA^gdYP~!)-5HFMjx`7djpoW0NqRe; zpU;oK%vyMJg;$txlRU4HsNWV1iCH%fEU10>sF5oRPY8qE3yW3K#TkNpHYkLY9VQ^W zx#!`+*z(j6;z0o{1Z*DuOyEHsK|8Zp8D*7z3#=SK^2y2h4@_chu`CPIvLm~iVzh}E zsC;o!b;IpK#!uTBoU~|eEYuSY_A1?;erH2v*u9!EObL4)C(>W1^!K30g%hvm{TrB% z)6>z%BxSu?YpW;H6Ne!4?Xdbk>K!u&Gf4wc>KT6;D_SlHF=^lKOw%jmFUe&DD)Pjl zJOsmlJXeFjU9*y?Aspf zzc**xLyX5gGv*$svQz;;D>EuO`NFkMYZDeTc7P;s8IB8?c0*8Ef;0U%3`U*~KZDBs zyoFoyM%|e~F1Pe$E`T)bi$?|q7D{nk^r=61qpP+el%D`iyDhMPiX%+p@JWagOFdO` zJQh!}A6|9_t4HR}ChpV5B^)Gok(0(S3Hb8tuEx4k@?U8#X_+i0VsmYWynf)54vUIi z1UkYxCxq`M&MT>g2|Mm%AinDglz~ZzZ8i@B4{8;{#bHX!z#xRYK}PgOpD07y;Pg|5 z`1JY9ljP}Q))QzWyB-|rYseI%Bth*WM@vZ6{M{r$ zEa*w)iyX9;V6E#8qGht-YS{>FF4m7;h}taqFSJ=^0j^j8i{X0)<|=UZaL7?(QOEvq zU!*+8f;jUlapIRw;5=XcmJRCTSBtfb0lB;3ntWqOGw4vh-Yku=#T`2HPCG+Yx8Q)hb&x?cMbZ!r-mW_^~N2J!o=mJ#Q;~W5-)VXJx*!g*G zfXKy`twYkC0HrUggZ>~3@q?lKww}G)ub0n=4MQwtI=qE9TiyxYCO_9Ji~ppJ4-roM zS83=#*E`Nf?YFZ{Z4&JrSJMVV4~^QuNGa0mqF4^_D1Pn90X^f57S|z@_1^8q)GvyK z{ZIQEC%%K}H6iEUj=UvB0`F~VP^ff^@7OijJ3CVup*%dzG}aDmf+LptwC5aTQx3Ki zL;4d0(Tzu!HXT;psrc3cT4SO&#HnxY&;J-?yt;T!-@e7{zbh`s=7xPM_mQ*3E9U!7}Xx2V>^cqTvZhsX3&tJ49x&r@d-i3OiypZ8D@=4(p%hNy^ zy#-CgPE2|zd$Zx1)Qar@(ofyoiT_GM{OT-dBnBO}%KgP~tNkTSAb=!?tr`hsIY7cy z>$2wVi_^IUv^iK+aHE>qxsqFAGU{G7AK@X=A}f5^0<*h8Zvj`#Nua*3Pxz~F1LGZ&#imP?KOI?MQvo~SnW8j`1IdV)CDb30c$M5YzrS-Jvx;3)*1uZt z5PR8x54)C~@;h(a6Zy^H4=Qi)*Gq%_Wo+(O8`_H#fZ^E4z|gO6D|FYFw|qPf$Vd|!PVGs6cb-AyFWp4(EPxIvUVnl1I8*#=z@g?DhNwhy#K12Mhc$v8lPv z(?)*Zy12qy^+c>fU`koLC*XM8VtDhLoyB$eqTpP5M&_d0r36afY{WAO_Y){1GmEp( zRAL`xqrz`HpKrbGn7#XTIPEu@x{=kK6;LVJx*UQFc*5~woz(SISl=IP5U4pSv!#~= zwtV2}5P*H7bzZ)JfQKXVWwujG2&s8)Ywdu&^us*R_`x?zf!Ah9L{@y)oqoUMZ~gQ7 zN-$n0DXOkn_|RH(;@_Lb?nnW^QCf8r!GlWF;7SruqH=f@kK}2CRO~)f;eVbgy1PGJ zCLEhQfh@9PZ`m3o`F>C4ILsQcv*aSdqR*Oix)5SNS>bXrP2Y5H6bu+6IyuM@JBGk# zFt?sgZ5;^uOJ0^-b@Qfu;QV*;`9B-I*qQ|t92Ni|Yx94HrvKkYZ)R=spGQC*3!8n> z`0Y=sH{1=i^f>0^`;`C$=-73`06?2%?BoUQ4wI>br8uraq10b*l2MM#BJoF3$OJR9 zv%PT$$|<89Zl%@Gi(YuAj5Nd9+|*}ea)-{X)S+JJ&=;%8&#I`0x~P%(ISY%&Zk}8J;GiP`)=mt%BxiIMb$QwAPTZsKh2g$c*$~)@uwL(?+M^}0A4A#X= z-FUgaFBgXKTFc1nx!d@IB2~ISTmG_Xg`pKnC+cd^W#P6jO(S|PdVtDRKX|0WT zk;H{-$cVuHme?hK5lw`#D>9ZphYX$H5t2nJ?kiMVp zyZ{(nLB#%e+s$F>c_tDZRlo~F~kkx;6^zZ=O6dtNBj{-x?dsn(@N`mmaWH^Y~DnH2buznHw*}55+ zWNuFn|M=B8kA*ppf_Jm9;5Hz4pNNJ45))QV4-ow(F&%Q07COJL$Cd`HpSWD!aH)Yl z;jE8OsUGgm9M_8Q0^Xb9Ey~NJ8gRFC^go!8Rp*DGPxnEh=0kl3RmBIg7#BjUKn-R- 
z92fm6!@0dGqlM9UVihsCHf&VAhv?PZ#QB)*6cP*{fViN6e^$kWW7&xE=pkV%H}@c^ zp-3`vKb8m#D1wZc_KtG*XDjGFFYDu+EvX(NWA&xX)dcIP_-QV)U z{|P&rV{CB|4ouu^28@fLgtuJZzk-lI)umZK9h}1jir_z+ zK7SI?30J3x$sWBxo8j~kk3Zu-DL2qAd#T$^ zdZG^##KDG@xRs=y>r;*z&`ckas3kGF6qP96_<{sLY49Cj5i_BpT4+VMn`SUWeAlmJ zO)Eo-ReN9`?uuOHN|{(-FH8V77%wtTbmaq}-FQt;5sAE@0>Mt;_(x~)d@w%#X3xeE z?`;g2Aea$7chXDpC7hX8Bg&(@1qaps^lq}khU|9QeWUcE#{_&Ff;72m%B4qIZ(Oxp zM1+`iX(4D|8@%!mmRvss2UW}>3q-`yVyM!{GNyT+h1u~4@P1?vJF}8OVGTGy&n;7J zc+$p+6OT7o2?}x=qkCwwnWi6Fl*E12o(+L8IFK7+{d*tdOBlaV(6T<33MNfP^Pnh+ zT|z6~JQVBjw5&n|M@if%uHmYEFi$|@7olMR6u`cEt@zx%Q8?O&aU>8j2p;h~rp7hw z3F^*tnayOD*qGQnZjn$tjAZ6&u7LCiEkgT{xSGSN&%!uwk54InZk6OX=Tiw zI3rah@pKM~8TEH;MsXgH1b|U;5HiD@ z!AsnbE9bI$b(;lh7PkI}a16sQb8VKvfPlJ@QvXp=y-LUZINM!Vtk5`j$lD}yXm_Sv zZI$Dv^PtbS)5pgZ!P0uiqPL&!7tK!ytD4>+nD?G_V)|mH*$-@y1CIUdmSGV}PGYVy zjbgV(%HPyNe?CNA{<){DUgSzw`m^@i6ZyMjxOcorW-1K>Qz7c35m3%t5J{1CFVbg4 zSBk2bvwu9dCq)f5&j~bvqsSG<%po^&uZ{eMskO&C<{T5ji)Y6Fcj$Ss2THR7FLa=R zIp{1f=jFg2FT&_$o)uaJSa>`>}wM&fiJ03T09u9#>o#`Oq^mj5yMF-OC zltZz!bE~?kiwE#UQV&+pg!*qpDTmZ60-bj!KEFU3le!=w1>f<$EKa1$9}lmwP6-I) z>B#f0CSncD(TbuLxitdV9Lh?)v}?Qyt4)+?}@ivoz}n8tSmxAl7q1aS?(5s9QKY%D%w}XG8kq4&K4pC;A&8b z8`Ih+%)OT9YBt7e7Cjm#wT|y?YS%>@ny=_fYdYE0X`~CXa`o`fmeiI_gitE5-{e(f--G{8$Q$;e6XfWJ zr+#6j!?(9H0PjTV`C@HY)%{$VOu?G@=kOkAn`lx{j|`;bp5)BpFj;| zOD(az1Q`XL`zs{@E;6{^c3Z5%-FW zBarTXE>pe0{*6u+>#iyFSD&=M`adNa*+7IYqI|lc4eE)#VNPWvFApo__G$~2suuNV zr@Lk|pM0wvT0WQSW<9?*znZ{$zH$lT{+p`KEs=U%bzbA$nG_g8PeTu|Xu-cT& z1@8Fz!}8K672v|>h59o1 z6v0md8@fp2weXBSbe3R-8XTe|C77=#DY994YVOE8Qfs+Dliu%l_bc!|)zvyW)CAxV0-E-#Z+ca? zC|HlEHTd(Jh$GtKytt|gCx^Btq?r|%fh1{I7Q)CVQzg|t#poeb@3*JpIqvW%@misc z!MfnI?sj=LTjIs)A;YS!tX&tyU2)>tof#1S;I%6kl3%^sjLUVe;D z~vdL{O%ltfoUx{8Z0y<)g$<63aS zLYUHNZ}0+oB_OgvPc7PH9H@1WYOMB8`MM$Y+XA2}&OW8Fu3IuMC6VDp%D6Dz4Zs3l zMxOcpI0Z`QFmcUow08}9t;o_n=ry43iNm4}p&c5eEYdBTJNTAT{~;W5(NQrc=9j;7 zBXrD)1JZvTf!GEQ$j1gIedm}$%~UInRdnKWaBq)M8c#^i*VG19YCxS7t+R$z<1k3) zvErQQZ+GF@<*~>*9Zm>q4aq-NNICZxpoH=+U?j%h0q5 zE!YRS6<35I?j3IHb#0VvztDfU+W*N>3J>j{Z=9JD)43lP&|dt>C|Kk!;JLd>&gH3Q0u- zT7d%RN@E!=Ef(M{)8X9NF|c1O&IHV(q8_RsAiqs7BQvsQV-6OsE8pXZ;R*s^)jG&} zH^N+__x}PeN%v4UM<~#nGWWu&6c*#VHj7lz!Csz1stOy1_~qRTo{WF7=BKBTK63oA zMeL+gzKtI%3LN9SIl=_Y*kYYdL*Dw$iS94+bpyHik8Y}yE)Sg^-AJ97(DN%991UQr z7gvN!st!tL5{EDh!>eb9xKk3dnJep{|siTYQ9m78!z z5u9~DHg&(co3^aJTNN|Nq#)4=oL#bv|F`>N{&@%T&u@#G8$AD;3g}Z%UOJ&sF+zhd z4x0MuRNGj$uhGv#!EYU$Q*nG&Zv(47ArySbP0M&wxQ*NFEz2`^jGQQD4|j=CSE|`Wb8 z-BAOieS^7 zA6VOf_jo1s?ZkYi)F>MG8EHkt2eaVLWn{DiT$jG-YZ>vh;8(t-$zy~DnyjQKH*@}c zpkng6yGQlb6}F9h;x1k{!j)p|QK?|P#$pm-XQAH4hG9&+sn)$O_kIrz|gJ2B#zw~+9 z;-Q|o^qGPzt?ZVkq{%n>#F8PP2TSTmDk5DH}2SNFxK8*+cH`0?-zVhcFQ?TqF) z92DvIImoF^E|OgXy(9^&?;ecWX;!7khdm(tU1yM^Nt|W6ChI$fRvuc#SZDL%Z|ZUojD4z5bF!4&mXeQODrpu#F*X6^znR9{Ed=C`7p zAVHV~Z9K({4zrosS@?~n`tf_uZrtt>0(?4IUX|H)SJBb?~} zvd_ihkP%Pif~jyFw!Z3Kq2yny0u6M?i2I{Xo}=bBb%?A{y1@G`{NKd-Q@xYcW{_C- z^TbC(TyytrpWIgV zron1imzLCpEI(8Nw*WPYfn!pf&wBBWbd z0gAxoo`A?Z0JT7>jvGb!9L=>-@B(sql>_6(#3icMyTN;#m#i=v44bCHEQIP9*w@&< zQ9r@@EMwLfTHnZZS|L@2$RXPP^wTA+syQaU-;n;x-?xqOh%l6WwcFwB=6~=fs7EH; zY8{xxIz1C@T(m`=MWx*}#|^`(#Q#wD$bGxoo0iusZfd~2w-^T~$I50c0r?YbxUyw{ajy=Q@ z#v;kI(&Os(h8N8i%?+-c(?apYJ#toMKUWHpk#)- zw$8Ch5MbM~W!tuG+qSFAwr$(CZQHi1%eHMy&zG5ZA0ko*2*7^=Sow%08Mo*kW;FCL`$3Xm(}w&btSrzBxR|pL%2h7hXo3hza zl0u++K~{11D>nZDc~aU|JxS0l+!JcA{i1qw&bafrbbnHbDk|95=x$bK_*i!U?m;s2 zlifnI&MXizIoaM0AR4WBRw4^S4ae6nx8)CdN$3TXeIB97<*)C>okRx@olGmgK18zr z`>*7;gn((%YW}7SqJ&?mUtF^rgaBuR?{szcMO^R*^RpDl!6+zylF47k{c`|N?a}c_ z%~E64+dpa2^tDgs^J5&6TcWu%fN39#{59;TL#rbC69EJI@o<1)B1!?kO-2DLf(Cl%m&c3Y~WOqZPn 
zeKJ51mCy`*Wugf?do_l#Y}ga~X%z}audI;H(HE&fVj6^1LECdkbmp5ZHSQyOU&gUQ z1V6)X)d6HP1$M)26bY~CAE9^O4lg;g;(T+J+xnrjF7I~w*xRc?iE?J?f`N^}F3zoT zf@27Qm$SBfhqVF)1(ibm2JBkuyZt(AGCB0nE9k^ueV9;UfNitE?SQYxxoLmh*Wa&% zuw&q+#Q5T4#m!s)eZf@Uu;#4T)GG1oOf$*&0mBdA4Wm?c-pJlq&QZq7{$mR2WiA;C zwov`(ShALCyjRILyMmAANCzBO0r$Y)_kP}TDc2yV5Yx?%y*D3>-C+zx1D)MO*Ye>d zaI?7|(APPxhfS*OQkd?#7{Lm@$aX%i0sq|v^a2kdU~~&+9Xfoo^h)h(UE|Fuh0|gQ z5#=_{*wR^d|VkAU@^`3_9cqqkSg9+R}e2T#+Q&ih7^Nd}L^>bFijjuPkb+ znZKY4uA&lNqWW3&XM8MW>#^dMX2@N8CF&FvhKLC>O)3LVz>t))lQv&W<6Lb6*od{1 zUl>(WJxE9AOrCcAj)mrVT9F!eTJcf8T;LIZs2{L};T5!j7SfVClu>?PIJka;&i!Ha zZAy$|F<5dHRXNpB`G9beF2o(3_iQmBZtm(G+@Oiu7F4n5OO%Nn>zUbg8(>CYtUV>2 z>75TE>j(TlQxtoXk-GyB00440004sj#zneVJ6kyXe}!y}x^&zo8$$1=Is$faBGO2M zBg>xdA{fLx8E9rMj&MHdq$y2;R>I_l&Brb`juJ|VdE5FF-Rb?z<)OkG5Zxy z%-5vEW|YwDvNL_itflUJWdwu`=|!rbhS{lXH}2{ot|}1Ut1o#OU~)Q`bPMu1Y8fdT@V9RR zn3n$30xPuH>H0A1wG=e2(s#*K(mN76ZZ)EA-~*wt-2lzU;49Z%a7TnD2Gv%SAqW}8 z6EV)Nlm<9A;^G|ERPU3mkZ*pbf!$NlEX?Efp(d^u z7}Bt%_wIZ3(7e{fPfo$4vZXajW~P`8Ajhxr@C6)_VV0!C^{191qnP*y^&Ne{_Bnq) z!0g~O;P3`P62;%FJ`#zeW@M2TeZ+N#$zR^nn0~Ut51Hx#rX}l7i`Uy95W;rqZ~V zVIpE8^MZQeDB@fQ< z^}ny2+B=#UTNpXpIqF#%n3-9d{7ai_c&{BdITC-LQ8-FUX<|%baxykI978H5NoK7p z4dcjXcb01=N9K`?nF}OBioY1w-yP^3;N9gNw`1zSASR_;bt=+HuwulD7S3#0_Q(x& zX&)ZA)|;n-`ruMsT97O{x0eI4Z-KRs+B}lfP4wP7XQ$A5 zQy}9`8^wxGGQOw~k&<8fGU&TbS(rIzd0^GuBr4wLUvGY4H>6 zA7k~>BCbDu_2uEg2cO@3@#OhYkqlO!TM$HvF#&w1D1vRL#@H05m?*DI6?O{aTrVP# zN>gM`xzL>k_iEOmw3S_I)m~C>Yf~OiU#`-I~?O?ezFKxe_5Y ztbg|Xe&_L0aJ_@}FF6tJ9Ts?e$LPjQQ||@;Ekk=7p?3dvV8ED;snx^3E+F zkp=gM@YMw_u3ZW5>h7svBWNW3vbr(mMZAX1H)l>hbOe<76YjEd6fS7of&wnCEQ(O5 zJx)4}Mo=cZq(~ffEYh;?%V!1~E%y2d77m_lxi+}yqGtBjrL{K#UEReuhsqI>fU`R9C&K2(vn9%x6H=JXbB`j)Ey9Un)3 z%>n?HpHsi@7U059-#;4=Vr&;TeGCNPaVLj>#ir(1ps74HUtBtbSk|FY=zUFfUQZ(i ztUk!PV$8=~&6^i(msR*a2jjiTeRx!#$Fzlvug}JXyHk;;TgrbHKh=owv$lgIT9`50 zxB9^m1ye2tSk>H3C6<-8G$dceyN%#0AyY4C1)EYpi@R8NL0xe&goGd&J%$&PLF-1g zW2d>!B$V2Esq6>~l6psph82Uas51Aa+KtTwZU|GESL;>qI;8tHnXb}fqq?cFti6V> zB$Cr6W%27WOjwNvinO4FOvmK)yql_MgHGIcArg&V(`2Ts7A(fk_UIW(ZHNbg9h z00NI4Xs{c&DJg=$)gwEN$3Wz(EHvIlo|b9Uld>!91(oPFi;}nINeD@c)fG$8<_=)@ z_^=(lL=t6|1_290b4`BnIJT1K#*dZ6IwaFVzfi6&@zvAc7Mh;a&}aMTP^Ls;D^|1e zfZ$93eiH3y6++tX%G%I=y(Zo-zgtIwY|Zp=%-$l!j5bHm6b;^$P+ZbP27t`#4*6i& zm9YIY2IPsctU`5fNF&_S8Hmj99Y)Y z{SrVCE`GDtADEcf{o{re#nudO!5Z(`PEpfE3;^f>n&EW@Js5f&Npm6~|M$^;uEdy) zg%q%dD^#&)u{s_UCnh`9giC)1#>9ESd*}8GQ>3QX9$zwT89%@NL>pUyF>na}x4i+P z<36zb9mC_EcmJP6)P_=w$4raX(2OpOb2pIr(1ZX5Ez+t1y}ne$k-)fLwGpJOYuDC? 
zb3XmR9d=FpjLJ>2i`XUSPm=4L!Z_La-t&&7qb)YpHks+`rA9X;tVFkr_?{()v5EFa zm9NT(_D7a22c#PY&^}k7LP{5Cq@t^+ghmfW5GKU3N#_x@h=(Je9z|2CH(3x4;C5*W zbd#0Ecu(7U;D9*|6 z5ag232xNVw-P|bQ(9m8`ca*IFq2e+*2%8VGd9FK1Cedut22<;T%6FU%W@OAL7Qbzv zYD{R>4+INxV}ml* zrC~Nh4oXE`PJcVAIsDk26K8e{pv-r(QqoRIGB|xxdYMr$o)}BQs0e}IvV^<^?RHx` zu3VGXu&v(KS#%{;Fj5Wv^b7BhZ9w%FiwQJ(@`LA1^u9moqVKfn+pyI=%_L)Jhwv!i z@0?br?D$y7f78>7;mDs9Cat1L{ z_vc|aIT$_3L2xEbSU0>XLb2M!dcWR>()blk29o-7{obAIpq8r}Q|YY$Ey@LG36tPH zM~y_Yu=E|%u^Z{9$pVCJMkahAU(jT@eUlL+%JmDR&9BO`9CKM64KhJf2?DyG8D+8+ zj$piisNOHH?l`Jl*3Eb<#w4(a<=W?(8S)B9?JM=mdaO_zZ^d zb?_6YO`Y{g$$CMIN=$Bvv0};sEBS*+Lj%)1Iud~K>BNiD{)Q~%Q1a<%$r@BN42^}_ z&Rb|`FIbE0XVq@*E-U&egTdNaW(4l^JWKpbD&-JiBe9yRx@!TZDO5IphQU(MR@Mq8 zl&4f(s2QdB*t_xoMH1b$%wO)A00?v|cKkEWG+1v=SC;s6HCZB6#kle*x-7-cC~{&8 z-uSP9;<@%cEwQ7|fz0OU&CDYA%S^4|%ltsU{ak5k`*XjCoiSRzc^9SeC!S+!6wajf zkQ~u!Lw9=pLAh12(Ee>1Wn+BQ@^s>m`=bJxn(VJ36Hi10?C5|^-UoD4C1Xdsf&!^Bb+Y2&#JtkJaBRdeQbpNY z=dCxa)XewZncqImjGRZk(*Kg_=5+IWy#E}n_+71&ZYi^TYm4d(k*eWPcr*cJjci6% zTMjuxTpt-ny#pGVazMzsS&dY~sEhBDSgNKFkRhe`OQsFuZjpa*LJLzxztoiyo!!~8 z*h4iZMrF9y;uXLNqCVU3_l+UXqMQci=(3e92Vpob0A%poQzI^aW$$ zbV>CgIGo49$xKUUxKOl)inzVpt|tA+#_4$OXFwuCM!FIateg=wUC*10HrHY{#u{lqX@T>eo$3Hzy5n2Flq@;_w=prsn5@k;mg4FW`}8ZkY5Jx0t(mc*er^ z*6rv!d5#rRD8aD)bHxbr>)Th0XLm;2J@r(6h;)N{eeQ`T%$%;2T4QUGo6_&<`meBc zad7sLe4I~U2|A58RjHh zbxdzqWYioLZtu-X(y+4A+cD=q0}ND4z_3;lMi0f4AmxTeChwOZ!6m6JwwvhIAy1K# zv0=6eV>;?CFk=zxIaUi?z>zazU`WP}PjuBj@emQo&C{OH#fLWdFioZJS7-qt@4Sf| zN-x(jHr4dZ-fX%S={O!aJg$rZpqw6$2itD<$8G2CQ%IaQ>OcZzwuTwCvPkiT-v`s8 zn#~V;tkbn-&ctSD&hYFmpBJ?j0be!{93}uQB~_gC85uXxJ+Q6xKqTfbW5EO^aw>QzS({iAzC4uNh>C+TIF4nTP{G+*A&z6OYbI;rvrs|wbRITb>}_>fNr-52_w&=64MUo zBku}qKA2UF7&iQl6$4UZzEI3rSa)eH$QD?t^reLc4`Qj9K}7(r871i1;curL2T|k< zFZ;B!I2wF~-&hNm&ujCioWgv#m~pBl4jj#TchYbH3V7K4C{0w<@G{%u-ZDz!9nDF= z3@J0nXFlAS+@4OQtG~`m5xapn2AiZ$WP5_oE|GNMiG26zt!8`GLjf0lh9d`vVjhB3 zt+7#cU$t@=aCr-$5^kkGEc(4U97c943(I=9d*u5NlB{Qrau{y0IuYJB`KgF%2&Jl` zrbY(mPx(;FCh&mKKOGBiJxqF-4EMUenqq?_m*Ybv5K2)v_6v^z3m`Lhl*t&NcPa)_ z55yfZt{3)!2MNPrLi|1l*xm7*U#6dMleo1-y0b-gFo}%JCr;0<-_x+it7e6KGc0&# z9B#)^!Mgy1hoUo(d+^)+yw0iwLT%()4XvpL5c>!ayPDNJsXP1PK}oteg(JWw*pb_loO*gG`ID_oy!u0MDh|I3 z5N)bQSAi@SjjG!c!FJf}N|s`>K8 zSrt9Tt!5Giv_oZWP~IPxA!n{eWL)?$cbRKRACz9AR}ns_Yy4LX4v8}eluz(jjWKIE zStMTCqsN-f2a;%eN%aGGR)8LH1wEH$3Ykpg1qR0A&?}z`V#%wz2e( zl(%z@Ap+bfL-t^i$2tZp&5{+q1E(UGM~WC;k#ghnuMR~1W zSuSA7wuu@qBHvZp!dS8w5F*MijlxLl6V&%4HY~0I4q{vd91Z;7z*6&Ivvk-h6<#v4 zO(|surk-3S(EG|v{L3>7$cql^nyH1t!BNqR%8fs}{-mU}g0A`nfQITG_mV4R1Se4x zP+)hD4)|JNPY>;7oS@7(NE1(vU|WLLZmK;9UX`PGC9dV>4qj#MpjJRRmFf+1d62(T zlwhgUce@ZPI;9Yyj8+dJ%bMg)S~d11IGOd0l0Reh_HaRg3q~E&jKpo2>8g*Ps5E2% z-@K4^oBj)pDr2jHz&p)gkZUy%Tjm15BIe!#7fEnve=3U8$h0RQ)v|&ncE=lxrm_<0 zw{;b^_JjqUVVG0VC(fizo{`D!fai(%l!`MZ*W&PffXK;AWrk!@d>^O~xvJgI)I}=m zjF(Cy|GI{#?!IM6=m;b%*@b8NmK*{;uD^o{vSwwB(OT>7bGD*|06i!fLqmUrgcjYh z8-N;OR0Zg8);~i`+rtOV8ek{UAvI^fmxYNRe8vNo(p$^OpaJV13E<~8DJ@Cc!JN0L z_k$ z#P)Hd5{uR}M&*6P>MhDy(@L~$G9=zU32c(LA)!6Phv?w^;oyR^N$?OvMXnpaMn%5&PRZX; z3}dKM`z@cvi8nCrV%Z_dvpFHjvmk^#H86@q0%m%tUAH$cLvNgIBKj5Xt)tUoF=otr z*KvK7W798wxebA*omS}JvxYh_ZF5lk>t3e{mHAqE&riLe_Ai>S@MhQz4Qel@gOVL<41afW1rvfm^ucHT+3a~Wbrb9@Zm^Q{ zO`_5uCaCGk$kOD6H;Pt`z^h=3#=H;xF_^Uj43Bh2Jao^4>z_3}l~FEn-eSy?5X(2r zMtv54ZcJxX>Nuzzi0IKe%{<1~xwWF2Exsc;aEEZ|Ch4JVU+%Jc-+!|yK`M`7{3y)& zdsv_qx13#}XAKDQ=K{Mmgo5L-jkb66ZTB1nBaPUGpMmeSX~HayZ&JjGC12T0hzBvR z_>YmX!_UOq?Z*4kYGVRMCc>wAm-@6Shx3V-l>Q3}?g3rnhZbahr9HLgM|c@ww(R-i z0F{wjw<5f`b$#avlsvTHM|Diy*vh#$x_)2>pzeKkx75SXBNhjR-NJ|(GtvpYjQ0@D z7zKEFf=9IA>8MXS2+MlBYP@ry93q8_N};1!7WV)pJZPjR6$r~dS5Y4hFi}*jw1z8c 
z%;QppiE_4PN*o#wI6Q4W)StFEob)QyzXE11Sw1XG*Xpnkk7xqwV^N*Iw&1S@Is+K3 zA8WCV48gkO?I%H+Ts^sR(`EYF1XkUb2$0I$+B&pM4t_LZuS)_Y*UD0i zR%@y?$FvkS6-)gt`GCR8O<0`)=)8Sgh`Y6W!55o+SV5A2GN_#8e$xG)mCbv3hunBi39ZR5Hu>dz{ z`#8yji3+$S@J5ETz1^ek?D;{4A;+~HS-yK|z za9s2y^{%xDQyP(WfVywSVQ(-O2f)`it*o<}M~|`8)gk_bPJX~@5YroCF#3QGfD9?j z0t>DZYopajCBomHxrLTYgRl6BUapf23$i_YK^-_utu_VRE_u%Vi5#~2qDEp`%YS}R z>mRh$=Pj7VMXtTFTgHf2P_!FU_=z-Uv-*NYjKO`0Xx}J5#z_`OITEKJDd-}$rN%87Igr!z-U31 z*0q-|6pF|P?v=YxV)hivSd;~&W|K-MuS8OBnNROaHw*(EU;Z1@@85hG(?WE|1lLI2 zEZiJ;e#mzL&3=9pK0aHduDwqlzl?hJLB~s{B);j|or=lJqib@CZ+g1QPz74Uw>r!| zg_sV=h82RpTw+tI{Gzs0Fwd9r$N^VuQIM9n>aZfr+`Mi?HqZTx(SYLPNZE zm5&(HE*XjcYp+*>rV9L8*obrmb*bW(SoY4dTwI41hB{7W#mAan&F`Q$WXvQQ>5|C= z4aozC#cs|nlelKC8RZ9-}YsC!UI*wbj$YrmcI5N&0!Pu?ce+@ zah+MGTnk;<1x)FcU2>@`qOqxaqQ~0^tzhW8Ca7&y&hUi|Oh54$(Px7n2JnLv#HRt0 zw$AQ9Q#`B=l*w_}+~d+ioUwr>x6!HuPPTR~(di3H!x^N+1nkH5cP8Rof zoJBcKE$%qC)?aUb9HNvC@O%((?6_q>NV0;V1Hi98H6ng1Taqt0tD~eNdIqgT$8t^T z_4g5t(9GxTRQ+aU+KVe!O7pfe3!#-0>_#<%ei_*Yfvi4GAS!AVdPZ}yu=t3z(vkng zW@hEG=2MoI2@J6Cg=ssJA5gUKVTjc3PA?zo7oi>9L5-y9OVBuzn~O;qHX<#$)_{iUqeF+HcmnK+l9cA6|!^n3Aris$3IsxDdj(MqzNTml(X3 z^z-Fxd*IjD^|FQ?m&Q9W+ey0rF7pRfmic{A8MxI1gBhViFk64Qs)f#V>8zR8+1twm z@De7zd9xn}s}bTuyHpO$P}m$;)fkuMYAs+XtU-?eH39m?)k<}aTPdJv724P7on~rK z68tr8%5*ov`~!v)+xY#d#|1rz$oav5`OL)kee9c zB~CfyZCYrsPIa`YgiCN}igU#eqB_gp|D;*9g4uCT{MG2-FK@^{M~24W7AeP<=H`IS z?Tdh6>)|3~HdI`ul>yUWEr_er6VF4pafr?EyM<3E%iunehCO>61!If*4ZbQNvg(SX z@aj5sgJ$H9;pcck@0kEuf}Kyw+f9qlYXJ9^442*k>Q<;5yy;lwFfWy6?Ky#M`swLY zL0trScFXr{j1meDxB|z>iP^W!*g)Y>vIY2@(Z^Q=G;_@tj(<$7I^L?H2cx2QtaJv6~X6tSRdLwwN45W*!So2CQ_!D83LuS-(Tu-qc{ zb4f05i9%zKB@S)<2DUI}e6wDUcs^l9d5X@NZynAr$r)Y``)d#S!uo3YD~PO-hgh zIkzbm2dj4JRN_5hJM@JN(U5V!B`m;A(F=r}@+s$$5??gSg+=mY(-Dq~`IlXoaS8~x znRMd~^cO%XY8XIceMc-LPeccx{oKbyGODbBuGyqI<|a^j5QLwA-YpG~&?q>rGa+xj zID?v8{$l4=L{fGCl9oeVAzUO%t0YHL!pvR(1qL$dEsVhLB`TjJ{34-iCsByu5x4le zw(AXQtq@kb_yo1b3B<=KNQG2a%&+Fa6$XqJqFJ1mdyYf~9_ zz6jk(q8WQ*q(jkwXYcwq_xxa#M_XC-&=-dW_%LmDr$OPVv`)Cb#?sANGCeoOQ7WIt zJ3$#_pob%xT%Vn$23lDvbA!N>ptFe@_AzV=!- zidr}&SE>eykELV5pEs2FY34663mka>E?T{8xPnyrzpkMVJXz;EPmf>A zMiFL;@`O{Uximsl$nDD5s!)kPXba?rO`R_KugVyc9nIWlQ2soXM~Y#R}=dNC%%b>em-o!q@O zP{kS*EMnhD-QWyo*GcJ3;6U<g*Yq6IB;{JMh<`&2D4~Ib4@2T8E3x6U9rKo^{{m z^&^1(ACLNut8u|U8k-^jQbp%7)nY}w&E}3A4RE2#u-}?VvpXSZGu~$oS>_KHr3*8T zQ|PK9J~ds|M1W2ZfEqDGq0G;d>bTgVn58t4JU$g0W8PK>h8TVxr4)!AH|sRr4Nx0F z)J|1Stmi3k(LUL4x#Q%AG{OHaYiiQnKGCvVR)*QGKuMcFc)P!Pn`2zT0Xb*QL6-c)0XP>Grt4 z%TKJkBxIvTl~(RfS8f{H^>o%=kjcK7HXVUPu}(-XpghYm%DUy=@W}d7{mYrt$u38l z%ac%-cePKD-^6=ilJcTDDXT;usC*Kh8v2pnpN#Lw&G&1y$REGxG*zmMw^8Or)=AQi zt0f}mk_u{O@l) zTeNQY{CvHLk@dtrc^uHvB+dyIyz@AfwVGGXDaFFSE=DOOr06bbO8EO z=o87k$|m;Dla4-nmWGxHoA3AUuEvU9%_hQnl9GDd%)-FSCKeySPd}7O z?qAQ&n&`|V(RBT(TXjh=;TWjYC4CoFo&ku*(r`UGSS&*V~Ee7pTaDV05fmTt!X?Sp~hiAnc2`|Z}SF!G7sRF zef-0qz4BgM-d3SadmZcmaQD`a7u6XGR|oiO*6t5q1#<+7NN2Sw9hSZ+@7m$(4(oq^ z(CB?8P;yZD_7yH@nkE2+uT=xK2n#S{s@meMzx`a9O(?eCw_e?SR!f(&#&)#M;@mTR z>5W&Ri+)b(O=(;8kwod8^q15Ye)!a@vUtXmZ|;&?$fv@0vhIM@0$QhXS}xRLfOpB z;Q4?ouO^~ntyOGorS*CGQFzKsLb!{{;=?jsp>iJ5Kc2`=8E!uR55eEQjr9q*-2j4y zSLK(bg>wrgDP6SR=A`e>fr$i zrl2HVBILqX!LemOD;Ecdn@AA9vo%Nr%_4EBceB)ooJ2M)$ggUan zn#w;l{&SyZzot9TW<0$-X@Z)=eVRcSN!nUOHz9QfXc17Sstbs9&?6L`9-|%rs_ZEWdaWFj#+q*LY4h%* z0J?kHKRgaxUb){I-(FMpj1iu2L9m_PKC8V z?al-;RdsVJUoM`&e*ZUq>i1rnG|+KaQD&LRDUz_0{1U8A@CoFSiz8NMJ^K$rgB+B0 zgf>zbXf;B;AwFPtSqN;~OYe?gGQ~r{>uS2C%}XRWm@Qe!Uzo8dYKUi%SmGJb*Wmsh z8b=g2$z*Vg-@tu%8hGm%q!S*1a6gW9Zm>VRzJ94a9^m}5v5oNjct6epU6=yDWO=Lv z?La?eE)J?}^>xe#IyK8%yMi&&w`qD!dcM~s?mtIWz4F5PJzVA^`VuW2qDQe}ndfqT 
zsbPOdNYR6pR9sPbkC&#s!GG*Ake%;|5rhz&A16R!fh3uO8_-=_4@J-r`}CUhNTg1J0IqaMK%3k~)acSV}9*^nLdCazrsrE(dU@ruwU zSj0aN{Uv9eRi{yTLURrpy-^$I!#eug;IN<`9 z`E|e9i8p32d?x@M{y5}sw;Lx*dxx1F_cYcxLBXREpna|2w`Ya;mjySs-T>$3f{loW zW8Wk#dc^RFodRq=+uwfX!O4YfJMrLygoB9V22u{fKh5awfs#CvqS%Dnice~vm2oS; z&l1%z;V97-Hv5CJA2+8jrfN0aB=TcVIE0sz52Gr&EC_hyPxA$sY~=X~0m-Ytn!8AS zII(jS1(rgUgUUsFrdTlMP?~Pp>*}T<_fMbe`R5JBY>mIG+z)j6eeq4utCug#6yQwT zg-H3~L5{wm}92Rgs#R-*e{C zJ188NXpfsBGxhs^gKKf)O!866Loh5O`}({K@onrg$H9FTS1Fp$`G?6LS;a@VqoD=@ zmyJM^?XgcCNo>bnm6=UUnN?Z&X}T66V=nNt&sK>qzYndt=f4 zX#o)sklc|l#JXqc5dIrf_+1!FP}KHFXaq82Xh+H4mF}2a$I+5 zsrVCf7dq;{b{|CeVEe0c)c=yG3p_CnRIVCyN;jYGkw8DVczKM{Ip~BFi&8C+j9sD; zAJscz8+G31j+GG222NSuE1ePW_Vcf7@%d-x95X=#$t+8rgd>$_4+L&T645z)@TF6e zousg)63UD2y}Qc5odAfO-6S^~=XMF?^@5sE3?NU3!07PG9&i&pR+O(pVUE#tk-&f_ zmRthe&G)yjLEN*@5cHsU(M@#%#JJC&T@GI) ze3M|0GbWrp@s^yDL&?G#n~yvzEoSviH0$f+O-BY`=FNYyI1P7b)8Z8pg zBce+kZiR)j#+=#nE;lu;kWsieO|^~73*qFBj&G_=S-AQd1GY7jI~c`H#Cku$3L9#n zrA67Zl_CxWF}6V-z&Vd#kgoP|g6dOc3o*my!_f)$H!C#>)d1Ccv5`@mmjZw{%h1+W z6;=TTs&pRoDkK1SNL<8`DN~|xX$XJk`a3w%Uq~w`gqM^Xbk(B3G~g9#AcYnc;Z=cD zQzlRS67^?_1E$#7LN&MN`_{$y2n7+MKSYx%ta4ig8?TFON*q-J`yn zv~?&V4y|V0sK2c-rGi;Fxq5E2thx~unG79(U$xh=%9+{B_@E;El zM84U|&>z+<&Mz^m91={g*9_RUpHUl5!`-pq%}2wKWM0g)^5j1?2{l127>{1uH;jjP zu5MACrdJ}UfRl^)Z;`{`C$6eA_LPFlsjEF?0Aj&1(0mUKl%;`(p>uL;do2O^MILw# z(E8`0f#D8ngjz!QNl4d=-i6VT@>&%<7N+LsU}J5BRE85l8%j4ip>3Ti>Jyd=iSXs* z5UVt*_M%+OuDn9p-6La!ljl;f=dZ1r(JCO`;2RMg)Z?^Cd;ZW6{@aJrM6%JL4=J$( znbj81?bwF48d`l$oygt7H0m*`#`$1EHCZNG8g>Ype67sMvFWk#(LMniE(nNuGmm}w zOl2}r$;UFDF@|LJ)5VztIYQg{Hh*z828Jj(g+?jWX_rFAWp!sIo{lXcGqLGv^LY~`OrotrLfZY?V5(6Q zAH+)di=q4h1x7!ZKyHxZ1W(!lhro@*R+1sp#s7x;_V~C#Ot(lO( zsvnvr>glRAQ3fHJnSz;S6hcf8j%Q1)FiOMBXVy^twHPKjmEO2`KahK?2V18RZ}h+k zXTSua+)4vLTcW8Gv{hFhXW0o?x75TGMTl|7badOtrYT(SVX9rm9v%LpUhqd}CGV{t zr3a1G*uCn9K()2^!o73v+uDnqprJN&Mk$62iSk4i@R0xyN&~UOdRV*6l24C*G9uhj z;R4G8f`cGF0R(6cOW=GX(TNO%GSPwn-72A4=9y5}f2)o4pKL zs$6juav`Z$Ud32IMI(GPz9l5Z#Pr7_8G!OLxjXsgdjuTs#F3ZSJnEbtF4h`XN(HlE zKC~>PK^U1i{2z4P>^gh)(1u5y5rZ*it+-{X>BnRx)DT&`xR-MPBIYiL=qtWT@G)=) zwGiitJPPgZE!rHu(Xw!hmcKnD?1HfF=M0w^_W&D$EoV} z+3}(S4ZN!IsUMaaKKdN=*Z&!RPV)^8eL&5^M`GC16+TO+f|u9EEg|~QHrB)b-xdgvs}a<3;V2z} zOx?28;qvf?iGb-H34g12xlh7SIAj9!pyheE&3bW>c<*R$tip=0k83D|_x@jyvu-o%7`^BwjyNs`wcao_r~rzmrUD$u%faV6ruTK)i- zJFLiNiDaDQq+_n3MvdhnHNegSZ_t@NcRY}2SLB^>A&7~!4C5BAE^wYdmqr|_#s3Fg zK%&17{h(I`zrnfb!}EMdnUOo=hZ2yEl%vzWB~vSeYj+({p3-JP2Fs$;YL!xPgnhr` zek)e@AuH+i88&8EQ5sCpK}s6u#txp%O8jb$PSXLZD8F5YwD=5=nfBw0(Q0vUK5#_C zYMQrM*GN+y4)L+qIv@?nGn+?|K;Y{QzNxn~EQgVDSa{kkU-9Li3e;Tp(LkTO&j&J}FB$nzy%*t!n;zLwGG(tUS z#OB@_|50zb?-9G7rSxCfj@STv;Km0qL42WV%2-Fv8!kPgUw!Oiq(?K-gBsDuS`DSs zyqJT$IWksl!DryO>Q&z!%+0D=>a$qQCIvj|i>5q9t+A^FR3bwKbY?7*QROY*b9(BI zCRG8`AYGf;_t~UcFIJ5n7Fit8j4Z$E)Zrtq3jk360xuur=D}=9vg=Q$#l`xn-}|Pa zK6NYKG`*o(Vs9djuIg?_iH6Ru+Wy=ZAYLV1O^TB42CmnuX?44hOM)SRe~D$iz8zg< zk|mFKWiiu3HqiS^RW3BJ{Qgg&PqHnhj=yQl28i^?^b;{6f>+dww=em=1l5Y(4h?qf zmv2=z)WX~5^#ylMw#Vk8rIOpO$Lwg58#<@L5JCw;nu|1~0Cf#qE)S}hI-}snY#9;8 z+RYjy+q9>2iC{=X64bTNA;DXYXc5lngx*H#BLiJ|&mw43Fs7}S*8I&{a>%s`uQ2PC z^c@7?RV1!Mq8>LYZ~4`rLY_oFrP`q>tz~=O0{n%z~j1utGc=HZUUc zN!EqP)dc3NmZ~hK#ounKbzD0HkVgyl_&0+Y?24%sM~rb?H~UL<0r-Y?7Vd>ArGSAt z9`~fmD4;+Y^`y$UnLrWu&>9bXyqS7|4V; z1gD??u<;;L=eeAA))id{_dwssCsGSz@(r43t`%`oz#zT9GQ2{sod^@*AtN#^-V3uk zE{dy7M-pyVL=a-7)Vb?WMU&MoGrG}M=F%4}PtxDjI(GAnBKtx#ChxkJsw{01c}j3Kr}k!!h1FH9hXFI@+}U z=KL5E=VgToF=4iwP1vVL?lzzKk8HOGV4AeG0oA@9=G>1r0oDlHZ8)2Up-p=&6@wIC z%OfAKTP&s=ud)N(O57(_iJ~o3-E5`LFnyyfNxj;4fpJdPlL-vN;uTwHAasBrNXHy< 
zXVei>CsnPT(3@i{-ygWT>2Y2+#Tz){PRS~y&owQb=R#;uxo#NQvN@~-2(}@KFZd2j zb(vh|C5mu~a(C!%&uWutekn%XDw|#T7&npvsdE)$O>X zkZ_bMC6@K9Sc=;mR|pR#{Et;yTMvjISex#79=dJBfzNC(7bBSV1Cr`^u%Sea8O6AM za%sd@A)Ta+g)E+$>?S6!i+q~OQ;~+%$qqYPK_Pj@u(8a1xW5gP)Mf`*aBW5|I!+35 z`VqhFWAi`n30q?AG5 zLb*40E)93}{xmsYe=uU|tNIgke2Zu1v|_gm3~Gm_LGqCdR<)E{d@fha$D3-IpcYJ^ zT&(&n6naCxMjz1l`1R2b+1cUSgQNFnh&(TP14~j{t^ITI!U+OioG?~iXFR~nRJ2o# z@yVTkR&1~#rDBA%H}s!eQd7*`x^?v%Ek5jBwT3ys;5xBSA!a8Lp}B9 zCVi(dI@)Stt-A!R?NisQF)eXaJ?zhF)HTU_^uvKuq=JR}sf?uh4Ab>_~V_Q%Uqb=-uGqrQ~sjv3>vz*f9d zb}~;AGW+bp?{VlQ6}Tvcu2R(C9*>7c=b|+LLIOV`20~Zs97O09F*DxS*`}V2vEh$` zkFrnvI40AGI|zS5?>=Dmx0w}R6L~#zKm+W^aRY4s{z_sqpevz`^}w-*6m1;ncmV7W zX?;5*=fP z#&;_-OR|Hn%i@OJhw_`MoF-`e#O zgvQ5Xf18daZiI6g)eOUK;3KMV5#|7MCNXJ_+G-cG!K{_wrW@yO^3JxzdLaZD+y9%w zb`rY@4pEe%yp7O#5&kQqC)pjumu2svE5hBE~tnigXGk}#_-E_jN~kgwK;H= z492{*e*3L(K}X!7YX{KCRX3)(mYyjsx7W2X?TlQTnpe|2dE26bSxfg`P+G-aP&h2;dzE{gfjqB@>vIgmtUeSTwuUn3yguWS zsy%TuVq3sX5h8pAPNan>y7&Pteo$n`h1+9VJ577Op#5c+b&u3gsd>43T*Z^N=N$m1 zbNjT>N%!?#h>j77Pf7#`<)Kp0$u;9)(b}7_FMWM~nK!>!xvL z;}YWCOJqsnW-*{84nEcBDmwCAAXk$ z`jllXVRjY;Mm#`oRti0vOe=bgv@l|5R{8gS(jHnxV1PYsrRE^cM(b(v{d$FwQ0KX(YbIN)&LrlT= z#HN%jEj=X3i`}H0qXq9!qU7>SFdBA}^u3^xgLdtgkE)%fX{)LK`9Hs4ATM8ZPva%j z_5(Q*tW_(vH45^fA~$m5{c=iyvtLluR#RIdZmA%gWGiLck~NyvsApb9f)-sD+U*3_ z8t16i6GZgMH8N*J*(oRxP!WtH09An^99^EEMFMg26eij1C5{PU8+uy-C9g0>g|X^w z3L-Nwo5ED~&z01+HMHain|dNX_)k2{=YSOqEV{=nry~H@(CROyCG4zvYE&Ue7Uy%Q z4^(+}=Se7R=#d7q>$7@+|4W+lB`-j*=SXOwh(snZ0uet9V6AOp&`AixkfVndvuvbk z=CENDA(hO{fDY8_3FS;;ojP7Rle5~ixB;>&59QRH!aqo-O?W(@ou3y?+jK>lurSct z6@`dsR^s$1F&d^uscPI%vVHk*MD8-g>r$Q{QvA-W=M`p#nu(!q?0esg++C+===2@? zx0k>uO@Ibf%6exaYlCy*bB4l#1#b)OpDJWS(fD6ZU};K>dj)u>k@&O9n!Cq;&vNM>)RiCt^-y5Qd&U&O39I< zttUb^@Zh?oH#QxN6_FcDy!UZpc&fVs(`3-KFROS%$M7C*l6|{LY)E+(dmbA}lj9kM zN5g3Htu|~}+|=Tl43GUoOA|K^ZwFC9B|J^5p|J@gFG^=VTgH6l>89Lsa5NRnIYhVU zydLr;gO)P>*dOc!>lYz9Hi7|FlKRuto|A!pO9m=&aX9bu%`j{e!K7;8t#ZYpp4 z`lXD`w&*)Pc-8Guq{{T~gUK*T)<>PR-iv!58x@XMKKrzO7@u|Q91Yw2l5>>r_w&ys z-ReI6I3<$!^^Q|K4kbUBvV&K4ANG{tLHGF|IZ7UQjI@pr|JX2K={hAuVfHw(&vh-O zcT~!~GvX3?`+%H^hGCwQ^;>wLD7wcd2d@wJ&ss4@+ULl|Q>2GCZq$=BI8-E-Z!iRf z-MNCXMkIuzPrSQGw=+qDVLB{~U2sHVpR`BVZo(aJcuu+Qf`$43m}FEwB8+Nq2_2o#dsBTPMntqUMUV)gS)&{d@)u9;y2X|tA)2>CgPRU) zaIp)z8KYKIWLgSCXRZ@juLNmb&CE~HumTn|P%$UPDSaUL{F7~|j*@|dDu5OBY z#fF|dX<(QlX?4;B?>lYg0vpZEwq$S@;Z8T8&U%(zg^O6Ti3FA9y9wLaSXxU>Kxeuf zLw$aKFqTpe<-xd?(c9R<^1?A`k7d!ixU%)aoca>Fgx1_=%)Sa+77k!&COc4bdjOl2 za4Zi>TGWg_t~gFG$Kyk$J~}ZkxH{LTxbM^e{d!0P^gXj7?V6Dz#MZ?A0qk5G2q_Xc z$4_#=%zoFlF)eg(g#L=^PM{qTPIv(~vx^(>rWFismQRTJb?oCTpI=P#H>9_2$(i1gh1n(^M!@k17PgBDemxG>ZDYCtFie?dT@8b0vN}u)?c9E^ z_7tJ=D6VgkPp;8<*z!R(&cHparr8$V`oqWXkCJmXIXTKY$vTa28v$I;Pm}jeVa-_q zlz4lyn3Nm{T#O}i_`seraFCW0wp*o?R;C%1$j2^Jy~4kv(#&YF2c!16b~HCkcRm$7m_ME$>)`_T9zc` z)cT~%8{HHVB@eNDpKE#fv1dj@Q55|e`lG5GMyHElu>AUOuVvK?y#mDOKE4B5iL~uJ`Ib_E8Q3YJ zu~qI=?jSBVWyhw^V%i}?JzibybG>r*7)~g56XgS76np7cV+x;fW*Hn49k{Kx5&U_y zDtrSL#=|8%P-Okm+kxMFmi`Jn<^tdmR~X&G+kK*&UW|y?wx!V4EwcSK0-H+<7O-er z3uqlIdiaNo2~ND(T7X0AA5V6ZW1L#~y0V+y;4c(YOUu+&8yMbp^kUE{@?|f6L!mEw z5fS@d{J-mu$6kN@ekH%Jd?8`B;e(|i5#lcYQJR)Vs)sIW z>xv}b#OG-yB3<4y2Q|h`6J9F>%%?X*v}2;@2;JPB$_xcSKn7eY;lFYN1?RQF?7N6(GZt*?MYj?5tRd z9->&#Yl&eVNRX`sZz-Ays$W&w-d5iJNg$3?Yo;83E!55e;nujpU1F^LkzJc$D;BMBE*mVuVe8V zZ}>)^Y!4mpL|N?0rP%xl_buUnp}AID`9(ul&sIlER=dyb1&eB0kwnR^M>#Q?a#f_Q zoQxOf4sOznc`AHpM+VZ$jyz20-kb~-8r`)yI>|m1E#xV2;f}bP^GJ0VfdLvi8aac$ znmY8B?}==+9i0|HM<+qoH#|D%>ayTyPo7`%=M(Yf(Bci(Z#GiN&9s6Ls`seNf{*Fb@O;&5KJ8F zDeVidigm!6-4UuCE|BLfc?3cqpRX74i#j&p4-kSa#GEx1pcP6UvmML9hw1GU;L!?& 
zl1wUhbQg32qt%%h+y)ps6#F1*vhW9Z-_Prd5-_SR0c{oqo%i;v!5&P-fvZXLX1vCw z!d{b?9FG8hOEy#0B}EUwunsZC1Q!dDfwNLJ#WJtSkAOTVKs2mpcR06svAV9N4I_Zw z21Czn1Gh_lC&@~kAx{??5{fkhO&`y87{Qg72?DvHP!kx&S|wXG&@$09Xbt<4bg~mE z2e9tnSD0z9V`ySzKY7yJfYfZ*c zE?pxI(GABql#g*W$LvBUwobruHU+JsWf6>njLk>t^4zk~m6t&P#BV@Bq;+}2tSK)b z!jxN_lKqs<5zmJTi@K*~7AL|S>C=U3oDrYOA1TrV{Why*p+l%rSHo)Cj+}LNrKv9p;88NPIj@c zxAi!*AF0!09SoBHHA-F$lb*|a`x24sRbn{0)G07vklpQ!pdCM~qlOM?vaJUnBR;V$S5qtxfpn%K0_r!A-}` z1nAitVMf^GgjSlPvm%kg(Ebe3yiwF!>_a6 z%knca&vG-*cDKlb8Lh&~RV5RS701a?NjWLPOH!^8l#R0^Z)0MS=9y7SO=)W>k8 z3a1tHP9=qnFeBw6PR(DQv(GB56Y^qQSYn=VLq=EG0y0LfzRGKqK^s27Eip8_!dAWxE zKh%k6h>2_)LJqvQ5DW3_Q*p;Htqft~Hal#JS5PvFpsdT#qz((c6ZuSut!wP4v++lz z`ORm4#CAySv-lL+Eo${#l)@(eW8_2A*Q2%~d$kT30CvkZ>{e>ia;_!~tR@1y3Ff`P<@fEeQxE>bIl{MHPUBe0wF>T&XF73sb- z=y)%tuboPW%9(XbxHlAb6{+oDK0o$Mi)3V{R6h7;j_VGB)Y+ymz2}y6yY`e?`T-3Z z1@pNw;NvQC;GxzyLzRTN)981!rB=x***2|HcwwPd<^;MUl^&_ke8!hdNvpcNqF@EF z%QW0dslj?3C98d_vzxpLTeqqvvKO-L1P)`+u&6-cIPN>-89E)(P(qxvwKTXi?jSqB z&PWuc5(Dz*o#cmHr>Ne=Wc``R-$G<2)0j@h~@PghJQhBrdm1AMajtdho83x-!! z$3nwO19An9;YsL{8@0QrP@>fFQr2~`3wvXEx4Oo=7qdlxL+8-A4GgrI)a3g#o?StAe+)70&<&ILzoJ5E=U9 zc(+fYI9bGR5rp=F6voENFgQ+ZEET@+U}+~7kgxMf+1d`78;Z0`z3QgE`W$6fyx^E- zz$SQ)nCp~{Z~)==ASs?$MvN@3cwfEC=OB|J^um3qcgn$gq%g|hx4j&emi5Ia9xx`z z`=uA;?mlxsTp(A|p6pMP50o+4_fakzvHsv_)ECwCE;0ASHUK!^A4)4@GfG5S(X^Ln z^k`a=5a7Jz9OD|(_oT^bHT1#yR2=|xrNhLO@yH9_k6Gpi;$vurqgyS8yz z*3Zad$r0=N=A z8eS?sBbRetX}sH~XWImZtT(QDQ&xLUN&=m>R^*NgNz%}ct4tZ0?)3kQP~}XTVb$I zAzo9&qb3%0z^P*7i+>{(>^+qkRV|!)jq{0^VgycQWSQkMEQ!e)y`?fSTeyW+euK4~ zK>j|$z^SfjDwiFng}YokzBiOqs01ZA#ytIhrVt`uR#ywH%vND2*G0z}TKxKr z;LdVLfgyV_oq|b6AlwG$SnwNi%t-35hbCb*fpFB?4@DBQoPgVxiV|5~()okn-pEW; z5tU>0X>w!?Y#U{FyAT^&tHXZ4{Ahle$3|2>$_+(%KmPH7Jkn~$pB z3OfJkpb1qJT@7u+=mg=-KTzD}B}H{6rLkwdEFuzxePX-Ppe=N`)8@+lA^tQd{W3*$ zl|5W-46#H!-K_qL!eG0ligaxD3kKvT2^)u}^(Ab}JTW)H<2cul^mK~ z&TexM)tdaW*gVr$@tL*_ZcVtfRaH?UFBIRAZU^nb6z0(}4pH9@4N}f@2Qn6_Z3dX2 zHI;kq?2V0@K@sKIPHT3TvWA1Wvxe7$Y$n!C*qA6>Nk>JoHdroLnLzdB0weIc+>-)S zw8w1?tid7R5t!G`Ds))`Mx%8tjwB5!>qb6;|GtQu;J-(4`(2y`|2>4iu(SPQE@P{{ z@8dQ2Z#!Y;(M*VR@-?KB$FU_(eIF*HBm}H9M}*N95sN^2FLKgbk&#xpVx8BeTrWJe zrJ*H?q{j+i`o$_&w86%EGcoeU?$YE(pvps3%wDLRA?)dAZ}XyBZ*SJdgthIsGfX~O zU|||EMkF+m{}I+oD6`8CR8z*9t}VroJHJn1X7y!V>RVq$=8TitN;0`1ITn>?Q_azS z5rzb^+;uVeM7OrHN>Z6$)+Ma{>`v<*QHd%vp4m~#mQZmd%&YRIkW=o0glCfY? 
diff --git a/dist/caireCovid-0.1.0.tar.gz b/dist/caireCovid-0.1.0.tar.gz
deleted file mode 100644
index 66e29b440f054400f88627130fa6a2ee86d2ee98..0000000000000000000000000000000000000000
Binary files a/dist/caireCovid-0.1.0.tar.gz and /dev/null differ
z#)H;LvPg%KT^lX)l@>|sbiG(4#r-fU@5?G(M6*^*ih7o>N5BupN$>#9t^LlbMI6VB# z|Gvf-2&MHdo21JUTKMtg4=3+`I%&O5%Sn-~D(dB>k-yf3e>v>x1b2RvkDbo5d zPb&Dbuz+99ld{ZaSqh!|P~_J|vRGux>*!swyk5f~qanP%hyE_{*Hfpw>Lq@7e|j7p ztyV>Tn@;ihhvPH-*{Sk=l@>{b4V-d6scv+S5NnN{O) zyt@B~(*MEJ=Pvy}><_-_|4RS=8Go}PUqo?{Rz;THCi5s;tn#9YvL!M>3GxzS^RdN+ z|C;Dhe@uSJr|Wszi&jOptj5W*yaPrNmd$vU&C_4cjDPL@f0gA|X;J;`bKv3h|C{H7 z{;%}^f6Dy-+;d>a+(%V1@(?r$prpUUwV;JB1MOBKk8tCw))iW{yRXp&)3l+xsR546|Kt@KxAcv zd_eN}#ig)jUg&iiv?j4L-765c5%cy;H8Xcdu zqnAgg$EQ7j@ze3y@817-7X5Ve@#E3S+3}mx=>5m&)%%mz$7jdyPvGfobae9P=nuyy zuX|AnsDftF&#MCa51q^qjp-C|cABP62T-!vRO8Paj8ka&TrFViK?kpS}d{> zC$og^O##Y0TVxgSi)DRKajV^K<4RiKth{{l@hl>mf^`9#^8`maTQ4V6KZe3B<~7IT z*}7U6>3FPJ&g81h=YW|pKQ|SdX15u3ujxDUtGdZnOTC`t^Lfg>u&-A4`uBg9P*(j0 z=J-bchG|v5%IvfLS;5HjVm8n3qNI$fS*z8Wpq?*!2{iRpzMN&(o&4%Qp}Fp`6-6*; zez`6PjWiFJSoQ}*)g@ykil*r-(g$*Hc%R4<}*aFZs}U@<^2tdew^!R&I2Py62-HgzePRHOEF zI$yV&N=%DrWKa8}jI**;;Ba(jhE@_D?<%Z%!zi>q{sl&~B<4~(X|U!`M< z+0o!dV88$=nZws&T)>`5q0#;!O|G6^*w^bxC6_?lqR&UgbvbPL^EZtCgumDGq_~gp z1(LGMY`I!jAY@I;OC&LJjkIdhJd93NJ=~A!5~w}wO`crZe)xKo=X3p5!>BEVVRW(v z92E$QKWH$}350S6gDz2QL{h^INnqZRk>Jdyh(6KMPk|hZ57ZR2WRSk4#oFo{(ot3l zWVB_OcGSt@H10(+Al~}`6NvS+Yg*IIZ)?`BW^QkpFZY3UN`b|IciFAl+KTQ(9rFuD zj6{EV-BqoaruCfRWbs6KjmdS+t*SQ>zDcgKc~;#MPLuf@+MdtvfvPQ+KzY)sJ3=#; zb~p5N2YP`XY}#^eSBR*Yv&Gai&D*YKX{o_CY*2;_AG5`J5tVQnr^pb@)8#d>1WCob z1D`PqWM(Wj;RuVaQe-fu#6rZ;+5HMmaKuWPs+w?km-!-v-*DK$?t{bcKY>vs4kRh< z+;r0T8rBRD44l9LFtzyaVgK2;T|EYmaB$ym>)RU9a#`h{U_z-1ZSNJF7tl{wsJUq~ zu-|KxM=kbhn%-*IKzWuEJZQ&D)a=a1q7u$at479f?6_j?Qep?;z$@T5SCfOM5&nxO zP;7@6jRg6!1q4BY65`1gsnHuKSbOId_b7_0;JvMN0`{Tp13cIQHJSnCYVT_BGy~PMsyf+dV-kKt0c<=D=`rpLW;AY9Eia$Z7Ydp;+ zRcA5>0)lMQSg<<&X_)Yu<^%DPYQsBqr;1=;IJi|{C9jeqfu>+7#Ciozs2cQ6@KxfV zKFOCU({Us|olog~FSy!vB@N01f;NX9YvQzJ<1|N@UJmc{>0Jy+ajv%N=kpgSE zROL1ygrK85Bd~&x55Ni5VUZ||3zmsy@imr-zkiF3nRa?nyJ&Zj87*LAq=gK(~H8CP7MtFvHfZeA^tE3#A6}ZZtMD4#U+Ya|n)Z1hJg;dmOzs#l}U@}tX(Hq49d`wY( zgJzVRMTqWo$%|$OdA3TSrUev$1=$cxK zr2JH?1~u?MszX0j754zFRqI1RRSo_N7i?DwIjl!FYnUl)G9?FEIM z7rlURgNHwLh3C-g3C}q9a>V}tkaAoVAQ7Yb1=k99!{B;ke)R+@NA_pWL)XZCq~6Sv zt90(35=bA?YJ8i(F1P{$5yvs2o;bVd*laxNKb3U85N(*}*IA`t0bj6K&pS?`UaeiM z1r709zR6Sgew0)m*vTlfp15zmFXl;`3zSOz%9F}xq&gCDwQfhJdm`?IJRMprNmF*18fk)uBFY=U%?%$16VZu@wSBeWJO z@_A{xe|M9T#z=GTcNy@3z%`S$3)My*qKCL4o*qVLr1QYTsk}*6X>@*-RFfOlMIfU- zW*+^*1%UH0j5_&>>0S2=fw`0S3jhqyv;&yY{gXl$&`(rcLY;l66WswVwStG;f;ca; z*t{6e^Ze6#)#)1X2rG&Tar9mZo*#a+ODwBUSGZV%m^@3$3dPfzI?u|8RwF78vD&K- zKVqBTLv2;dR1o4cSA2^LL?eKfuQOgvH26~-y-sJzdR~#l)W*w#wpzs>ldMe5I{p)K zOK*w-4g63tQRJG=URa0O6#EBg)4~d^3#CEh$+BzHR&pf~a|E~?1HWcg0T6`BWw~Ko zE-%c|_v5}>YzrHY&M zkRb9eY)|vJdMS=+DR$`pdw8JWf*db_H6HPbG^~6>&9>cXwk+~VAJPIDE4vdT-l=+D z(Kdx4#a16cY@yxR`ZV-jSJ-+NFj1oct>8H}YBi?vDpuWC1Zc40Vo49$m#S+|WOh($b?{+!{NtmHG) z2pqnSh+uDJU|5iS`dYx+siF(y?j{4?0=j^@QUR)_#6@PHNCg?R=c77cCLnNvpya+E zlTyJRL=D7G3O~;N|F~u{4BYVK6W+L5NjMh8F13PKZL`ZrAIObZ5Ondv+y2QMq#B+q ztEUt#kBO!Jd0XE_)5v{vi%LOu8&R*zx2i_%L%p6a0*jj7Cx37Had*-Mm;J6!nC4Weep9ojSlfvdzFn806#Kuz;8E1t%<>eQT(Qb}(ET1?*hhV|I6 zQqT7X7cEO%(x}b1Ic-a1>?PsUUaVBE++;xPw11cu?F7Qvc1-=?zh#QYDe8Zb@RaHG z0v%iC>1{eUU^?QIv6v1Y>eZWZjG5{26Wn70%l!zhzzmVGw#4XrZ(O35rm z;V9g+aH1NDw~%y&TZf|mq;TFQK&&ac!aJbFYJLyh`-Ds;eo_~#K-wylmI~;`DqkYS z5YPa#Say~bWyL3Tj4i;B*Nar#gWV)0>-u0c`y}}YdQP=9r@^yC9C`_ppZ-_c2`#bn zVK3?r2fb(rOnUM;D@TK_OD26~5&+~i2!h$NOkMg|?}EDzwh=Z<6i`;ElI4y2{Zm>j zQ|H2Kqz)wUs?)S5t%kd<3ln-}r6R76Ue@;T=;@`uPb)Nd!j&g4*m2#NYi1U%sVyU2 z&*nKy^^V#NZ+C{)mRVP#C*~Hh;p*Gdl!sQ0L`0o9HQ};eQo`{M(hx?~BGe`jzA&fm z{guig_lMd2r{cV^uQ+npYco{a!1i8fLLqcaLI5(haU0~Lo3uejY8eBIrIv+E7mTPx 
z+szVbp|q)Cuz7*_vlpIOl9rp6hgYoCYB7NMx+B>xearHFvW5d8Sw^Icir%ql#*eV) zX`1zLlBitdIqCFnmC>S_l8`gIiqF|CSrH~zpPUJ#PW)n>tdD{EoO=Rj=(3tmv3^sh73x zTxxw7jB2r}?t7@~Y_m~NyT>Oq>DLv0u_$ku<5?3p)M9E3l1^6w2-M-B3YDy{kPWpn z%~Nj4B7tY?(-Pf?{j)c=w>pn}HY8czvn>riwcY;Hep5G+urVz_0%Vs&nIq3eW;l8! z5)WpSy82$EzVdo>&(FU}V^O-4q*kG2;ntUztcjC_h1sa2RyLmJcfeY785|WFnYph6 z0vbLSOlrpNdupd)rX(84+Mw z<4$+5GiYv8pt8I3`MeME!sqkBg}si!LDBYI)`lYPW~ywdtZm$Wnb8^?H_xvrJJ_Wl znr#gGZ7y#QzmD5}Zvz;;C6r#kIbM5?;!v;s55D4gN1lOC3)l8iA7of8CK4}=L38fb z3dt|t?-XWNjxwaPS&&nmB6W%u6Q~b3zk6R1}L&CEkTP;$jdx^OQgGWl>&oG#l|F}?+^4lG8`-I zTeebQ3gnM-3{-Z!eF=v#PzF&j14;tNBDv4#rOrcUTw687Ms#-r6?8|P_r1+iYgP-5 zJfIlZVAqca7F8gFy-g;zfKY2w_u6e@vmwk@Pm$oY z;o$JwhJs-nH5C4WTf%MEg=Ds%r{o=szKlS$N0_A)eDR>lrh1=+=<634v(N#h%xr`> zZZ3wU@i#z@U5^YS;#-?=uIA!iRsAWO!YWdAeKV0(gDto^1m2jYOc~ygycAmvQ!~`Y zkkN^dr)r+r_?%Fts8+NTN@#0c(1}lof>&WFOO#~ zD7p}=c3h-A5S|DMNODvVB)P_M4la~nW%fi&m0}Qs7bTv%hu)sy6E^+4qT>PzN%Iyu zXz=oZI7?4s6EH{=4DTqgAT?!B)+2(5`4iNu`$vp+A@Hp7h4D8mXtj2dYV09EAy*Eb z$4w^93XoSvz3#a1(2`|U-Hhz$x~^TcwY;ps?fE$FI^vzj6VnQNgNY_@w;V&9G7c|UZ+O)C8E>@+{ zc6*J0%VG2j+G>Qxxtr~V|MxR&Ier#j@da+3QKKjJY!Fz7&V48Rua?)((Ym8P&uoB|d#PE0xo26kUbV+|otlOt4fp>rC39Gy@ zl*Nm>SmU-Ew%gAdWANT+8-w|g+{83<%3gymtH}@=-8yN8f7s;LNzRlZvVZtsdlWXDFy^FlBD=4J*#S^huMD8YgD-w%un$^ z-fX|T$$A-WDyFp$hZhb@;cI#nw%y%=%{4TjKqiJrWJG8XH%NN{h z?0LXt6g=gIK(6Tuk}m}lR~)h+*Zibs_Zs52gPgWskJ;@Y2D^=iNf~Cu5uQKITY&@y z)MRzK;jdAq<3->I5l=?p{)p`qBGXzApBB|PUvcNvBZkKr1945}9ocpR!S6cu{1bLU z8tXX6lPoPe=cb(tV@QP&3>qAu8#j{LN)1P)#ID*(P1e7)&Gg0F2{*tg?F?zCQ9GPT z(4VJCu>`uK2ZqwTQdPpfGT_$4jV4hC54>*en7eEk)FrbB$)tjbGr;`XeDvCbP#>om zAO&}@+RVn|%p9tZl-(U2;6NPkJiSS7`B*m181vyr7CU)#*)-WiR@2!GucU6%Ib9ul zsB1}sWrJUI937DFXdK6lOLHH6OtqPex_Yc*1`X?U<ZVeiCZw=iZ2w|MYH=?2s3Dtd@XLxGoO>lvD-BRm^6}p_HmHw)$w1>p zeXCL3<@RO+7H^Sah^O>6SyoZ!(l&WX_`Rh4blF7=8GwMPt0GUP6NHy6a%JnphK?3? 
z&zh)LCWeQDUaUm~kpzVrc1>{dr{nCxwlOhyXLh@q`S4LVLv&-$;Ze9ZPtohC=XTJdF zGA4TZS~w?=3xbNDU=^nL))USJ;31ZTQUGY9yh~N&)WOcb(6XoUYu!O?hO&<2`~_<2 zT2FkKnyKf4VV>SWt;^D%16g}|QRC|^G-m6)Zn!1bF%df!ZeF1*=nv@(2iJe@0~K)w zU>WP9-(jp2cn5IAjWO*m~ zdOn3yvx}zL1WN&;!xkekA%O&L>rOe;Ort%7yBDnzb(SHERz|>GLg~FKH($CfHUg*5 z>U`n#FYWD-sYd5cdyjBw_d0&K^gUr}ONm4>w~}xgb+z$Z012N?7sJ0(Hu-|?6MNvD zCLl(uF_Hm2ZQ>|WQ}1w^V`(Q-MqG5 zwM{#qt;rsSB#oS*Q@J+mfj0G^O>143FG>Ev$gqH*ju0xkD0*eYPoffy3*%2MPzAQ~ zWluR6vD%h9Brbki8)S56yI&|=q)VDB-sIGK7>4N#c6pl9ZNzjvA^S`yQ35ZVPQ|cL zLUOgvo}~k|F23RnB2suZZ<8Qx}4Av<8_Ttyz{_GMRj0 z17Z|YNj|glr4|L6fx-J@><}87PI#dc5NKip3m-j-5HW5Dc`K}DeVWRs?J+$T#=)(=3 zB~wSfFKzij+-i(HN;eIS0Z6%`vl~6r9WOd5mf%X}D0ZgfC=mNoyHWGLHvTdUMxE9> z10j;&f-uMV&Op?Lcfo89@hKp0xCG%Fh}udC#Qgli%O!IyQ)eD()z-Pxvtqs4<^3-c6*$=+^KA zzPRNp;kwdd%5jg@N`+P^$V0E0#S3-GDEyilGsE@`n^hFVf!575nKu6dYM+g2o6VK^ zItf+Dwin&#-&nh>;dUE*j|_q-fdIlKdk=^1&;>2I)zH#1g~WzzH+5q)ik=SZ0k?d` z)^^oajo5fvD&>k5X#db$Rd+-mgx7=LwD{zy|_G$!g^S>Ls*nR^tSf=rVXQW;dF zGZ3^u%AC?g^N@-~(MwbXnG-BR-MwP-%w1Dg6@3du-KFH+dZ}_}SWbwO;Zi49DkKuB z@4v(E4!OKETazIJ-6kI~-d&`=s*3Eh+@WeOZZ2;MDBN8#TS~J?`o5QYZ+p?n-tTZ( zif_%48r)7k;6&)v97RWlNA5{WUnRrqce@2`XfWM{5hDjJ$9rbRYrZs(RwDT4ja4!7 z?nqG&@k@=9+5GTG#kYUPSdGNtj+S*TztnhZD;Y-Hi(oZ})^dnl5JenrVYe%@;Q8Ia zFKzjA(AltTX>ez1aL&RF4b2qzd2CQn8QKf>_bmCuZ1OZGkq>oH=zMX9;byc}jx=th zdxJCOI<3?peMb;NQN#cbw!i$^o$E71hhN08#c^g?;KkzYN`+Et1cWN5)%?EWf8GIi z=uvn;DS|kL0&)Kx7lFpfz>AhvbNzL&fkiYf1{N2Am}{Oth8IOlIb{qq*Qsq$>Yc%{ zxEhu_5Ww*^UvmhobL?}umW?-;T1(eb2_89TH;36K5yZc*GWYo|ux(B64>0AL>Zjkb zQe#Y~v~N1>?5S@FeXNLksV-1F4cNue9czl!{aj{Zp1_jU+OMk=x}pR~*1Pt!rd@%O zsxhsU=h*y<{8mMGDKQvVl@(CNSH`Gzo{V^8yc;h z*AX?e+78jgL0@+0yH3aFm`A4XFYSa@xwh-jnOo4}CdKRfE5P-7xh?@3Tj!$XM}mgY zbo)Y@9UYfNCh4IVEpc!l+D^gy(%Y!_YCL<1R-Y(xT|0d}nNfV#BxRHGH+b@R!gq7O z#4%FqmlZE%mptK;A)}FA`nhU6f1U3RL9QWKw6ziXTZ_>EblZ35W1#owRUS|eY7!f0 zTky!iwre^7w+pR+P3D*C6I~CkO{n^1``i1{?zUuN6x)cpd#?w~+{OQD=NtCziS8bf z@9tq~?;e)@ZezI1`FhLnwr05d(#&{YrW)UmVy>km~0l|8sCP&#w*^WH;-5p-oX9$S7*7`(MtNED+)LXI&-+ z{M20EW4xXngf8lohYp|pd^b(Zq`e6ADyU0HcFN(dJLwMTn&WpWe5P=;Kl2091ri*3 zw}L9ZZwIDn@~P|w(*J$&4Pzs$K;A;*_qHi{VG@6e6P3Quny^w5Lz&kxKxl>{h}jM) z3vq&0AQo+kTPRyOkO>uKSAy9KH(Vm>TS_PQZo34O# z7r1?-2s)2h*a>YWyApjJw*mCprc9>ffBSl1*{!u$Ol-go=}Bh8BbYWETm{aG2%kh#ojQ zrfcnnF@_dv2i#!Ni5tKJxA|?Jp&isdVpeGu$YzR-naGa?dag{_pE^}7Bp^r(L0zOW zsR7}8R-_nqrKSBMzW?y!0cHk5Ia;S(2ehkVCf|^^fyQC%C%zYKq9sAikdBjy2%S zU732b)>A>Q|f?H1W6g~NMmMVxur^0855g;1dU^1w^V4ZQ`5Yu=y|n_Bxo^A z%(HETY!SriaLLB87zrXVXQKc^EJFm5s%&C@YiejOR+Ho|IfMC+t}f{ zepY*w1=H+ogeAfW*1kTG^k4Wkp1JX!@d^X|aJ;Y%o-`);<85gooSI_#97%1r`P)2u zTQaVQd)$DK_$3Jy8t!lzudnmF<-Xb?#xlHNlQ8$h+hiL$>-#`*6x%QuKzd`oonHzi z4gJ&$U%uY5=sgHxxjSE;)E0^b;qq*lj)*QNG{y~3^Nq@(y^;^N;^9F9r_}W8|X@)1rbzR+S>(XJdQA5 zkHTtvZLqFMVZ*Yw_G#(iywB2t!m(q{nQS8LtUS*Mt#LfUt)$fnl*$^nFm!+FG@*i7 z%D(!_MbMbOyxEd4x5SN87N#1E=& z3Lr+Y38y`bw2iP6HAzWZS`C#_0*M8tCc@yE_2|bM-@;%mjQyC@+cmUPFWE3LHQq5O zHgYC!qE*+JtcwOWy@$Tm>&v<-Zikq*l1%DKN7+I{c4FkHz~id36lt4_I+I$rTTr1p z@MCz*>Nviw$fRUbjd-yJC@MPz)khqSG1vO%)R7K5^sa})Cp~QnvIklkq#bEon$c|d z5rFEDH5s7Cu1E$@FY0C_AqOwCRlZ*uJq<=}VEHhQk--zb#Uj-E2Oii&~X_%o!r8m4;_xP z4)U`B!--)41$;t!$D1IMvXsh(Q3tG!1(b8f8eML@Dd85zu8I4P%de-GQKvLC+1#X1^e-(JyF48Nm9$%>2eBjSNksH8er3m@<^0OQ z44P4X_3Pgk|4ovtNMGf**>t}j590oTeQ|J=<(NSIKxy?kg6Hb~!EK-o<%?&}=-+Q% zJg0yAhy2{{51tKP9DX%;`r;Ylj|a7%^>vPuE;7*K$g#{I}F5t8D=ps@720;kfnPEW0nQ*rVt~4oJPl|?davv z>G5d~VElA^_Ph5#o<%<$ef)TIa(4XYG zGE$}k;ViKKD19tc&Yefb^htL>vja-Tp1`=&q+jQ^X+g35R%x-wN)_O3IRz+lpb&hs zS=I*?x8US|t1L)V;x5e0i;)>p4kLjvDFm5`I?Ba+y_#Zr6_z<#boV(P&(^>QrsFXV zBQL5bxhnJd8g*^-+*E9u-DcS5rthl)n1->obZNEJYnU|k%NlaA9PP#}Gnb#8d0y%a 
zXY+#X0Vpiu7-OlF`~nukV!hIDC}!+xT}vvnv{$Mkvf@gy?$`7D3QkLk&?3@%Axn@K zW7REDh$U$XCW)ahDg1@LA4E()Eu_ySex6G`r2cfP7<@_bW<3BgE+uI z+uY|>KEGe)i!8wapa4NLDozEhauq|89!nC(nmt=i#|{V?q-cf7`9=f#Vsby3tBC%M zbhtVL!s^?@KwFee)YgBgW7 zbr5JuLhx6nLOe`Cpb2g}D;0t^j2M>0=26rZIXbCW7Ma~uOiSpL;+B3Qy0I4=j9$v* zE>Wa#gf5h=CE8QsscTMKOKd2jE3EjaS}#=wO%7F?v5gL<6$dhC)0p2OMdaM#z}V+i zYLnj_O@Uch9Nfy>jYS%ryg%cOkyBJ3BFbJW!kChLnNE!e)K7FWyhbYV-jLi7xflhB z2OlgNn1tLwxfa^A2V*o9D~XAl)}fIe zxxYlbDW0^oA|gxEvF)5f^?zk64TwA1lRJJSes`r~l=8IV&k+%9W-o~8LduaX%WTRC zCA_9D*;(p$aAe$~d&yKFHxy>LJu0FMoY@8yyUZ?@ z8J|EAC5D9aF1;JO$G?1fLP;(KN@qPaa9IIr9Rv!G3Hc56>0+KJlTI#zd1s9CIFHNq9ncurxg=V__Ia7JJM**krQa4LDJ^9$QA! zeU{s*Rz4#42_IxU5US$eaETncn)StET8l`i8as+a{L-O9v6yyFVZ1fSzJ>iHY9A!9 z%*Km$J;pwR_pbV6F<<78-^`g-4-P7AFrYI0lb4PahB3Oh{ld}Q{tp|CjhGLd`xN~w zh`{pUrY&Zc=!Ca$xXZ}#a~IpM7s0Z7;(TRB)G32&&)|~4-|htduovBK1An-42y@p~ zau%0=T?2R7;m#S{sDpl_(QpuTsB8g(4)q*6B*}@u>hCEojf%#Mnh|QjjYX#;p+?9* zS#KHGhQ0a)%gZ26bZ_lwE@n$BE81UCrSl!Adf!QcHyk{Y#IW^ktZsm92qa6 z51i4AElAaJYdtK;@2ITE^az^|BwaX$g9Obbwas^~dmzV3y0t~JTZT808Z<7*RxPGG+fMuccH+J6@GpOvKKWm5rwNU` zMQAJzGqF(1WZdgZ$0XN?N4F-pY-#l0Me}^v)pAWw3 zA3lBl&EVJjzhCeFe!c(u|L6V>onv0U`FMsyI{oAN=yf!R2gIQA6CnA&@se+n&*$u| zQ|ed$S;BYq8$)^N-?=FPeDpM5a7!_%MagZGKXzUMrF@kyXBkXC{{8_A^gJ^;ezq*t zc#*7_&9GYVu4=)#Al0q-myfzI#Z~eP)8L7dz>q8yXQ@g{SotbrCAlxZL>1kcdIV19 z?Rq{pk6k_XGW*eajOXA902B0^8or$VGIu}Olz>DP#!ka zCc(r7sWQnj^MO`Osul|Z=;Du2{+H#Qj#!EmzG4&brAWSuGTi>ai!i)dulQGx?6Yb8 z^%^>l?i7U8RxK{dgt1$X97hI=+*PU@)&f%hO}@8q8NBG+Tp}xJ=Kv0kone=E1L&fz#pqN0u`@pVRy(VOF1uuhEI$em3rdQ7IKD;cYE^L z&OA_EHSt~7tefP!Vbth2sAyDP?xiICIhd^|u z8bB5nr1QyMAka;B1mfAHbr$c6-q{znpdpApF|5r&G^)>wU)-G#Q^S2NsGgo6 zGaysa=26Ux^S0VyZ5`0SVR=k=kI79^;4Z#{v5oWD3{MB~nVdi! z6k4YPg~!?o@w*iR<1t6%KR$TedmKIPqJkXb=K!8=v-D2rFef-(x3;6R3{U26U9AUn z0D$#M!&aCZ)?RiK>9+4floikq2#@v2wmw>oYQuyWmu1+qf&q&5BOA~GvG>=+LEkyD zb9Sw0d4QPa^L93E+fWxZt~s&$RurodP$vq9M>U;q0e0Gr;NcNd7biUPnov5(ojd^WcLmq|{Fmiq$4V18mnI@dB*81Xhg=?uXo6*R4Ck^w&Pv>oi zm`IqGZ0MruJuR1T6%)M6IP-}{yRBwM3#tTm$hp%Kw?%JRZmo?G2b@msKlE;Q;C4Wz z9H#&(jCU10hHp7Oo3N64iqYy3mb11pv>CZHJRky;jmY|%zdAa3dYSi7=tWoKz`{ zly!633>+0su)c3GeVkUDpy3CZRZ39TYan27bg@?Q3gh3TWm?>()7TFIg3ecJl^QXz z6*Vd}&OPC)<1CFQu&XcJr8k+wIMS)NAxu*NeN=rdyAZAS9G#39f1nd+g?Nnp!LU`c z7ntYQ*MQFWj_+wZ?N=tiCgN=1p;A)l~KQNW7*bCV$2_p zNV!04`aCGfywROvDdu02Wv{W}ie-0M@DKyOk^SDx(Qe_FH*(9{cx87dZ{?FWa>+hb z*u)`k<&VFJJGQ*Bniq#JZsLk};)yqL#LfKhW^Opl3%7P9dW|G+M}oJN+&zfc?M&)+ zB6K^Dxke%vpmF35moINI)`EVEt4r%fb!D{Nw>M0vzIsslB+|E4MTQIzR#w@5ROn(s zJa>SlVo1!En$W(>(JdljG8-bWm!uVbi6%3brPlIF&ElZ1|5`dejjPDj81rJ)Xyhom z!lQC^h-AlYcq7e8x_1DrmI&b}fpnwqYUmo;%_$p`kLUfNU+bc!@*RFM3QMgERiu{Q zVVqGY;(fw+ULn%Wfqc};2%|<`R}AIemn}*9<+f#b$8vqO5wY9_p$RY*$R^Cxwq&@5 z3KK-A>x53LQ+5NbYmwSa1@UVW9dxC774(Yn7L;RyEg6 zZqW;S1nUA$&wfiwJ+Mab%yQ?*8&Pu)d{O2CVFgd~0NB9N6V*F)Wb|rK(1{B1Sb*fJ z&jP$-kSCMdL@(L}uB5!J+D%hjR~@u$B+UJ%Ak|FrH99Qdt~h`7?)0J;ou9t>a6!v} zez9R8Pz>%CWvGK*qH&-@=m8^j3bh|S)h+&%k{nOU8ZDrU{O%W`e6S`r>*Xh%zDwB@ ztyko^zQ(j;pH+k0LK5)-MF)mGuT&`4@#}nY3X_TI6Ki=wz82GJjr<4YJ-eU7(KAGY zAaV{+!SVM^co))lDNt>ghc+vS(8-Xx(pKWPzMm4rmY0cPko)WldxOWF^Xa#twQOQ$ zOQlnNy-PqX8CV!AeHU0FB5J3GTrbpeFFy?()i~`?vm@DfPr)0h$jMz{6fOt*#I!rw zH0&t4D$?YW!?9`zBMgz=(X5D_c)>Y$yn9fJVkE)pp~Qqw)I{O;cH zxiAe{+qB7zZdBCKCHnH9G>4?z)()){u$^OeGi<+8NNBcnO7tqTQR6jMW3?7+-^$fI ztHugx-pwu_Na$_dYP?53=X}<*R4$mWa9M)$S_>B~sjzpai7mjq>9dQXx9$3WYyMHc zGu_rE0|0+?fotdbJh>YvF#e5`Klswgx3@U*65R56T0~KV1ttkf=)t+cpXq`egNkzc zq9Uc}RHnlfz7OrG zqlcrN2M#<<%w0G@&fPP3*s9rc)V7}NU*O}$&Bt-~{9cTFR-magHn~%A(TPy`pQT** z5_T7-%DspJD99&hMuyo5a-1S)xR1zY8UKS9iO5Gpj#McJs>Q4zIR+5c?qA5Fuv5kj 
zFTqIJ>s#eMpF&8#e1hf_LF>o>#q$WJ;A2U6hLevzhwd10U;$3gaH^V;F`Pe7X;ie`7NZA)Sc_P3!cdmw| zMi9*_dCMx^fU6p&^m)s=9(d(i&$R*uZ=;~;o1-ilHQK;9NiU;{Hty!o_8{dzJ%I2Zt68lzM@%Zjqn2|}DqN>)w3Z+nKSc@;*4b0W+T+)wcK zLzYg`NSgXno)nOpTP&nj=~CTdMr}Eqb}TLD5hstRMil)~sS+^*9fk+um{(==`}}4Z zoi0$cy~>N_f2iWGc)qy8u1DCHE`5l?xG@_q$EqF!7Inb(ng)QE_Uz6g3Ah z8`-1`PRQ>QSk071919jX7R1Qo>rw!nC)enEp)yj!Vw+F>c_KKbmz1;zDr453p4=FC ze|D2Yb8V{7<|Jo!2bAlpeLAZ1nL#QpZdD#Sir6Q<2Is1_!qdX?OC(AV6kFLQ30j*@ z;Cr1y&hAa?e?CjH0-1I8HGhO?ppDE4#;G|r0zsVMf!koZ5KtlqEEm*u0LIzjQ`(57 zVIoI16xNCn>)~6k7EwbBb04!_?+>ij0R6cYi)8vMq%u=;*wGC*rW+XZ+JYGQSg_23 zx(@0XfIZz~c_ZF1R2`{%tJphP7moJCC`@(mL<*;!nVb7n22aX%l-eUoyrNn6Y>>bcD(jQS2TuKQ!mYal=rcxZeiLcex zd!0{)N~^hqRn5^6*qI5lSRvyvWvCqGN#uaQTGQ{G;_9el8MMooMSK^aUB^;bcl1X)0 zzuMO7UTqRU-fS=_sMSp=bX`&!Dzis<3)hQ1ikFVj9Mc)Qgt4!y*1C{e>Rzb|>Ous@ zEl6Wh^j##Y>|r$^+C`amW0znWyDvvst>NK9v$q^p$I=qncWm^xKT7eQUqm2n(T z-46D&E|;d6Dm`;5@hM3f45uRY`U=j2G2_|@!u(D;J*W?)(fk5!<^!coG?}p1J#IBk zfgB4eS>ZcuEm#VpTq0V8-p!Y$Twxh25W++a9Lj#=Iy5Nkp)%NSqgMxl@*;*IjP`s4 z=&^`75#gTr#dqTC(1a}%b4dR>ALK*|&lO9hT`G%Y->c{bza{V24h9FnA(79kyoMLv zLef2Gqd!nY(GX#vFy8|1l-vYe7E?^kJ)C>I{?r~wPaOHi^Xm2PgN22KvfN+qZgNp)(ao3eJuGTYM# z*C7V72?q+1J+I7p94y7Tg_Sg^1OGHe>}pTuDaby+qP}nwkx*n zmQQUED-vGgZOEfmeXEdu-{}xzm(~*c<-ut(6-`Tr+*#d$* z=-MK*Tlty1Dk6Hp6pEAyPanxEHQ}lYHYvsEgxlu9D}R)7yc+%F72vWdE@}$Vc85d1 zG0m9&NjUVVquMssi|-w5iNUtAy$pf;%WQ;Wk175c6NW|p_rq-8QAh~=LUpTi-R3Hv zGpU%kg|%&h<!Ackp+)_|Wh#Dick5c-V1xf`$szn}8sH5>3Dyw7Jia1-vhFHCTN_rs>X4#>iE&_K zzsn4$_ocTB|Cv!}~sR-%4|4fVLHU=>ItgHp1~? zz77}Zo{VkjY;zw6`9KIH|FAJue1yv06qhr3z;aFBvjYkU@w-y)6X)(0XYW>L{=9$m zi)fgkf8EaR8W@oA?KMW`G@kh6Fkxt&r)@owAil7-5??u|`D^U@maH(6q|$2< z<}o1F+qR}G1ie3_g;;d7{gDYY1h(i{Cjqx}>wbm24%ef)lzS5$Be_x8z`QTcZ_n(@~E9~ z?;lgZa|EKL?+xT)Pkj>dV(-4;Q)5&ZGQLvgqV-{*{4>ToLg?6VbmDf^7~+=f)ngFz zsuAtk2uv}uF__E{sTHzPHq1q>L*K)#M!o(Sjw(dSx!Z|F7-Gpj)2PZjyJZh&4SL@| zqH4@>^Hl#eqFF17?KP?=yM9PZpn-)$!gae#@A>jrlO=9Dy_3&v=@V)&!TxveZaY%Y#|Psd{J^SP16F6qp1(%)Nto5!c~NucW?_FsFQ=o zE$vO08twF54)njhC4M*80WP}29<1&?CdW(sTBJ^O(u8(sLvFO3wY|QPa@*F|w%H~{ zP2PPzZ!+`zx#kWLYhQKmKJmPCuPuN68U4>}R=;cDJRTh7tc}&rhOa&l*0Lv_|QLuE;?N&)vhCmU2=qAjV zpQSY<0tKzvB)YAX7n;IY#b}-$0rKWst)7rqC^{lb2OiI2Fp8_Z3^d7mEFmZCAGY7dvOw z%om`@rK@<0us2196|XOPL>E=7aDYV-u{G_Vb^O|**Rulff+63LP;Oah>UPw?66l{1 z58Usl5hrI%98}6OC_#3T^(cL>Y}h$-5M;utr&1 zDr)5rQpeDBi^0V5*!E}~H`h0kIxO`E06ALWzg@LT*C-y0a%9*I;p>221YQ zo={)o#jm&Atmx%22C`0FesZ@0WhXuc?A>`S@>wqq(Pq9-glVEAj6TRpHpqe<=&b6e^H=;zAMjVcRqS^BgS&c+Dbrp(*1* z0+jQ>*Nzb^#nrru_NS2?-9<}78Z*HQfYZweT$@`FTm{(>?hvgS!*g%a8wA~=CES&s z!NqxXl+En3uLSr&PvM}w2mChnze+%o-ET&xCziYvt^Sb~{CVvwRNmJf8<~=N?7N&g zVzbdI(o0VF47T*2-+h@T>ltfY-umi3(o>Nj1NIEl=`g0#%dNd{A$a~L*=wi#9?$HI z1fB~mZgQag%@Lp{Q;$?1sb#cek12|qnc~_a71c+0t}?S{bc%cfG1q$AIj=YRFPYKU zdloX=8E9*Etb^zd-nRf?8a|IQZ2Hhv_Hg)74U-ysj&rRB`Dr~p4OBGde<%=Ju=loU zmh+%(a6qiplEH(@f}&8!^7O|NAeNA*SH*N5{6E2#qx%92^a^a~$_xkxRME8X#% z+jS12^Rks|>%j49u3&|HgQQsWWcGBmHG%3-vHdDrlIdvcI*eT1>vjXHqC;`ksgG4k z>F_~G1+R=nl*;y0EKHglu)?3US{L!Z1NHtr`xKcSEqdmgM^}EpASR60lVbh}2P9v0 z=?mokm8_u%(uWyiX+0U%Ng6k5*UNwZR7XJ7Ge@1Q9p9{$$^%a#{VacQw=;8r%$`7* z#MRN=25hKk*c7Yb?*+;YufvBQ`pumo)Ces#;x5V`9`br@&2QD69&I3{K>N?b=l9`- zUqgdu>q;P?U4+~>2z;Z=Q+=d?_P36S=|;q3i7+Muk9gi(IFe(uX|U7yz#8odAEj9B zw;g-o@cJFBj$W2mSuF6aHji?LTGT-HWe<6Kq74-Gv_4LU|w=oEBOH;h~mVz!Qh)_Oj&Q)HAEx5shHf|PT3}aEJ;KqyD2UC1AOnins-2;_w zgicMn$%4y`nYqmrP~EbgtBFXjpd! 
zdyCb^vl0ErMx4I2P%POYx3#kqowHbETF>-W>}0(O1%219nRn{}reusKC#l!La@l)m zA|UpAJytPIT)DrG3I2($A3{BPNL4p7m3wYy^w0G3lcr1IF_u)@c6Qtiz8HC{t$41muC@u%rmdi^XDM<;*yxCuTRml-UWEzi z+NY8gbAlCcaF0(_r|~E`Xg0AP%1WwfFAV}GH*uF7&=&3FnIxsjuhh_z15r$z;)t@< zA!G|0Oshc2UNJEyFyCc(H!0?x4UK%x)3*{;q)`ryaj^m+W3eN!bGK3Afvk!^twmGe zx4gX4bW%gGY)=w17yGrLn#!f8`qhX=;%O~i2P_NmzoZRF1pjLo&T{>XS|Q zsTi3__A(21N|+;Rd--X0gvKcU7W>>+#$2U~T6!h8ep+if8RC(RI1?{`Ju{=@VDxCTNPEf zML4?~38I5U2RF~ok-P86ypgr}7A+N`)RX}jbUG;P^|&k5Q8?mikipvnEK3|SbW2F_ z107}GgqP3XP=seM9;Dg=6=j40I7UI^RcT3YgmMU7TN9^1UNcH|y1#+*nq+SvrHdKc&$UX50LgFy%0ukk}EwUbolG zi!-HFBT9oX!<(O-govjQ19lL>t*RAH`&8N9s&Cp;1_&Q#Wn(!SXEO}@S2DFm74V?Cx z69@i^ay9ob(4Z26I8GU(f+i$0Le0*kdcgaB zoTlPD6MPpgiMP!~=~CkH4cU=Yw9ih`NeZC1HL%>;i z+@Ar83OE}qQUx()ne=9-(Jf?8Smpe3GLo)DD6~PeZTw?u^rqLtN zY1O6rX7GY7xG4y_iE*5YT6OQstlzAFydBuOqT^&LI+&iPk(HDxZGO$;~b)Jx;hT z7Q!Aky5z!wCZ7vzmG5NbN-_U#x#NMI>2X*Qk@v{fLT=}Jq{P2*{IH=(aB<8sD;97 zI@3uzCi%w)&7XI8n#WX zv{7$1x2NU>%nvEIoh&GYcmpu(EBc~2?!#o$k}n!s_hj@RY{qX+pT!Xm0%I>hoH0`& zi%z6Nr{++01m5Dt5yDVkm2TQdk#0DirU2R&ox7oC91KU8IePYIZYxRICYAD{A$0j_0%b zqz@cub(PSGIU^=dn0lOynnq(Lh0!ZlopLnMl{gSqR%Xc-WkqWw;}}Sg)XW4NeiW{p zg+%$&$t4w<(jA{52wmr2s+a<*El&I3z%r6ozvNi;%`-axJf+iIHrk&}P!(}e=_FWb zgjz3yVC%#cPn-&sBG3sjSE|h)r)#8t3LYSr8B$@^;fsN&{=65*q6@-0o3J9SOU{GvOe>8+OB7w19I& z2_vDfk-`DOkwz=TPfh;(-Ro%0Yn2=o?$fe!oAb=N$Aey_3HXkLmaf>kYe~OJWxlP=rj7CQ zhlAr3gY+qDH?#v}40c;e2ruZX$7SXIxpAzi;y`cCy25=2BXtcSas90Wt(_ksGBM>) ztebK+h@IGN5dG3p49}f!`B1EaKqh&a1PmgNV2J6CfX&59ccH}dB;>A_G+Lvx1DV3` zW~Z|_0X2uzap=LH690lL!-xWuI%M8}BeaM$TNUNytjV-iD{DVy+YuhnBYzkn!wPLE zVy%*!94`iU#9~x9cFSCF2sms7bokH9bK{%+*5i{6hd?AoREPn*G%jHpsWfa5Of_a; z6pB`nw^jX-mY+_+&e!W6%RVtV$MXbd4akTOPfS*0E z6+KC10(epG%w1TK?DHMO!Oy~-fIRy0J!XdWQ03JxqHO4)a+T<rbj(KB(wRhLZo?7(sYRx_nI+Z)pZA)?sv4p8w3R$p ze9+6#{^AFE01DpxlUnAu_Sm_SMU%R7nsOZ7pO6}BXW+FKRt&TtmMiWC&58jnEqbA4 z8>J;_cW8HsYq4g249czsJ}{W1bkGyJ9DCQM9#N)(=hZm0ob8I7=zzTUjVY*A#7+`R6a3O`KoQ z<$HXjk0&co1oMhqTGA~pgRxc9g|TaH4G+zu@{b*>d7mZ)v``8RK1+s)%kU(wJol}% zFsjUJ?)#$(@dxYHN>jHBL(^8@V|!bE%FDf;M$cP!3*Ay@otkBrQ1tvmBJ%D6_^(LN2Ky2i3f-Y-(m1>PrQthIR?}}Rl0lXL3I*&;Vfu^hO$khiKG)wvY zvNrC+0%%cMT@7=me!~u%QG2b$Ue*BCQ?0lRi=gR@6d6(;5>D+jdogNtC*bzfbH_?| z#tz`tfNmLorPK`wNb#>Qd4io56(Rm>bmc6>C}61g=MsxI-b9zDG^A~1QmItfV9B)W z884x6(@QR+01oiHQvXZv)iK#j{DbEAOxfqR7t$-@bqwf;CLYiX?i`8{U6XZB*Do*J zbH<;bDC@^-4(8m&-d|$DS4hPIfg8bpHup~>%>&H?kC^^mzr5a_Mh-NU{}luUvfmC; zeZbVB|9|~5*cjiR$zpf*t$%ILR6c3-x7)OV#erJ0M_1B{uFw z6bS8gmA~c~`4a&l(^AQ$9&@#He&-TTmf$465A}4E0^IBE+toU$Dl3NGJXO`J4+aKy zcJP(FuI+L$*C1UPv(v$Ud`1Mp5o3M6$04qja#xsmL||KiZH7T;ojgSeLc1 z*MhqaL^umF2g+{$X+yfnfRSq}80pz8)<56`brk%k3fXi_#yRy>lkA!##T!#Guld|yV(Eo@XYi57njFrv{vmUIQ^ow63{Q5t^a^nwJ4qn!y zv4hEF{n9yCd!|iwcyjE~od4~r3KAM_-N$6Ev?;FN$uMTT&6SknLzyP6-&r5#P5U?|({Hbuj$T`AWXh`nLpB0-6G%}q zCblA8QEgAV$^HHKuj03w2ft#p?`@qk9Yz(@vf%k{5ltOH|F#naTLhTGb(|wrxe$UIN&WB%dC6Bf>Sh!pq~cz2U@!uG#xo zW#u@2$9CkQU_ZR8(``%O6sZ)XRoPO%ZbD62A1y-Dv|V69)4T2v zsvW-$U56|W_t(b-MJac1tAiYKX?BKO_0RQAUL9yPiUCtBUw)VkrV``&-;7~-nj!Kx z)!q=Mt|uK@{>?fgxIDj)8`ee~cNXL>S2h1fcblB#@T?o3Dw;F{mz!w@6I9Xy`bQbn z;+}&RH5j#C3JPg;pGkPp)?(C{0+mXZXk+$) zb=R@bSKB}x{dRPTR%`grdXC3nwFvCo()SsEz70CXhL}-Y0~D}8Hbsm?B!`G?*CZt; z7S5IRHll}~yKW7+Kk=M8^?ryy40^SY^ zt9mD?c_EjPC~M|5s2L2xQ!7}zG0oI52YTuciEr&V5%#jlUf467X75~zmA=l{W2eMa5_qoNk?3uyWcRco1LcL_GV9Wt z@J3;KNx9fui=0Ioz<2IUgFuZxSFsZPWOU6&f}__pAX$cq(f9Vo}*k^AqY-WNKp znLfJ<-n;1A4rYVu#ii|spb($*PK!V!%}s?^R%V)flu~ap4w!n$q8tc>_uy(d4YtQ9 zmlPBR?Q7pwdotpJ)iPx03`YNjioAt&?V=J4sC+Ju>ht#O`F5&Ptph3Ij*LX9g?RV?pVu&$5Nm6!KEBV$F6;*@p zMOTPnmGy;R 
z`Yv;Jg=Qjz(pawtQw<3~7FwfY`xxk?7^8aiP{Svs9}k?u2ZRqvlWRW;aL-lf-Bd-PImZq2j+b4{R#ePaDtE%s3jRUKM2eFUr&2(kwi9uo z>;&BFXW;QjtM2>&R6v$MZ_P{)1L@xT$PP3!`}SHeQ*@2vY5PCzQr~98U^!mz-QlHQ zsN|Hcz;4xqsxb1V2m2+%_xku$uSP8342m|@4>Eyjq1Otbyc&O4H_6NJ49&e&{-)uD z7D*0w_DugNUJ^X)JLMhI_LFCM{zT`7*vM~*@xWwsXc)5wz*%zX@r$xM%XVNmw-%p& zIbMxXkh#2jNik~$9{qqls0!cRRkYuBrVJBf>KZdkV?$$_%gtlIeiQkcV3oj>%3!w{ zg@&^C9H=|T+1VkMg9br7$Zo9{4FJ__d03j{6|_k5z*K&HGN$=a9SUnwjHp`*fqp8@k7~5u@VbdFO@&r8i(qX@0+vs;W+@0xKC_QGeF=*asob;X813P5 zygEL_q%U}F&i{+dU4O_h+H!W}$3eR!695tL;c;C!4{wE`Lmbd97uY!lnc$mHJfzp>2(y6QYeb2=61<;&;ueV7$4a0-_L8r4nr9-b8#kS|9 zj40?Br2?cKHk>kO2q^y%c{#q}T|-nz|FiLjjn*P@h{Ni+t&J9YQuI}|4BBtPtL1IO4%)YG-5A*~U~D_}xAt%QxW+vknCCY*#GS z_Fj4Ktkm??8FP_r?Ktn+AW~#ptMQWTJxNev>$O7yPOp_jX(C!a?`aD*2d~?GzK0al zvB@y&iExIh-6a=jO=jRE`5NE_tM-*?E5MYZe?%@Xf2S2YvvK!we7;xS_8xgwd!RQU|9D-pCU8|XHcHcBfqhmC`(ghgMil*%l(tw?k}_e=2tWkb&lJEC zghcnT?Go;hZ8pLA?wFnqvH_&6_&}W5JG6wL$^6Hbs)Z7tvTW-R#HRGHSisKX%Z+OB z@R+9obC)7T=(GpVw%yI~{a>*3B^J%xIt!y6FH@|BY?53JmR%)>PB_Vb?!HU4qx*>h z>x(VV2e+t`TJocnE#Ml0KZ$n79?_!_3tdtG`UyaLl*jeX&>GjgNJw+$gAf&uERPil zU5D%#UxzH=!5Ere2T6jI-V>m)>hbcUNN1n>Af(b}!)`2IUEF@vb|KiKsg!b49$JNH z%rMj|r+Czs={y=faTJ;#3%uYkvn3<;o|-zM&67#}f;zGTEdGu(^6H9AcHr>Q){1es z5<(8^CFA&l{uPy79-1TW#7lQX3=psNB;qJ+XOBW5roKu>qhWl&`P;M%gF>BJ9m{h6 z+Cau*0D@1DMjOz*kSupCBI$487aAptEYw9~7bKRk>uX^8=b=aN$yUEbhhvs=>O&{4 zFcb_O;B7}TzpiN2Lx{pFww~jLEfKf;#H}oACY#LcvBeVFs%K~~M&~J9hPQxDB@t4| z|0I$RtCrb;rBC@}rtiiy--S^*L&qLqZ&Ilro_U9gwi`@m)Ict(2ufe+8010avJ|pa zC7%~HiXDwV3~*5cF1P$$T1ucrN(I;HiU#9|dL1+euBU`lXB7Oz^Z%fkR79BjjX-6J z1uzx)DVAyfOEa^RIqF<@CUs$MKwk<>hDZG3I0`#xXAOIKd(XwsezqX==2;32>2LZ+ zcW^|dugSq#owsFQLD6gkeX4YaTlo4xQgVyEbP-3E@cz;H54fy#LcsNmTIV+>T7zxf_5~G{WLMyhnUmt!RlS!Oom-#MF*D=axwTqq z*xANm2t_WNzFgOf^Eq4*C}N%M{=+2tFw4g}c&pS^z!Tk-i163(mm|Aj(D+@Q%XXOc z{AQWF!!k@!Ji|f;WGOe$F4ojVBEBOu9mpLDae?PIaRWY@4H5EU$*t zBZK&xP%wCMD^7jktU8mJ9t|dsMyVO}tS!A>OG({S9t8)*Q`sCt^ln>Dv;D8uSvTFB z+u5%La0k1~$76@W{x?pAzx%S3do)5P-$^HrEs;e~u%{gP>G+HVP=wxY?V--AZ$|C*3ri5qZy7q+*D*Bb<(&i@x& zGDwO*=2M(9;1QkOV$(gHcPZIlP5cEHaM<=E8SgtE8Mg$_mQg(}(T|?Ok`DS)dy{=> z!T+4m(EaSUj2*$rx%xe<;M8JkjyXvX@J84pl^ZIP5D5#I=gqvdgs0TWI!M(VE;0i< zlGx&HZcSN-m-J?7|+ z{p#?l?8Gx<;(XVLy1@QqV|wdbTQR8w48eOW^l666RNApoN#094N;4Pnm6ts`YCHUv z^8T;S6hNCkQMBRcBWF2wC1$aZ5FU-K{FK6{r)67)iV>3v&>aiD4szdc-~=8)o}TA1 zQ*_H+uD4GSU8({VybOL@d2#xzkSe``{mr~moZ_6oZji=^4f+G78#@OvsE)p%MGbZb z2Tw$y629(LH0_*i4MQ8&3}7M1M}$76s#ne_I|a6X+L6WP$LEyy?5b|_Jnj8~n?vBs zT;`1Eqp{sL zZ~e%FN7OCzWuu$+NB3!k4CY&PIVVpjY#>m?LQ@C`!0Qp8yICM5rx5a3=*jpL@kr4! 
z8H<6{fB9%3@3sl6gZ=t_R|Oh(aOyK~b(vJ7XPSo9km3Ugw;(d>P+5b#o14WpGTqQL zN_t$|uP}y*M)@P+;Ll=L=7C1erq4D@D@dT=?J4(>0T@nh+!W9mGUd+T@tlNu^##sp zgmQdY!$80UJ1REeCh=KyrzE4e+x0*Hfac?6wgq(qx__eRze*LAhx>D{x4sVZ2HJmn z_X)nuG?(YXLE7L`Tf!QAa|ti*NFm{+EnqlPkFWhm4xKe5QhcJp-RW1){i< zhKg3gNbksYuxO)=zYV;}81)K8Q|6^&D5jeQWNc?7?XokQR8PCT#cK-`oO#mdIe>mh z|BjRJDxDl^6*0v10G|tL&r=NyHRqrXLH+Xyrs1knrq?juNCe?+m;rLm*D2wMa`FTf zV=wT07PX&gKoIj4%iqa7&v#1^ak{Anm`N(SL7-aG{G{nk|G%xMHcReAbps1cr6W{y z?KZ1+_-YyITmGQ`x zX00eb7)pM|n1);1rET4j3aG|^q_whOtnT6xPVg3&k6;d_^!MyTL1LRuM zI42_R`qV#8DD99LL+~r0kKSsZyM*^#srQWvsYtuQk1_gYGd8_b)+u80ND3nr_5V!v zw3N?R8UWl3sAK6c`9L1iWdYOd1gwoGJ?5UBGBo`jrhe;vWN-E7R>NR;bb} z8+-A$Et?&aYHiFt(y|BBL$NC1lh^ctXAz6Ok>qgNT#A<0$D<1!EoC_%mN#it zg$!aw+rihxlTY816s?#4gGHmK9miy?%TD!>Y`i*<=m%RgS!)+GYYa>;)u_K2-Q_h$ zo%!5(*Oy)`*=E!(vQ#HZK(D&dP4ilZrsN0iN)p1QbQa!&aZ4a=-sVv@4H~;w*uY)n zy3E4v8d{C0pEku?BD)a`Lr!R@8bOHVmiqzMl6C*W1_=!M-|e3+g_I=M5NK#)FH$fS~7$Ek&mA-R(V7e-aAGbTwkUTJJ#Y2zIfjY)V&ZqsZ($Z~T^IF}?| zOQ94WnBm;~QZ!j^Tgo1nS)8%ro2y}xM;Iva`b^9QBSx^y-ytt+^bzKFlCFJ-67Bor zZ9|OOl`B+9PQPjFlP?C32-pilGGQj4d{X4gFH{mZMw!Y!1T3aSw97Sorf=y$MAhI8 zRb1lY>9$$(cTM*N1>*8aVvLSPlvYs**|r>EWn+wpS)^7SafI;=B9~=6sc(tU``~%~ zbI`D9D4(X=C?Ed>=DHzl(^%y@t*6ZBuV)n_vEgZ8yV6&137fuH6UN--H3fgPXOe#< zYV)p4s;Bc+Xlmtmxh*_yMg01+Xx*Y)Ae^WYea+e)X>b}y?YZjF8OM*g)PXtic8(&? zwMTKz$Ob77O1Si2UWy7Xt(#68tKn3NBG%cEK83~1tWc(!G9_aaIZ8kEA6#kz#%~D} z(dv*>w?5fG@Ikcf%oG2N0`>P;m4*aQ^ZZK6Q68Uba0=Ow(IeMth-8pSN5S3Cse#<5 zm19se6gWMcZSf7{$*^)58uNd^Nl%}D6m#HYj-2XV!5drY`5DgIvnJe<}-F3xi z+rvipHd3^Ban9^)#}48}8hK}Nu|@1r0RRwMD(ehb;=dnVn&;5Y+^dIHnW}6!l*q=d zak)m$oJP?LCtxtFCPAxb@8npLHSi=xzxP3u8Cap-{f%geZL>1tj-vuiePl>^qYl}q zpmI#d@sMZ2m-G+FDtb zbzOvP7cj6=k*35)EhJ7J5GA}Bq;%RaMF*8!E5 z2eWnhx2KPNvAFkKjxqLJ(Z`s`K6&^ey%iz!9$@65#z%%56*7-dY9c?IO{tG_&6Vi0 zu=_Cau<>>?_9_@__Bjhm>2G&vE(VEKhISPQDD7G40|jSMW+dC% z946K^B{4JA^~kzRO?DdJc*>=)&C+W3dCYO_*;}!Fjh;;%uxhCO>E3APQAeYb!g!}q zt}Kam7XjWI6}rZ!G5QuJ81U0Aei2otX_*$h)n4F$LA&L;`kMVtyAm%KNax?`um@b( z!Z(&S*r$39zEvKBaHu-KGiv5;MdB2)s^4}Vv9H@t(4jm}zGS)#)oTrLD;QK>dWdo6 zV-@b41BrU%;0&jpSSisS8H`;bsoklJd9Sq{vi?Hcf0QfO*#TpXkB%Yf9;EVaG|N)} zgkD29WFtRVACkY=knvD;*M{>f3g*Tr$T~?Lwogv5?sc^b`uRP6ysd1^ha)gCBOj!u zdar_ivZ2wXU&rR)m}9Y%<0WA$g$Kth{5aDofL6Q}S-@+X@hpLA&es5UIzFyyp~(Kib_a^1+A$*S?zj?B@-eqfnRQbAsCYEC9MfWW zJ++QR22NcXe<~~^j#ILrqZCr18ug&35*?;zWq(WWj6nI4=Y`4{9Y3UT{op;*&lFdm z|04JS;eV7V)}J3``u_BzOuHvPHKyJ!OsR8UU2&svgjQGs9KcIV#pP9>s0lGshuen! 
zJ=w^w>vhyu^4VZ?(t!g~m5Egu&uUXTQHPa@^8G%G@fCA$R}dtWlyM1E5CmKQi%S#L zup5KT^9gMJ6)oX0TGB>;N~Lidhf@=ms+$-;1^Owkgxi@csBS}z4%<@c$i_h_Xim_+ zXX4yo#qR=#hv5wt_GNc)*;IS91TvO-dc0~Jth*0Fvl7QEBv%)-iyK|R#(AtU;0(d; z_o7M&Qoicc@D!B0!tj*DjZWq~+eyS00tFj0&4_8=i4r?=e&Fy6WjUW6Uw3Kcq>8=2u z$8z%Esd!|E#S+{j9SUfp&>R)(Y$Y~Jg%ISrT}HjE`o1v#^pM8L+rk!zJ)3QwiAt1W zB`^@t?>N0k8zL9HK~6osxoSYYyc_q}lcYE^XUjQ$D!YF-OY-Z9?$*NZ^@J9Dm6jN% z?NbevtsR}_gofo{$BsWe=Ia#lyllIIAZ#N65zB1IPNy7{6r_;BNW0lz8Xa)ivXSPA zIsVTnV+Cx(lo{)D!h{W1Cwa%B>GZ~qeBVSKOm<=e?XyiPjTwaf_+EgzZOndmj73(j3@VU@l%o<5>{@fWst<-`)VUNP=$l9?v<$evV7)v`dM!Ojs^|$!`__a91;0ZN zLWr;K;d@{<{)_cX8?!^N!ER8Jx)+sz2&AQf9{v4wnUW~b2Cg$sT5r&uKF-oq9C$$= zUD+nyl9{;JHR9|^6G(C>%>kf`m>{jMS~I+40+0JR(;%zkyQ4juyRS|CLJ@9}&>G+N zM5n1eN+Vr0@dcdE6+Dj~ly3~L>V@wfbGV8;$B~`>$owA7&xS^cId~@LanwG;{83Vg}tv zPibU$5Ff%&9B;Qo0npe;mmg%H*dWvSs9~S7EVP=%e4L5RMI>riW#1*kgiT7JCL_o^ zoK_^(@9SF$uv={1A`x6oFwt+fzvH^?YJ>k7vgWy68AR`5L%nwoN@+Ad!r^aC?q_yU zC0{}Z>49e<@Z>*8Lv+4am!YT%Hm~6s{PeltTASGlKqKs^FR*#7y|$txc0xpPpOA-Z zRd$RB%%iF1aoWe=!9I}d&Ng(%ZgUJus;R}Hlvddd)`9BrvvWrr!%~B;h3~ehT6;po zOS};M3tT=8tliT_e-}ZK4rNwq{UmW?>)2nj%0c1gMUu=4CjjyIJ{e!SEfEX=qik9C zPnbO!6`_-*lx?R+Te-+M?6VZ2tn@rQ{$~!U`cT@O&Az1+)K8J{RF^e5M!Cu}tc1%r zF1eqRbv+X-4$j@pJl2zv0H?>|pQjxd$56u`RuvpB{yc6h9>wP1FNa*HmsZ&0)kPcv zrKjiu_)1?7ljhl+D_-ex?|2PO;R=m-x8Ul8L3B6wuWa6Kj@g}g(MbDSr*8L;t*e*V z#?iy+w#*(Ix*Uiu9FEfjM%Ty!%-=gQo0@2%5RQ9X;c(x#M+FG8CmW|O(yz|>6fj~d zBR)@9xvfOIGM=&*A8prd;N29fslLr|os}MGG2LGDS6A4xqgL_{O{_@B0L}J^NWZAJ z)gUse8i>|fklRo_YMmz_G^bilMhVV{9lY05?65V3HEM&kU*kmM6@4d6-F#U)n?k1| zn3e8oW@x&_b)yF{pLKI|Bzwfh{B+UP$LnqXw3OCl%17v+%4Tdah9TH{`$Fe?!5S$T zdNaWl=`=O&3nRgR#ex`~SbE!>98X7NI-_8O zUfY+N(N~`2bQifZ9rc#16Zr;>%&ZM65=)TAdimIY9w%YT03U#&8;}Yk0gw>fxj-lJ zIZuWtK^1#9X>Z-aGa&-4Bpoho21lWZEjT>^p=N#Q-Bu*2^^Na6W`d9$fAQFLCMj-J z-KiVm19uc8xw7c7AJ6EMSuuh1-ENrofetS5mEVszAZPjI^yr~5H#M`tNazV#DGqD1DT#*2AZe&;JMi8kvvL7{A#K%mM zc$wi?WcU_DjT-GTk^y#Uj?>Ws{K@tBj;^@imx&s+RTMDPhYeyyO7RR#F*k<}vif+P z#|&aI=@f247d!>*_Gn-Ee2PpW`Ph;o1q1i=7Dc8{7)+&6H}Cq6s~c;zF!`oR&Y`0k zwW5hY_k&_69)C6|vj&_7A2r&mrDKxr?Jc}3W@z>47c=2eN6469Fpto-JPX#gZZzF+ z!>rr|12;$qaJlOg-J6tMvqMgQo?fWNBt+1Dg%T)Ls(yl*2+>a|Wtmy!Y#7lNi-&Pk z1U;RN0f{hDw&8L{&k1>8C{~Kyy@%K5f|Yc#qei;3bKYAPe$>c7GB&cw_Nb2>s;L?@ zH74H|(woR;q*2|7;Y2|+2fRkX%s?w{Y6XO6yiHd|ALNcVxKCg{igfA={=O%U_nBVn z|LBG*|xgMM~FJ6ucSf}p5)Uyr1 zL8R!ttSzl8jQ6NAdWU}8qHVv@@-%QoKCc6IN9K5nS9Hu3H5#hBQpg)4F$Uk1S{04f zg!Pwyvn_fGga$ifjq5xNNb4hW$4z<&ZcA{^CsFD|{B*StwirgMZoxlDmPE3?mb@U4 z&y6jZPxipB$kaaS>I7r${_CKME?_K$K#GM!gRi{1VY1#jfk6a1007tHMwEaC*^5M3 zpM}z`AZ2m8G{I$oR^@jCTPZXV!Trq5Q=4b-K_3!TSt)i6dHN?gK=_Mrk*go z+7lmo9TRue6$nVo?I+250Noh`mBT0F&D_j2+149*FFH}z&bN6^y|YAr?FXW`HIuv) z5hI7eB9{}Bsp&;xfd|KSKtsdTBhBB>lVwMp7s9@{5a(_U2(5Beus>@NNx0L3RXf?7CmP{qC-vzimON*qq|1P@{t)(Nks4Ff&1hEX()JY451X`$MINTA`zjZjnGU6AV*5;q-@ zMj?yN0DCqyIOO1A&emE@!%3Xr)1&>}kUYK@{Gmd8(qQBw^V4u6sZ*|AFv1O)XiQ{) z6CrVD_zJFo%SCwHzQpb*qyc_xEbz$lIhN10Rp(`(q;o)e$ea>qMe>?Qy~@|g>Pk}{ zvYOmdb!B#)E>ifdExd6EVHiJ^S!On}8;9E*(glXHQ%E4+#7>sF^^o7c(~)jchzVB@ zCVL?#2}ITF6FHDCHS5-tZVpY#)xur*%_aNmwXbNFt_knm5JrblK z4=ZYbjZ9EQtliTRG4U09-o2&emDZbd=^hlNbgwV#_CVlgND`KZB&$O)jL7TZhXJ#A0J5ovxQ;`orW zWPRhes(|Q8DxhkNsSAfLEHLo~-~-i2puQ%Osh_EonLGO^ zcO?h6i7Fd|heA=ZS3@Wv0zK%$;qTQo;lDU)s^qdabxR5U(SIigQT-%Szz+Xb zR-?M!NUVT)iAa%fCJG@S3l&Z2d@uNTAUMOgGz=rB(MeMjpx09CekHA8N|L}pXa1-p)w zv{Re})u|V%*7GwOF`%z?WQs%t9VCi-YqG6_b5zL`+)u*W-pnojI&D>pm@)|d)B|!v z-)ii&&;hS^(d;--t0`au&W+YpK;VOSh53q=WAMKIKLCM1e!tP0h;14bZsZ-o!9?ow zg&2Lt_Qe?TC2hGfel?B+)V4DuF5ecf^ta=Q(>2rGL8!#Eie@is1>i%ai7)aX?$#|? 
zqc=4FoSYFyA;ud0e(#3UM@JdxBcayt_t^y`qP2p|d5}&{h;LvXp zFEXTPZ>3z!K02K-)-V3M(A3(~G0f>9vqlPkJ6yC@tChpOJ)+=E(av`| z1x_`cqPZ`sWKH!~A5c3xO-s`UKWuVSl}_8KN^y`H(}tSg?K#{KwizMKES$b!mS!*f z-EaRs_upx1y7yV}F`w=1_6PmlhwjD0le~a&dWh;!e&{c+Z(RmJYCV1Y82^6p^a=jm z-J@so=8LC;r@tFKdir?qc<|)uu6e(=_vG=D-zB?$iw@WTlT4D|fuBIukj^AKJKNGo z^UwbMC;4~B$ljpc8g15RJ72c$B+rWFHHBN-+Z}v~F*^TP6lSYW_79R*>Ee8oo*RKA zOD~iC&HAEP87bqA$@eNh%@%-9yje&ry!~Z*YX0z-Uh-%10|VG_@fy(xa)%n9YB`hDlI(YWt_2G*hz&VF3?-yn#C=+2y$;makm*l5tUOrE+ zK!ul{uUI|<#}2}6?4b$V6?g^Q0dNt}=55wqXG!f$n{|Q$ZpoAO{$X-(*iOFLKRh_> z85-Xo9DV!d{ZaD${=0Ykua6F1942qxCC}cxetvLt@aDC7dYSCM{*UBO2d|%-t;=*( z25wJg&oK1|Q$pxQ@DI3?9A=qU0SP$C_ra&8T|DULV6#UDi)?k7mnDS|Ud#-Yd48Eo zP~l3W`Ynj9vsutxZo%Q}s4=%bI3DcC6}-Et@AI{E8)MjtWdXUl(N%uBIt34$ zQ#2O>87x*+EmG+Eh5xX=n07OF3S+@uQw9WJH6XPGLz$X#sqRYRsB{iH$(wH&-uODX z0_QW;K6QI&LpcL8SZCXcrRd2}>=fd0e@59-9_-s>&0(g;8)N}n=0@Ayy)}0@Em@iy z3ZI(d!?N~s=c8HRR>rPxuvV4%tqn@?a4o2yS@zKtgXU01%)M#B!JU*}_5sC|Q3IKD zHsuCg64?ekKKHxT@jWRDjsVs6WO-AfQ+Bq$zsPWm%*wJZm)>O|cuggT7ZQ3EvSPnL z06^GzdCg%h$j=v*3z&vd(5GqzP3y9A(TKEtrG}~Gqc0z;`6q#4(VUp;7TVo31%S8A z;gB=?hsUZZW#~dWrfj2#vMsf{jbMx&TEtlx6pWEj(5~a* zv$yY&jEM44nM5q~s$$=>d9j%xJtpl7UL4 zn0E$!s!-hHG2G+{9<2JBrSvG=X5xTlE1%E-n)gw}R*W9leSffc8tvp2PvQAyjWixQ zx;RnOj7&y9)Cb)-_^zK_GS^Co@?;-eg0QXP9ihw1H&nx9e7Bq+X$ub%3~jgTc|8XH{Bl9hl!LEg1Q5x28>)JQeVY2q zZ7BM#j>+AXBk6teKo6Y9we}e<>)4z*_rexqEw#*V+Rksvd1}oC{BS%`<^!N@HJ@wv zQUy!9CWRk$p~`Zq(akGC&a2!m7S#N&$O$2dW`x=n#r{?dM|XD^4asMRuHTs@x)&~? z<(VPBtc-!A(eJ|K{(#sV{%ge;Wq_=4mH8chhEcNL;X$!!H&(!--P7NRlcNh<%*;~< zb`7x$;AYEYF;bq^R>K7Zst5@w1Xiq;X9Vq{glqM*`~j_}k~=85e3KXZhyPv$?tHV% z7G6!-D}us=8VeNW9SXWgR4tw?-Emtue%YS% zlFhw!zMg0IhWAdeX7_%c06-F8X>n{D42f=9h`k2)$?U7*Fju__L<)p26!8+HYL-(6 z&(=A0xF&);2ZvPp{g<_VSm}oaK0s{8(=J+_ecIdIdpY!x3@iXUKciry0_b^Y8x}U& zHD~BTHQ2CU2%cwBj@z!A+Y=iWMBa%QV_%Hhyuxh2a8D{4%Ugh=f>nHF+BPlD&R};? zpN%~Iaq4xU4>TW}Az`&F;{Uye_j>n|dtF2Wg==j?JkPFF0H#xnPvPbE@UG*7d9$j9 zu)PwGBFN;&yK$WsKnunx>I~zZ!34JT(`|L6X=33Is{qh>W|?KgQCTX z!H)2OB7EA|=a7nHwY4XRb2sSX8Loo?06f9WFR#h5p0%`oL|)X)O3#Z14v*M|hn%kG z*U3Pgv@3zAYM?UEN|Y3y)MncnKVDjNoao8J7x!s8wjoLRJx-T+78{|;!U(EDyRCXb z3vfKDD`f1|#4jK#IB%-Rt8IPf@Q} zneLZoRN5-cg;I(l(q%+K-CeTwg2MFUcy}1)nzULj@EV=$@0@kN&GER4uUe@x~$63*)S(DSYSfIKuW?kUvH-JXfh&h!taM zi8!Sm1l3N2L+vDwS`MoLL|{DUh-}N@IN)Z?H|TQ)9f6%-U+P~Zvn-v9O@CfeOb-a# zFwbXn1-qDmRrJ#IxjAt4SROaY(8P{R>zEqf>=Ggv1gZe+S#B1Vkcy!mVaUmT#Q-YP z=f&wEL@N0eLWMFZ3=v9at6tJ(n-`W`MYXG#Elm@9Mk9blBb?4N|I=5oxjbwI5ryOP zT@z}>g9`goWZ0_2Dy}AK`S%8vk4m3XSmlu}yvOuL7KIjAl!vRIL=Owe)JA=Yp#n8skp6(}GJ8+K|iy$y>Eo zP-BjB08?cmVJNgnFs_s^X=z=TKTKpy7!04A6uoUXXWRZ&D${K>-JON4i`>{$$H`wn zk@hzZ{@`;5-zh-#{gIRN)(w2vJOAtS0!2awIfyza>+GWN3j2%`e zwiRl2AgLuKSymZZSuJL$$LP1h%SKp-M%1{U%$ z`*aLD_xIpot8x`uS$evDk&lgwPX#|zwHSq>KwWBtL%vCJ4SXC)-`cLJuGdq4j<_pf zz?=>@r!Z#Lx*x>}ifYAnq2x~p|8HKTUWDsAD1{&lFT`Hnm>sRmVaV4gq7XFLh1ffj zwNpQU8liaBK11`8P_*g_#|s#yl4J4kY<52U98=Fj!y>{igbr|Z0_VPn%FGw3zZyvu zJzctAzIUyI{cXSMOqE%%+p5(r?bcosL3OP7RFz{NBzsLoMml9j^~nQ8zP@smUaBQg zKa;o2;)cGe^i<`L#=GcO$zBs22cXHY9xieCG0~(}$zn~6?3->wHm^E1tM|4kyT8xc z{Qm1E0%E;H8RLrLag&Xd2L2#olda`x$z)5>?Ypxf+bJ1o^qLCepRt%iKJo4i>f#Fv zmFkSW`e1d+TUE!W@2b;SHM=2I>tkeguhVVPCW3w=+y-`2@{soC^wo~Hy#SS1mvDQ2 z5bH*Fb;_CBjV0V*@3a?!(O_`5y|Jm(xX$j+H~DKM5jFDj)HhApgZ-{87ce)l!St?) 
z+!@El#W7KHN7hB(>ZMJ=OxsIX+er~yq z;%0toxr}P8{ce06bEC@`|0*WBI@B2eU=F}B0{~xJ!7WCBdZ#ZYD9{T9Rf;JVwI-4J z1zRN#rtUSm!!0)B&V~)Hi%Jy>n5_>;HHC`3lWYw({1`og;t-QCY!kbH#z)p2!3oTC z!g7K1z`K3cci}o!+&*wFMmp|BcE{BW>;TL`?wie;0va?q8>0iwY05Ey(VLI1RlzsR zHK*Ozz5qiTQ#P?av_pzZdktQS03+~k#l?a`&?YCvYVik7{!DgJ=z%%fw}r_&gD-#o zWM}aE$B!uJt;w~|JO;k#c(yLEa!BK4IGkch?))>i`ZMM62R#V-%wk>^+Gb(zSj=|gXoxqyY774vy<#W8Sd zx*zr1V1!q4yzON|cnRc$lg$wlVUeGf(E<6l)Xe6r%qZcifP@ggZBr_u^Yk3NN!aPp zOt$$f94Dk>Yk9z~=LFBC76zEdT@i>mQ-v+^|7R?3kmd@gJ;4KWP; zJ-8ELV)sCDpy2G{sfaz{*!ef?M$0!VHJ37Y6+_NVm{Yu5i_)g&{t*hzY)eKb@&%Fbup zAl*{K2bR$8irvpPY(-4EbcU~?v}h{u+J$aWcO9Vy%U4iB2JA4E1Ddjv`2H8%dvF04 zmeH#c0Uh0Le|M1F4?95GqQ)c=+tYouu}}3FQgl!KK@X&Iv~gV8=d!@|gN$NfMIq}D zCPK$7@=c$VbCdRFYII+!i%VhoJeG_ShUe_ckX+<)LhuzqjHd6fvVK^ISY(gV3o8@a zaWx$58V_}R#JpZ+b!WPocpNp|fkFic&C`o~Hp^@%lRgChmHU_}9m+bD)KhY2Q9Xp& z;3Y=P?C8Wj>*+l;^2dkKBM=c(aG6nrG}&(}ON(eCG)R^=wWRhYDr)jo1}lic;-JzW z(^U@22^rb;>AZZ1PC5_$EMT6n*VDyKbTk(VcetVeii{I9C7<@G`rawmCjZBgS+{#6 z+Hol-d3vi_L3*sxq!*TN{uHHGXtz3o=9IF^KT(O`Wo~-PAGMp@#*V!hK{|$-?j{&- ziv(?oGesLYrW2`7FBm2SNn4alKr1-0VGT5`#i9>AF->PP8>U+I5Vl*Vm!?Wu)q4!|gUa!H|5#QQ8W(zMzbiLT|pr;ksG+0h`LULwF z^eSbHxonY=Ybi?Xx-eXk&wPXULrFBsQ1CAdGE@mK)JuY3JGWxkqH{TxsvlzjQP35F z5TPUeXjflJ-Em;l^YkQ}o4;hSk05`hl$e>)W&zL}JL&2iWo!vy42NS?jk@A%Ziqys zsji6dbOzWb2ta}1&~3Dc57~9u>2_-)$0rF#fxc^+BNgvaQ!quU^ z1J8`c9Bzfy2hpg)t;+3xJallYYOYfgADE4Qy-3y_=yg0rp7k9JKkUeVGTqcadbfY@ znp@Vms64sD*+MnP>e{-eZ&BB7QPyr%)!u$%H`B#zAf*cQi|TjTR7#1A;kJmG{fNLy zr8BZBPt&urVm_nr5T2#3&n;x8c0jB;Utl{x72Z$^=jeulgim3LVKG%0UTj>2TPei@ z^Bd(*2P^?_cBD0(Zg3++fvS*XSX_lG`qk6dB+P~_y+d1^tBT*Dx>9l5++S9=`Uoh= zkxY!_&oVKR`#y8}NLmN%@SzS}M|emMi37Xcr0NShs~>c2u;WqcCHD_r9~@0zzI*fC z^x2=@9^G%>lKI^QiAZWezwl$7ETbym$8-Q_sbIF>fCE|73oW+RNyyHa2*lWyo2G!? z4G7L=lPj&%U_+6sc`J+Gexvoyul*}Jn+DKem+Om>7IkKDtQaBWRgtbA?YS8Zfe)6e z;=~DC@CIPIFVj!HNV7B0*-FTqUy9$PGsE{4m^5-;+c{TO@`sh*jlYLz-0%E5(Cf|WpD8Z|}l4xUENkIPk2 zsx4HYU3*MpZ)dP|Mc3!pnF(X*C^t(rcl~*5#^`1KDVx2@7JQ)UjLo@t4M;lcr;lwo zKXpclXL3z!$c|^1&&sclG`A)2GlyLCup#iM9rRLUU!!T<*hMv@f(`E`galS=mVx^{ zhg9s)pI)U17G2DUKgaZ-gr&3;%T@tsW*!2Nc6v62o0GfYDSl|R`a;DQ%VZQAO}H+S zl~mg1(A_Nul8dH!v?s1uhXveYuMr{>GgBlJn?fJ#QQDIRg{OLlZbhebb$QxHv5Xl% z7NQOXe6%a3SZK!{emr83$YTur)j|mIQtgN?L@PSTGoF#_B@&1o5Knze*~e{#0X~s# z_f5zFi2<(Y04N?kks<4Rp3M=TN$l$=@;1oKiU=TrIs%KrrNTTFLU0McxQ9VWAWR{7 zkx{oHEZot{cQ|F9n};%6EB4oI#jsgeK!@8W0RA!4tJC#fO1yB2Jx(E>2fN}(KxrTl z&UHth?PT-AI>BMrG={*`{XJ)%UY^Wky$o->BIWr#_-ailjgkoqsVZt>6X-!OQM4jTbpqL zskD)3ffZe^FN(!9FYI*U_DZ3aM~0zYxgB1>Sm_zFJi@L)w&WE-W-?Yg@FL)t)mD)4 zoIPdIAppbtaFqaZH**I}XOU?OkzK&EL^O;EPqZVO>;T@q0=9=FM|1mA#iQhT!=v z@U)=KM6|O|%fsP}@T_KOp{d6LANFpAU{z*lBBxV3J0Xo*wd?jj+h1`5YcJcNQn3L1 zs-|Kt$;4{PrkB}8qy?^GUDr64D%0inaaL-B?97@jZ_ntYoEetIA zrfBS#m0Bkmi^q9e*G>jMtEhNe)+@`Ff&*dg^5bF3D~GQ3K6u@M2!}Qi=;2P3j-e-x z9f9NI5^MuGlu*B2iHDB4Ec&9WgNqV%qodz}R|f`q?~(29v7_axW8v=!F|9U4yH_b4 zM(Q@zvYioBOw+5Wnyy;_pYXN5dclvv9@LFS%?LX9fM;l-iPoE8gr&P+5aohfs32!; zmme&TQQVg>RpgjjrDe|BHdiMa+WIE#={9!BSED2zAw7AJJlfh792P_q?A)Do?Z9{l z^OX`*pI|OiBB~hTO^sKxD?ACH%|Llb`<)R`v;3ps2IjOOtf&R1!pwu@JK4~ay$HxR zrEtiZI>3Q2|1ElrVqR8Yz|=%DUPQa8D;+q|d((Z0+zHV797u_vJaLwwhcwFLww}z@ zMN#xuR!zkE`}33(^^`=_e+z}hO{(=XTJ6Y~O~dU}^?JwNI?x5oEw%`}MH!9jLf%hq zK?-E_(LFXNW|tv~FL|$vhZ~+a^bXZh_XOnX-Z0+OpIe?JYc&?4s)%Old2_7W(szkY zDM@DhAUDA5#zVq8uA(w@2y1PZqzX~Yci^}d)%98lwnrEqBwt^5)Q5xDFW(^JQXDR{ zkb6da5AP@ezZiD~<{n`vO{8@bxur!FBzWiV(hu45d{tH=mQylo@IUfJmY19P{760u zyL4eQM~t*;Uw}EPl3qp;iG~V<$Sv~?1bcxn=f(UZmd)3Omc_FQoPrIKY`_7Gh@sbz zQQCpYm8QLd{xdYpD|^Cb*? 
z#$*2W#k=XZZw`;9e+H-V)n?(QNRa#8W?c%fn<+4= zM6s7EyJ%c9P9Fbgtu8S%4;qG4FDm?Ipr}=iWyZxszGyB52X({MRx_!V7Z*sUlwskW z4G@L2X^MQt<3~%bzZ6b4&&}X6&;Ot=_2i3K#gf%R#f+?VBTJewH#Qc-bJfJw_O&Jj)bqIl_&2iot_vC#8A^ zafy5Nf@rr@wQITeg531*Lgc5P@AV<-V+ywiL0!6C4PF|2G`oEc8dFpTPZ8&TIt2~x zblQgAqcB#N>FNXSyVJCsO0ae>m~?kr+DdrrtvOV*=&t1`+_^F7q?J%Ov5snb>vw`}M8M0I=1kk00aTFP=WZzq@<%Y<_(4#pA*6 z29KUT9y}gAdAe)f@9jMu41SmF{w+FS1De1j`5oL}WDV&|va_=-jWqx4-+z*ScOYH| z?68~l+0K`(JIS+Rc})&bd%J@#(QEdPMFCNW_79R*>Ee8oo@dEXmR=_NoArf}!TYT{ zCf}?4)QIGuVpxcG+Ws;%d!KyiC4VNoe=y`B$FeqmYIpzDP`EBOMjEtT6n$Sg)7EhYzo=uKFof zwqLByAI>RZ`S8`jvlp)qU+jS85oCG4Fp>rYT%kpuoLqzdqDhXXhV%3aqi3G47zo|e z0CbvoJqU(nSKw272TZYH2ivT@&XU@hHtPfhqE5B<50it#cJj^s;lW|g(D?q~=-W5% zkCN~A-@V&^eRS~RFnRMXdG_Y@^Mj*TO5!9Wc=g;<_YNr;XI zN-RU_Fw49Ohzlrdx=u~Ic+k%Q;vTP-j4XDUmnAv%FJ^|yJipAv&A-y9ehY416&E27 zrHlcZ(e-gUJ=@?Raf%9Xz@9oWk}{Z)(Q{q0S^hDHn$&$qE<2TLUbBJFYjq1FugvK1 zuMMH@gQi4YmH8+6NoUV9xXlMg>a$Q)U8gtaCyfZZNO9?Txiav`Ty9WCPoLZqYH?M!^{waU@PRU%^s447az1+1(VW3Z7+ zZIN7Lo0aJuKR#-X-m@}BeWh-X8heujf@cG>fv>VYHnY?Ium1f%bjJHT|C#RmxV!WF z>CWUq_o3U@`-TX(-bM_#!Q4$;6)R<+48XU#m;Liqu~{-uBFz{~fIy&5KL@jDHhs0T z_jvm4&cJM_)BT;nQvjHC-v)c;+ZRv2gim`mYCs!(twFf~5dtcylJ@CE4nP5wPbNQ_ zKM?t9rD}?GAHuqN6`IDgOQ8_8D4n^}w;clgf5eT+hu_}5U2vz(A)uU1>wQH&vx5(OJxpKDZ z;bDt)%;R`x$JZy_Esu#MnJ?j)*nI12O}xbjIfh(imR$;WN|U=K^%$&glnhip<{D6o z+B%el#5Mtoi$%7x-cU>v=2nW_1v}}}?Fn3&m~C>$Mb?&pEgAF#tl+Cv0bBYKZrL(T z33c>FL_rM6V#ASDum}2h-JnLU>XLSgi$ph>dT!8r{y5v~3J0h<<$bNAk$;}i=Q25l zx5qF79CRk>!0cc~o1}BFKf3OsBgWl^iG6Yh}Hm^7P48sME%p0Hevz7iI@xKaWP^)b{&@H91Y#RRi?) z4(ZE0bOg(bt4596yLIEXRy0P7N2{T;e%0m)u_~$8X82c|1uh`73%^CvpgoWy>1Xoj z4{e!k0VG-!V+v3LvIR<535w}gDOe=tn2$hvfeKz%J4M2=C(?kV$}(L^hI7ZJmE^j3 zQWyp#wM1)U{QZbUhXvk@!@6M%iFlqttPFr7%hje%Z{^)wLI^hE3_sYv!7mZi{7@fK%*N0)zhXCEP{n zE1p>)nU?wz=2@Gju!aVh12rd1f$#i*=5BMnCk1`Z{UqV>a>doc&e#?7$7PdJEAtQ zlLTg0Nv*kJbQN>6b?}k81?PHCt!1CKznF=(tm7?&tIL^ILNG5POMzCi*6t^rCCF48 zda&!cM@MQ_=k$_necJCfteDzWLOj|m{0-Sa`E~5mQ0%YHttYjAEKa@`rC8x=r0hNW zQizVEt_UE$mU`Ykmz~}ceR^Un%`&yRbCym+mafP(m3-{NIKk->tlwud(*3?JKEhS( zDjAeUazc%~=x4PKhXCV9X~VQgx8QIc&56GtZ8a&Ufy9f&`8+Q#IIZEqSE5`K9}^*l zo+d7=L74LjREEgH+0GEv*4I)cG&y5ZP}B2lk*!EMSAZi6D(=GcEPY?%RRLh}ng97s zv_P7rNa{-966474^AGtlo8_rsSDK>A;xeR#n6g4fT4(e2yZ!1Y%B-1+whZc1>JjAt zX3Pj{ci5Y9O+{{~f+SU@c@WXZ3S*d})2)1^=DWj@v$_H3ll21D^YzQ; zZKkdo=Z_am1Dr_;3-mM%i2{n&d9k|gKyoEsW6Dv|{%+A8wrXOt`hut!;hHNpn_**ZXu*TGxw%*liyJv3&X%|*JUCWuZlh*Vr@BIYS z3X+Cg!ML=99=*7xP(r*@0X0!voy?2V4?`wLciZLg;X}jtuk1bs*W2*)&}5vNjNr*I zJ(y+1`6^vrl+z9qn<6plnmes(!0<@P92y)Qhb_)F?Dl^uRz}=WyLFEV&djU>pFm(Y zcx6dJy$nS&5}fN>xO@g=+2uuglC4paNoG0tp@NbfOBQ-53OcszZ}U}po-R_VgyFd# z%mb7Gh}=Suum?8mzmG0ROpU+9i?Kq+tiO9 z2dN)F|E50m9@waO$@li3K5t0B|3#Son=c+Ur2poN$6@*}2Tkd}+zZk_d$Et{tBGI! 
zKB&vf{f3U&d--Kp^_TmmDu|Egt00m6Yj!~FT9|+m+hff1ZMK4IZjd7Jnppw8ud>VH zBa7*w_GGh!{ngu4e<_!R_okuHBmDEz+1c3)o{^J*31u-4_sT%X;LX)lw|Q^<)i^F6 zUn9DR$PqSx#J{032XxRMXrCOl$M0YNY0{=x;q+gq5^^AJtx_t2(-(^3ZTf7Tau2x<1x%Bzlszldqd6YXw~>(5ZRrpj%}u4oCVV z4W(g;fw&us0&m0LsvyYGbArIAyDzR50N}kjF{ed8f@DZkA)|9WD2p&EI)f0-x=HIr zvyx+i>B!xokH%%m+_}?E9`JLJmC|@BV4sTDh6)M)B1Sr6XQX06!sg&;c-Bb4*m&_8 zL&|P&EMK=&gVPVe!TUiDO$nbhm7)k&fl9*GtVw&&5}`h}yH$nxV;u8i5177bQHv6^em73kX_ekh2MsP7UhJI%dB??z&$Q=dsN z2#5t_xY_3Pg72j1yo}*Z5)mHY@h|J(c@aFX+MddpXTc5B7fN;UpJkMYFH_kes7NDQ z`k&KBI-jfpVtwwVw|$Pmgh@oxE{0ut+ddR7cjHbZaEcOcc{}ZA?d~l)GpY*N z(xF<@6LxAv!n^}PNocadVP;V*cIFsI>)~R9it7EtX9ow+pNcHSaCzni0A?zrY5y1O z?Ej-p3#NVCw#OaNNc7k%@xC{|Vdazy6sEXn2=`|0loVOl5QfGw6 zU*K_qKRx+UeR}#>9ZERC?*}Tu@1N?tgFQ`mu=ljv{Y4ARn;UO$FgJdI{|7kMe2%}z z?fgG>2T#mz&;Mia#iOUYzxjXsE&j1ddtr^>%j?qG$b-!_nbn&qwaNjfOswjaN%z!# zq_jVzlIY*l?~2)Gjy}0ygik@TAbtbhh__l0f_{~in>m2P!n8-PPeWi7FQC)@f#bnV zX%mE>57%k=VK9|1XekbMIzOaSiFP6t>5SW#s~=KeI_%uk26!B(>jzJPlt&Hy(3z8UMuoF?d|D6&fb( zPv&2{r@vy=hKYKmGph)yCefIG4xrBT6RgAg{r;pEW66<{cGg^W~n^< z70N)q|Kv#>|3`L-{Qv3hlP7~G!2fsm_I{T<`Hlbo{l{PQXTLxY*s}k-@wfh;gTdg7 z-}e81i1EkWZ?n$lKFnC$lWO$pENW*~1K(yL*zCvQ&YCFOc#b7f0<^H|<2(;2;aB5we0%e)#U zPvuQkaGEdN8>aJRuk~}Q_2)Ow_P?1P9{lGEQ|Cu;t=M}1`cKn?=NL!PYt)z?JA&gH#EG z3|ysGL1=ids6cM$=ytK~y`Wjqdg;dLIB}y>E_?P%x4sE3FJ28_rppdR$5b^CpPQ}= zeq?8&T)Txu$^4Y>V}3D#;8$=rE9rv_!NA04?MrhaEicFmu5RM!$*AX`e;4f<0tGMo z>1vf;cl1IdC{OcA1sG9_&*qVLD#uAt%msVfU-Az|wsNW(#3kegv=dwIUBQg>J7jzW z@hR_`-aUeX626LoGKSPS$oTwPFe!?!0Jv3f(_;>KW=YdUw?YvF$ibv7iOY6Zwhat> zNAa={&y4xeF1hKxL%|)QjZgpyw)D&Ra7HbZ-0U2x;h zp<6x{3KQ${@d08ot;lfkxCDfIlWn{^f%OE7RM+O%hM3n?R5A-W2y)p>M5i4FjDv;@ zLT{a#Icuoc6=uC|7>n+UxIzbIlj_?|<6MOaSdO{oVdA$Nz;<3xMqtQYEPE@aYnYw`KhmgTW z0^Gs>x00QRHD(uJPU25uApva}jboF|;udSEkhe5XQ1ru03Xmxak3xgQM9I!a80)4o zDiMFwXHhb=w%5802ugis7_!%iBu`=&0SnN$*p_5_s5qc}wG_-(M~&lNv4pA!i%YK( zQKu}E+GqyYO!EpX(N+R>uRF~MTMk(jP*oMcEeV$%PQC}OS(`B#KqbrsI3>&I6NoL9 z5nBhSVz6!dV5U#0+;>GleDTRl(lXdTSU{`8JGRMw(XLcm7frFYCSD?HDusU|gr3di z(ufbM<))-bfqEd^e|9x^DZX(cht8p+7BUUrLk{RDi%v^8h+Mop=Iit;V)Z&fU-wDn@FAhf^={U^poMi(C-gE z{CYjX?{wYil;G-4&$^fBK<9dio>dCsdFS)u#3pjDDBaKDY!GnJ=Gm<>$i+6AEP4nK zcp?i)t83w80TMnj`LNo|)o7Xak0iyFaXTvL{v$&M?g}Nh!M#o>gHN zqyXZ)l}xSpabO1FFrciGGlF2}>eTJ23KEBbDdSbzJ&fHt z9uEBmPZWpoDGcLB6~lhkLt+wlX9&NyCZ;WqrvVw7gXp zZw6>nzXUZmofE#P?0%jSMboR~bXiFeMEW;NYo{Vyv~(p|#?U1M^0Fl_ABJ) zc~$u}We}wktz%VAQCh>Lobt4Wu{H}jsQDVT$pl0oyhZ;SiIt&lk%UaANJh;0b+%+fa1{5GhY!7cEs|FO z62SZ!4?T(Mz9lj7)0siS5FH)A3X+;75G%)5l(tI3w`YZ>qN6#sR$^N%B*>vBqqn^G zqCF*l;gn)GcKjR=6UZ>8sVEmn!%G?;!!B3n*_v(wM7w`TJIWPAAi|I=?2CO2bdSki z9FE!~Ss|(epaLD+Vk`t09h=bE28RXv!yGGF1uyB72U0Jv#gt4nyn5_6cEa7kHxCNu z&$9}0Wf?~7iZc(Y#5x^86750BV}s=QD(C<; zMaG;1{0;^b+26wz8*>Shn}l|tCO6-I5$IkF)7jQrc)%T8`p9^DwbB) zy0@`ThyHZBrj5J(CsfyYdV;opyDjbK63~!lrBm`7;R;o7$K*SgcY0Ce;4?e1a_o_toOW=bbib)QV8%#i-4045QZ^5B@6X4bf8kJip!F|Qk?)W z!@F(<0Vxkl)&p1jV0lk9@35AIhY?)WzCFE?GTgNyqKzc(jjuZEZHWA=I zxT|t)hLn3k6I15PkTROL*wH=LB@^Dkyx9;mRdZK>d<H zX9xd_o4FWNkXtFrCU9+4t~>YsrG4jr-2I<_X?ME!AM|$m4~PHyhyQE*m%mK@YkK_C z|Ni;^zSm_TiKu!04FBK%`~S4jd7}F1E9+e>pV3dwt47orLfZvYU1my(iw3eeUYQxW z1Z3Ke*$QG~p%nxA=bo?9OR#=8FF+ezW4v%k;oy$LM8|?*M^^?$#!2TAABMeS_D=vV z_}HEiY!>i`IR(UmMZy6M7U(t#aXsCcLz0VlWSYS{5o~(q2}6>gV#-}vjj?@jBge^O zcURg3VMO;j3+N1c`DM6z278_K5=^<+wb}Axigrzde)6zBr%U;8L_hlF=Cbo4%a*hJ zvLr0AmRN`2Fo|G&PBl84=gSTbBgH2Ny)Jddu4Sm|z(Hm`e=l7*L{Xkm_Ei|Rx)j6OKWnWnElNgVsm!rqvxH>^)wbwL~2P6+@qC?O&`X@q-DSZa(p(&RM$*Asjq2&~8oX}Hm7>*-ZS`m9WzT^t7w>o>4 zdsWKo$|YBloL$YhN$^Xxq(OZI71~n<7A=nwC>Z&tRk}zGda%!VO(jx0gRZ5a>YyKQ zMT=NtwL;@zFX8_m?uay4UuKxRB{#O!%Vs-Y#;!-D0%kf4x-|+YX?guMwoh5 
zz&`1U{wvS|JIhaO)UVLDy_tmJXEp%OSH+d4+b~c%oavrclvm}_Wfdg>y zL2sZcytIP4Vbdl$Gw9{hRgglmXOWff5ZhzBUhFv6=W9P59jm>xbZoYZ0sWKV7R*R_?V)3?(B$qIH-`3llYWxT!txebHX z;1!N3I3cArSPym;IuZ|MljYfn87e6S(k05?onEeonuTTV6kj!#na`(2#Cfx5#i8mI2yzs+%o zDhd#1b|7rP^DvE^JXI)q_fzX6G?^PyFPNdVv4kv&Hew zCe3weQThIZy;HC$X%fcL=}E2=59$-!Q2-7zPfNN~G&|kB(jv%!hhHzjkX)p9!+6%o za?vuDr<{?GA^22F=T6k#eb%*wtk)!82hqCkj3D8<-Y|+#>yIWEV-Upy~(|kpx+C z?|VV7EbUR~6j;iqkyO;}&7OGN$3Gn|qkovKa>Ot{Dh@G9l8fo%nZZVGcih&Fh}53g z>lb-CH!G_X6}X>+LPp8k$?MG}sK`zz9v{0YSP?vw6eT=It zMRKQ_f?UHGd67FwI;)2vD3R}DlPjbM!GVSwGWEGWokAq@CLP_ zFcC0`>05tYOlJ^=7ZV9B^Sh5H0+_w)GddJhD42sCdYbk)$)i0>KsOJ&A*9IN(oEC# zGR-jEYQR`c=Mq*9T7rjd43q-g2dc*gDJ^IBUb~sOlNgvNY;`GO+Q_8$6@V<0hvjRO?-4BlrGk2{FIRPGHG+2Mgbs8{u#+K1|LY zMI}zYmacNe@vlDT>%^)}pz1>eBTN_QRKqa4Bscp-Cm07s0-IoL(W`tuPd?_woYd@h z7;)Y#_R|eq-b<`CaEr5of_|Yho^xiz zSV2%BT%|KU3Yu!znwAs}7o5VzNEvp_*6H@arINT5C^eU6)5Z<5ngMy0cPQ;8vsJO2n$b(a z5N>4;_tngxb<7}t)s^GE&%9c`onPrL`{xC?T7e%J`6iWM-U|Yn`C$dP9=Vsnb)UUp zkR8(em#@InM0wfCP1sp_x-M1{X{vzuhJ^cgDoz%#kk=5^91`Fi3D^d$KyIHCnzVy` zrYa73M?S)3j3xo7y&ZGaChXG;p|LoG4>joq8QHMH=CpkwuI(q!5WxBb5U$tkr(*@QK9_b#DA%w) zPT;s?ngwP)=|U-$$na;SmLAM-vr0a2>$!2*Z#@@Yk*e2kSy<~k$sY-sFZ1RA`y;AA zmWJbyI?rMQkb+WVk*=TCtkOjZCpYjbklO+L-b=j?VFq}NrNA^e#;JQulZ4)23Sbn1Yhy;25r-3ak9ao@)S5Ow>))szUBGSg()4sX zw=0s=zxEVU`A*ti_SJ?m@hBN9(^S6`R6!ln!upBwTD)-!BM!qDGNdc1^ zx{Z&;o&RY&5&N(!wlEa645^iu=2(`YKn`Qr*%`1@)-hrZ0wtUMl$M2=+$Yx*c>}R; z<>tPm^m{Hnd7fw7PM1)Ee!%J|7HwQb3Z4>I3Fzy&PE(mzJEmjIv8U`Prc#~b;6bWp z;uVw=_F*p|-b;8AYbP~tE)M-Z#L0ST7PCdb$DD&9Zji)kOWBDRgotk#PD#mjDH-9- zcMPk5TL^GUHnVNg^>fY&+K3eau2=cSWf_e z5;h$BBl0VNBA-SFDJa;cQ9sG1N&1?Lz&&n;SmB^YE6xX!A zHbl(eFeRZPzVNQsbmv>`p>8DH6@yY~0@ZVG3FBut(f5ea+6Cp|DA{+B5n5zdJhsDR zya4kyju8y|~9ZVoF^O!baxN>8Wm*%Et> zG>LAIM4gDyzRaC@W9L~uBdcR`r7zU^xr*N7iGeXUO%FVZKzp>pKx```rj%6!5^B=8 zjkO_Dc^kLkp-_&u-pL%E zg@k+dTylX~&YCm#Z4?kzp@Au@?;tG7S2LSm!j<}~Y$3>CT1u6zM7g6{z3T0=T21Ti zvx?$1mUB>(N9&AlRpQTLV#e6eu+y`Kb0%7(Rz=B5S|o}SW3_%RUWKK=j)ShERv)WI ztE(_%N~f{wFN>gNx1`KZp9&i0Vl^Uz(>yFMqf@o%;};-4LJ zQp#})`4+TgU|TTo+cI!%8Mw9#wzS0-G_1BI`pvZ^`Z>}TZ)hs*+4Xlof6zoCRh->wJ5 zZzN2YBcefo3UtYL6z5kuV@B#*R#fTuRkW2~E=?URs7hoVk^TRvon|7j`H=kRNVCI3mN!s2MG;k{zy$;`bH<)d^2ZaTw zv)RW_V@HQzO3TNeF@2()T zK*p{YWL2bfRvxeIy@bB#sJZ{OOBl{zBfbr`cF1zL9aYb{cmN*JF*vk?7`dab`h@4q zQROO`kB%d`o>V!!Gxw+#he@AyM>o5x(bf$eMl%Q@-BjDE_U4|Id0>I+nVP$n58MmC zVf?|$se}dH3XIW%muPh3%wZ+@3j-O?ybu}w)Z#2=@EM&!)r z&hl)IPDCLYnSuQ_924Yer|q&bJQb>f0e&3_(XF9!vLpi#_=FaNPe|p$IlmsM)@cvS zs_V*!d?~=QJY$$huTt}F46a&2>D~ybX%#}qB?%8Pf;PegWBH&Chhq#o8}4KS}&L&s{SPQbnqMzH$fK9$%=NEJwgf|ui= zU)xC|E_ZVsgO|UgS|ZsNMgT-ZPGFoF*lG|U$Zslv$nqaWw*7p^F}_E8g%mUbF-5$lLZv+;!FS>oULow#ATZXzkAy< zo1p;}UvFx8&0f-&#iBINX4M%42PfF1(s^=RnORK}zo~DknS+_%RNpw%qDE5>jg?z5 zZ<6{^+e%zUDo%Rtv*+sYg!CH+2F?)n{6P0)?&eCxU~!iQkt;YhC#@h3d#3P#QKY`9 zRJ$h-AMwql=k8Q<=(^SUDtg%Sj)!%9d0 z`k@zdEUQTK)f1?FBQvNMKw1Mt1@`Q8inh4ZDHt$Lrx+F+T~Od`1+lyO0&Y!D&tJSe zc>Q7uin>mlajK1YLm^DSv~$#!2-b#yfJw^H+xLX~>=7_qcy^wxIzEDOFL|^lWXRtE zue!{4JjC}K=v2{_d&!qV%Y1}-0Jg8$2{pM|dV;&$OP&n&YBi%o{xC4LYpA9JYwjg` zPo8ce?Dh&W>{(x=YcZQbN*Gk0Y-anlxL8Tsnjl`J^D_h{;^rMk>dIrwc+vjY!cmUD z$gQf8CX4`COGi+pnNF*|v1;D1x#>wxVh-6yA({G~Rp8V;;GXGbz8-Ds;BSj7vosgi z%6<)PyVK-+Rcx01ZRH`zwJUCL`{Ga`41zwKY{9y^u3)$101Eq7-N|7XUc7mpP!x7V zYC()2vy;@8<;BIhL5bf>i5FrqZVneD-M1$LwnsVOId)yUEhBBWcy;VD@D5+ z$Ci0X4ptf!yhnkOAF{6V;r;#^THR_Y@&H?j?j=AY(kLlN0Lo1%ZCOlj;V4ZOlQy(f zqC#6Wkz@1)pa-V*Q(g5fm45CW9SkMZ$W(0hBc+4p1)5?uX(*T+IC{z0{%X&%kN71s z{CCncR}R7W5DF!y?-KA0fZYS_M!w7kX;aFs^y9&Ev0rEFAv|sO>)GLR@mWatOv-Qm!0Tqe+O%4qMba;rK4nA=+Fy24ACT2q)xWkovYNUv%IcuJPeeHEeeK>gi 
z@(s(fhO6{f8*?miz~kM2s=(U+r^NpiJGSY`CZEsDaNa%&aC7|MM|+R=9=Z6xPagel zcW>{DN592?{+s-x^Bx}e{w@)J@r4lx%4*o(O5|M%yoTGS4a3dP z@>4X+=a;MOduhni@UOui>0>Q%YjjgIbeAx zqvpTQtc@|AxuFCS_o#pbcT5C3SQXg%q*&llN%`qqg5BilRMdAA{!djS&XAkJL*<-|6m; zbO(2}au;EFcx0leE>t+^z1mFIOC+4vRI|JbUU0=>#OS;8MbZ;SSZ)i#Z(PY z*$Je9!LE@eRDuyk9`LIbDT`{#VTqFm34s^V+g{=bwUp9Cf+JT4XU>(ro))Htm`PQM zeS2dLUiqRAnkX~Xd1k3>5TtKRS2yS|#AHy%tqAG0g6`^+OS|~%88A_L6TOs7u4)3d z(^aXU5h$d3D1(UZ(+L_bcB6aOQLDI@u-gpR(pD2IA?-mqMfkOD#z;R{3lLku zL0K}={ediA9F?b(0)6k{7LWPUPr{dmm7JWaSY3Cz02VYaiVqvZq0g=1h(z zpOm7*Oe9~EOUWvdMSbmf!IM6keHe%sIkGTYO&hbc*?JyKF8ALby_vpy@&52dSNMio zP^PN1KY&=rJB8g&FY=i=qmQUJLjQyj;9j{~0rKecW=)@8x>Pf2ke`ZQDjx^e)RFq# zQ&ODk-I0D!bK>_aERBI6S(iC~dTZUqPqB-a9L<7Tl*kWzi&(s26x)T9=7X+%h{sdx8!WErDTe6wffd7V2&w zpItJRYE>F+79HH4ned}6O?SgY?YU7Q0=9nC#gDXfKuN8+*=;s_x*z+y z?Dx0c?Mc|C6Io=tTY^)@3^rm3bIOM|a3tud#S8C^2weR4FW3}Y9%v$9HDb0(e1PD) za08dYoN0pPSBZ>m6IUG-Zbb)!X`;d5yto3Hz`6E`T|V<*@Zd+E(^}4;_ImDSwbvrg z84>iZ+U}9qNLWa8?`{-flnDehd1M>k3r_P>s=tfoQoFqYhE6AJzi*eI_olj@QoJ8?sMF>^Ii=8|(Z(gmrFVInf1ldv^2LT&1lla03?lkHmX^Nck8V5d?3@aRBOk z2fbklUE<&OiSwArb9w>yB!~aL*?%s4=%0k8z(x@P<8m2o0?a3HTmp39x$`cCo&G&p zjqXD{)iDF`P*nf1`Oz(PD!0^h@LKfF)OQS#k0y}{u7%S{X0|Dm!5ca3Us7IQo)q&k z>Ac;4j?id`e})J|G82_D@nRF#dwH#rRkmQ$E>51VZ0o@&U`8kam1s2Kj}csSe^#}0-HVB zHE-#KPX)G`6Ryqzyg|;VWpTEK@VAkYY&@f&`*l3EhRz4*_BzmD~>eGhGxKEJkONIO>pMD)n#OvTroG!7ZApQtHMHY8w}2JG%4= z)6p~>(@}J9z;Y~&29{GP#f2o(XL8FQ@&y{?oo3q^#0kn?S-Jwec|G;l#nRQPr(UWB zZK?@xKIq{JSZFW#z_JK^05$vah8)6{ww)8i8OY~zh!4ObK5XF;81{NWqJ*#qO3pFq zh&eP=9wyU3jybA>G!MBQC)(CiKBEeGm1>RfBm266C4I(ZL&Sp9L0U<|G{vGCXd6liwlYE}9uNzJ$l#X=gZF?(!&ZyS_NS1*chDF3T2i2C` z5Ja|i`&z++ZP~9Oi;Ls5O_;X@SWn&hx*i_wzdKSVzolKdji-Gr3o~$w>n`A7gZyJAfRXsFL8il4BupIvzSA`a;z){`3UnFW50MLzHv)6-)$aV)cvJHEfEWUg6{L| zp$$R9wfeU?m#0Y&H0U=ekMxM%qD>+}WS(FO* z6KIh+VGx@~2JM-enon%_yWE)LpP5hD%GZwZ%c)_r#r$Ko>L0y*|BT+f<5!LyvK_;6 z(@TCd>~CbBr$1u&71&8k-|_qN)8&-kbOl_K%XhWuv-iXrZ(lvn53kBLPPrw*yFNbB zy*Alh*zMl5l0=2Kt72VrQovwDK0aN}jc5wujg49denY3lC59sO5gRdXpy|}>Vv}5e zv7GW{{NvmAMhg87uH~-Q7Zm)-t406ZG@@AIZ0b`5D6Od;6y2&FFe3pQS>^m%H$8+= zy7!(?W^m&MQF4|8Ou`9V7D)d7J?d4@#is&`G`K~fhjiSWWgm0KYmhQz$sHU>r`i=;Rk~njB#x*Vb_u|Jp+fz{3DqGI;(-hPX)AD*j zaWtgxK(~&q0<&!d!+4u7!Sy$zDc67d;@$MyH-|^lKktQNXpsBeX2BAzgG4n`dPIM$ zmx{RZj6&BQD>~|lH}W5?{|c-rN)Xfnmc3UC6g<}%A`31kBkMq=!IdlYX>kY(x%m zDa&b@ooe{rSPJkimAfzah%ansLe-{jT90HVneSb7IJRTYoErq*DMHM+vgC-&{+31p zm*YDusWm*)%y9`&L@zsCU?3_Ru0tEN8^w5)*t3S2*)P`B`X}%vDo>3#QOsxH18g~g zWgu96SM#c$U|mYk9ha~u&50sKpcL!iU{b6R<<_AlnkNKVjFNJ}8~nPcXysId?qVsV zdh1#x6D_p{RPn%eDEZDI9*Zd}Q5Jw<{rS++0SoE#1)j7o(`Bz=a4>zBVe0&BmC1zj zrI$oIExz55G6dLz48Hawx1AcDxA^Qf-JWOZ z3O%+k;4AFS>1uwhS=m*(y4);@t0!M4yX=ugZ|A^`I$|X0p-NP(5!Anj*08c#$d;_K zNlMM9P;+0l(v{oo?^3g8*=c%RZL@AFCYfGlMo`2^>g{DQzix9}aQrr3>8^&)3$rRr zoAGma2=J=trin&f(1W-W>2c+!i51YhElnC4nC85AGpH*wid4|{?4_YPUrjAuvJ#9d zJ#<=>4IN~@ni=&E-Z=!zS=V%Xg_`DO$^02$wqAe?-Klm6t}nk!Rj^Urn6DZu6!~Xo zbkFv)TrJBALdRO_YFMUdXA3fiw~@iymL7%bN$2Zi(5-25`#HsiEjLR%bsvX?9@BY( zt|ZJ6r4ECvZ5>6+A~{P1V$AUzP1j5_W;$?4ykrYyJID5C>E#>wpipJJ>0UG0Ot-QY0_v;s}(tI*!`&mXCx9*eCxcv&W%CC*!1c-XhZU>&= zUQ6Ta>707?>-XLIQ+Ckn`atw-Wp=1TILx?iu*rp_zkm6FIl;G)E3D5JeN#x18x|Tc_*w zN;c`X;S|*Ku4@72ufN+({$&JzsgVmxg~3TJ;t)njz>sYS-c8l(T6yvk9d-I-vDjIW z$IrvH^P7H1C3pTZd+>+u!?x;#aX#s59J_3z) z2HmcWfPAxxLz=lu!&H@=Y^qb+nxB4tXio$4E>NkoN+T4dC#A4i-czarVua2_Y60d_ z=6`c92mm#}kWcQ9EXAhy4g1UG{2F;&^3$;Y`0QuUb|L+ppMJ(^2__fnTodp>DWjLn zW22@kcd~-W`$6Tt1Bc-KX?s!xTP3K%6ZWtu}_w3D%Rm=r4{o%Ssi zz%uTG^{&aRjwvV`ewGlLXqYoM@yl@TrkBi@!$f5m!_6nM=QCIm7=|;&Z%Rqwy3&=( z6akcpVMg{vtg7g-x&kM!Bf+{*Xfd0M+O#SON?%S1Q_$K)^biWWm`eoMiVZxjHmEc} 
z(cXGyce)N8hX&KRkrmZfeY!aj(O4|q)R1meNIOyj_;{jkXue9<50?{+SZX4- zsaKsFMuu%Wl~rv)O4kt-Qwj_*CxC=>K+mUKLw-UvB%V^W=k=j`5)-+NZo|;tEn{t^ zH%%|GYDSYby`G&FRi_?bi!qRAIB6JXPH86%GlIL%X8+m0gKb=OLZV}*QHs6IuZmaY zEK-oqfbTFwY?8Agog!ZWSllm@F&qq&b9bDeRU_R3U7Hxy*xmf1;7G(2sM4&%LsqkQ zC=n6r+~o!Yeo=^ELW5E0WVPX%l``$Qnd}*2My=VI(-WpjKg@(3FCbBM&lwTj(sVqI z?YZjD&(mG*%r91NW-^;$;N7`QFd(Zk!L0>j!$hfnTSmt|BV~FAo9fZ+Xl>4V!bhb@ zc{aMWF*YCfbnN9f7g~?sQE=-mjdD1Bu2|2$C!tWim0CjDOj1iQj zeIWKr?h|DgJKmHI?w&mSBvq{*vuXa{?lfI;T#l!bv6UN;M)j4#SR>5Xc|YXb3bo$L zPOdf2uJul@$bg^O11x&Ve}p#q&{h~Ag{(h_h9Tf?+~eJr#?Q~^eNLj}Fxvh7DE*(} zDEnA@e|FNR-QEBAkKwcb zb%Xbyy%fEUC4>}+PGw}<(VexY0lYAgH8Xc*FZa=*kQ?01#;~^v1$XAcFmp(-+h~TD z#hp7i_&xw|+gE0X?VNlJ-cGHse_`mcdj_k zA(Y`C!G|65VaI)t*+Yj!{-r)O%^enQ`sS)P%q3+ej(#yqJ=6x%iVrSWd>t^YC7y3< zR6sL(SQHdFfuj@ZRFbm$gM6`cYtrL1ki?bvt*Qjjq#c(VAk6tiRq8nu!c@mp%!H^X zG-V$DO-4{uC{-(l54MoW4jsc4Y@mA2=_c;lOOnNWF@vYyn1X`xZFoX==+vpdguU2rZ8s&CP$Y(bzB^hjUmW`E1C9elZ=Qm~Zp_-oYV*r?Y<-BU!8N zJ8I|ll54!C(qFgJtz-^{a0a1n;1&x2vptzBLmx9%fRbZo#j#-`hLfTi zx97GG7M$zLbKMU~vIhSvep-|u@Q?}oq>g0^XbHBd4UpNOSDo=>ym>uAlDdE;eE9vV z)X_?&bHho7xAB_dcibjd_o}jFp7ooKz^6PQe~D$-8@Aes9I#KNR7>v~rrCX=-J1a* zQ&b%ma3sz#Tel9N!1Tx7;Tf>L7Sk^fe7x~K-gHlb$d2av@TuyFoObq%AQmx(VLm)- zc!rbk2S0ypzijrm9aerD3G*eNUq4L49?Me1%p7(Jp-vC*Q#uCf83g+jIr;}8iaYl&(+GPxLA8rQcx zPXaF?6ZD`Q1~H+$DRB)bLrqmxg!A#R#6(y9WM?Trx4M-k+EfUesTmgo8T812-0CfyhQC{ZAW9}?Fo z5{IN=t2nwx!BYC|8BqCl&rpl=^R^egXc>WV)WoKdl5THezmwNRG28^we2Z$Lcg36x z8En3&pd+|VxG#)BMW0Q)GyG~?T4J>?8fssddX4@d?N4LzivWIB)g{nMQVCk^`swdr zjvhf1ZS^qRj0vq~CtJl4b3a#5#7KOawU>&goNTjk_IA75*lP(tX8Uc{W-OlANxDB(niW>iSKP5mvb{dh)8 z3;w{`^1I#j>ptysUxW`cBdK$-UVG}nMy|GXC7Q3^u4gW6FBQXAwa~Q=XfN6B){DkA z0De_ikMDq>KQhCK-E;v00c!?S!`59bUnnpzNV7=7|G{%k3U`uAs zCLwBScj)qkrqMTu zD9*)m)X_b9752Q6rC0eKH#2QlOE(VU$Fr)3we8@|nu4M-g6m>-QG`8bdq<-{!=(i4 zCL}a~3Fyoj_6VTCu3wLMJZU5!l5xb=?Zybo@ASu~Ke9ENDG;yYJ6Xn?+h&)W*|i&; z3L^hRuQ}+rd>^BKacp;(QAbvtV{Q)rds5RzV%coLrGW&@ z%De0gt+}T8OcpR78CyolMHPl+zE+j3uI>|*5IW5MF2@cJ^^VPoAZ?2Wre`|7X*+Zx zdlHGO!{Dxol6aMkR(BaEaLeAc`t8FDN`gap1nMR*T?bHjp(0pf_k{c7MFqEOAs<{~ z2;A!qnv^rcI-M0a_=@t2a}oj4``}McYetm=*|r>G7{=myGP`#wGl8Rx@XRY0XhS+D zF1zmBA&EemuVrF(U#!!2l9-{Op|<=y-nAu9tl5DXXlM&4$?A3WP3iCMNlnS@N~&C1cFZ5uCIDL{jT^4xUE`ng)9k$(29Y#Y`9ggMGb1JDrVOs+R^ z4qB3gNPl*pSoiBeU5r*XHSz|5JJ-cl*HK}(&Lb#9* zlHZq2wejrF=3aS#&ei~AIvTyZ!G-h#>JjZWCZ8xY0ZNDwsxBvs(GV7tT*2dI=Gi@L zLuWo$%;pIEiyb1&0S4@iQvF8E!-97!_Wb!yh=3(<&Q`y7HD5cDjWkIcNZUG499XCuU(>KkNgO*Kqw0y(+TtiI z(c~F5ooCfx&Q@!j&e6adw$%*;`}z{<^`go}t2c`1o*H|q z@C@8`^()4_Hl1iKSMwWZKMe+dGXIq5?H9KY9|rC^o)qUH6iRlEXxBqmXUsW zVkKRDv~%f8vyP4OHl#n<<+8ne`Rgdqa^Qn}{_CjFX9uq+ky;53Wk!#@tl8N^8=bjE zNy({|@Ks0v{VNA|QiL}R>)$>&a^QyWh&zX0K{hY{W@NMR?r(lO$v8DzW-yH1x*&SV z+iX3m+1NXUd8hum)DO@YXIw%z?dejxkkuFV>)s1BU*Crm)tZUkPBtZr-QR_Z8a--2 z*P1S9{(hu(le=_P+_N3VZ%h`Wjgw&En*ZC8%_W2Xt*9sdOD<(?2H21V`_rPBsSf>S zC*XSIoErZ76LbXl>qt5cq?xE^m%o;*uSeL|p&r&2$t;))f5FfSS-xB@mMxcy&%a!N ze);D@Omn6Brmh#S@rwC+bhi2Jh;J87vGAxd`W!oD7tKpEQBXd|;#{>nFIFtiNY51t zMivFv>VlOD#*v^2`~QgOSiM^>>bv#A$IIVhw+4(DqRRBoFl8>(bappFOZe&BD9{jm|SwpW3|EF#`T6Mms0USFO*>RC%FznKBA#m?Xp-Y{wM+Y0_Lj7 zX7%_DE<3XI4AQApuMM8VPEVRXEXGfrj#nwT{(8cf1MFx^3ock2a8X>ByMR@S!8th(nygft_D-98S!C~> z^yJRkXu8oe5$|xz$)e&bl!5{>U?LY>;l?+R+ADrqz&bD++VuV4Ts&^Z=0Y3~K3G1( zwB;?}RS2?JQjl%sn_Ain;&`tQ!&D_V6Mt73!-I+^1z zY6GJ@_SWK!E3tL^Xhhf{cS>0h{^nXTX3P!(sK=b;@v-}mf>F>q+QeWN3_|rE&~KgP zsN9cOj@Ffd{vLxb;uIyr1toC;C^l^fFS;4hPy?NOHo59yLAFUGo=Iig>hs0eE>a?- z>ud)DlNY+Ie4+;!%tNf-w!)8S@xb5L9?m=>46ivLon?Y(1>+uO@2r^N_7D@amJ;|? 
zIb$^fr%@`P@Q{l_3T__^$e=vPz<>#oo$<00>e2f2E1SA>kjA@%6LPLS!1O@8gJT!t z#9&}EI6gMesAU5^wl=t=oOZj(*<=D|`>hJ$fnWG;7I>qAL&f;m7bZb<@Mbc|&jekq zfJX`2D96^gkJEs*)cG0~KKk$p))R2SFJlh_U7 z^P0%I<#J`v^h{@<)hOZ|LKuA=GD<~Ge59>V<5wxR6^<0GbcP#CMl zZ6RS*kkAt;R7V}ime=|0oE0)KG>p?tBy3=roXGXc6MzyM4} z9y5t?Z((`z4JhDXilN)b^W2d)C5h}Ndm!%WH_}05sdxF0#Jj=sib8`0oU)65P>bGU z_%7<1@`pusk-{3x&od6oKti`8aOWNow5eN#+O--o9W$uV`PQ_-)>dNMXQLS|)B3o} zHglS^eE*7=+nD*raR@}le2CkaAB!)mk)udF0w?sxdo=r-_s9zaur{{-%OL4D>j=}% z>aW@=oUa;43a=Okn{3kKR4Pe^S$R8ksFm_ z%)s32i6(PO-}b!G+}$Bd5!Y@6g8fZK*Cvhv{?`}-H(QMQkZd|4Z*?wr>bFB@Qmvi( zeKStVHwq_W!ZM{3rj!3hGM|S#C!W{tm8^Ui)6-%DW;71QF(Csc)z)>0S+}KiTWNy) z@cxILt)iu9&aC;J{SdXj#513EzyMD7GdYTf1xnH;I4pcsUJ>GbV0V*+-zB$@Yo{P- zt-blXkwFAEf#xFIlrRnpdSk`r#b3>~y6lTxy(z*NFRtRVB{f^I9)x%fJWI%+(Y^Z? z)gS>zkdj}-cbqmJG zbaQ)2`-m`wv1eYHI+5!VPFoK+f9gkY&*d-c!k#us51}3FV!OX z*uLHMS<7x;h#=iJT6dScm#;GVwa^<4C#Nf5^wjcKf%MPdR$$v@1|&hPoG~}4g<`9% zfi-N?Jqkc-t4OToU5vG`<=IhfTdO_ei+0UhJ8zzTQbB`Ei@n6rw&1dg2%L>4WP{Lq zL_<_&S#c*}YO>x8D(3s>o>!^<;v-n8G)A=It0@HV*aLaEo|$|wfQ=T+TfEotYc zmF=hvbSV0Fe-DXwZ#j(hqApb=_QEdCpjMxQi+?cF>@Nw-sZJ@t;EV;fsRIM(KBD`- zVXs?(!Bng>_M?Z)ok!vGp&fgYyqt`0(9z?R7sn~e?65F4FDLR2xXV+=u+*F-6Yx;z*UX&V24R=mTi%2r%ty|q3gqPKyHMR)Wj+^%>5D_mB}5;5_i-Gh3{)? z_Jl`VEvzrV#*E7-k*9QL)8U?&;Aqez>G-jUmDcsn-*A{-n;AGn0dwLX!wOB`H03ht6P7xj znp=oyzUVL)^b^)Gq8f(a_)eS^2K>da;mR@A-S3mETb{)HVs(ZA8*>4ujgG8t2yqY( zTj)|&`uS=V1qnX|(GxWQO93VHw@`Q%!_7w+Lqj9>35%@YLQM8-E0304KjU`e`KQm? zo_88X%vb{oc-gl#gD)56OU+Pp);9~X6%SL=w;G~2+HMWn>Hua2z-$?SSrY&u0R4to zDrUy|ZjxgVn3Yvi85UB;a(&H;H;KZdMH`HP|lq;6l|+zR@O+dS0GyM@B79e?qS$` z7#_m7vz`UOnlqsg=vy2wL6AaEu$Px2XDMoyBIY4zc>q$@TAcO8wpxB(60p=5nXZu0 zH3?n~^Kw0Q*0T!jS;jINTzUj-+RiNe{7 zX{AwMc@cevlY#MZX=M1gw{3ST=~~+LLkM^ZJ2iv7E>15LVs5gCemm zO@fAk@Vp9RxeX=WI=Y^k7Eh9Q_Q+XC?v~*F`O!->``ePk;kTi7g#GW6f#%v8*Ehl>hCI; z^2wSz`Z3ga*;P`vvDmg(Ija+%v{qxUg^_NByIXx&_4!y>7yMGKw;;0-w$u{;xYT6X%GDNlfC^} zrdhXJ0oPdkwUF5e>}QPmcTid_qfBakgGx;887bHoWbc}<^XvFMIWnTSQO~y+SI8JQAnT5hRUG397Y>@XK zjt=b|ImP?SuZrxaa+u90*=1g3a4eDSWL3d(Kl_-Yu%d~^s%q&V+Xzx**3 zI&Q<;_6hnGqQgZ;Uc&cVpi}J=l|RU@&Rt|j8WiQZM#nk=znR>k+XmVvv5U*~1cT0C zhQb9aP!ser9E`-#4+_q5IF8GCQBCuKiEhrc9qVSMcn5)YO*rL?;*PTbqsIhF7PIz$ zoOSa5%*X!;CwKQh&Pc3GKfOD6`To_um)955CNIu91Ow#J1f8Q1v@}b7$*42gd3onI zO8}rs`s1V=)2FnWj>>tb+x2^mBi2Lq_v(;5yl#I_y>SFR{qrX(Sqv$Cs_nK#L}cMW)Q|B&C2)nc$F ztSx!H6j^Zx%zP)$E+!)k#YCR61YQ7@7(O}##>KlRT1UPSh@hGC?N}T50tZ!5aymOc z0Z++{ig*D6ff-6C*VmNWPSJ|=eC>d#AmmR_S0o;UBusBIn#h%HoV?z~x7(!qzAS*40c!<}%@%t) zqiRU6Dx3?2vVP+AAJ>B&LKp>oi;+2?M7ikMoMPGtTmE@40-FB;nx8imbvQsexQ*E{ zcA%7N;;8`WVqh!x`k8s{R@nKrTi~<&ma|Y;@=M|4hrHrxt;%;xgSWjGLY05YVb?9%0Gfb6;9T>1c=&OE7 z()mhAw3|_?iaH{YWCH4BiH4gOJxcDET?sKjNPpIVB1=+ttLFgoIz2DP!|V(uCYxVk z`i<;nhjvcYOH7S%PAN;^mw%GeD#o2Oo(La-l!m?XuV}UTe1@!+fSb4?R$;K1*_1F2 zBqsF1u9+{kl=;AYrmW6e!q`yE3Fr!GqdKndSH*pQlwY3>^Wck^=wUvdLJ6d$XE6xbGSrj z68K`nR@)EXD!f`naucTQM~iKXJsJU~G32qDoIB=mFrIVZjV(jT7{LauyPAr`(B|V- zgN31Ox7M8$uX|Y4p8(k>+gn?7I4DH$@xZ0KJ&lE)oFqQz=emz-g8{h+H`#Swj=o(>2oxdJ8kKWRt5#8yhoioo`dm!dB?OT++nK!BDmu z!Nzgh4XaJkg?(%)CM{BF{UTESeA~f1-;Ng0(Cm|6(nBv5^xnh^OVjT0fBF62Kjy!c z`XAv)CbR4+zql9`k5B=jKIH2!ziiO|eDh*!>nqX!?0o&=%PrLZe6#i9cm2=b;vdF( zL^p%}Vt&5)&n=3Um1P{ABg+z_9+FH289z4_<6I<@anpBVcH!-qINTN&1K1-|;Cz%{ zSUnPcFp(Kh*OZyx7*p_m1fX^?8V8w9Eyj3bvMJqLqQ!D*s(E>B6;;>y{4%EJs>(a7 z)yl`i$+da97-+4Q86J(V6;qP6@M^rgOF#S@rSmZT`%n(9K-hpXQ~K31oRWdaZ&lAQ z>Pcmrm{Z)T^U>rMVa(5MrwR-2?tjP*UiFffZw~42U*5mm`z|{?`1k!*>+r+D{>%OB z)4NyuAKx6j+lP~8(fzLF~rWg-b6e%mB9TlZ2QrApad32ug_()5DKhy_3 zLj10uT`_;CC=z=wT)}YAP&9ny3j1S6S;%{#3Tgz9)>A#ZVjdFEfng|_RRaZFxx}tehS*@d?j~PU8_5@_7Sg-8qvoL7RkeCPzHN0oyHPm0 
zN&wEEmfhs|i|T|RZX_G51&&N-x9ePBYqMf)ETo0tYx2dA;74jc1gbYv0CeVndY>ND zQ@u!nj_OaIvb-`>?f^}6UW<8L6OqGsSQyz&is5f0)#=)FXE82eI%A|qXdl!b7I(F; z!^r@(?c;{e^T}1l$N|ZU_Fr>3M8(Qd1+;{*FzzynvmT*#Y|?}$TKr4Y*l8MnEpCBP zXm06_ikedaoF!GmAl9L?4<)lU<&4lDJDYgon&0lTH>E6}x@T9+pIeL`;=OPM8)g|V z;Tyg@ap?2G)E0pNslS()(@`+BRl-neuNB2MtH#>f;qPVtB+~!g{F$MHA+(zLo;t>C zK%3x$T5;^#X`W(7AO~mio?S7!0#R{hem~;K3r@U89e8|$m=8s?B+rHp2X;Sj%J&uq>JaK}hISh=88!Y@^C3mdGp5+3|rthc4*lTHHw$VjK8M{+R7zJve zPfdUvk`Fulkuz;lT`7Toj@$TOQutf8P~yUo!gh|yX~(J_w_#_ARu~jIZo5}EZNA{Ce=?iK6lnPzFyA9>l*0{o5maLlp@4a(iq%$j;GylWXYXT#Y4pB_ z%ba6O(myu-y3It3G44qSbv!qP?G_|7k=^Jkn$gcqTXTUJ=MI* z)R7h8rWEq5Q@=d>7e`crsY*Vhm;n*OA_4S2A7P2@cDq~kc5kh*Syyb;WgB(PZMuAu zHg3^X8?ukuz?YQP*N z;-PANMkK#ck>9&p%2aM+dB!2g;#A0SJ^e%wINz|Ca475~(#H6C{CZ7oKm zH;z9-fz9>0>(zUfY%lBm+jyV3ax2nKh{9THf&Fi^GoHgq3|Fqe*|rkH>6 z{xjPW#U!H#FuND+CZ@)J2j!ot_(`Itt`n#h#lHEh92Q)ea#We>>t*{3gAJUId&%oJ zd*2^A1Iv7fDOTu6jvdCQqzCy91CO6pMz1j#6FKbBk8yF~?)9|%$krzrcPf ztFlPEY6${{S0g|F^8DvtkYvHH1V^r%np#cD8zP{#BfT2>j0Y4?u!Mf^t4jjq4m1s2 zmV&zl(~hqt?6Em#pXNSYj4>eGuPcoH zZ|y2X*l0rwZTnhWT;-y|mA?(;u z9D@*!?vu%MUS5}fErvF)K~>D>&MFk4ofR1Woc;K&%Zj=QFXy)frmB(K+JuP+F-yq) zml!gTmL$6OdC(F7pmx{0qnd!S`Y||N4E83LXC#S~?T1cr?H!I#hL=akbNgD+`8E zWmX49$Umz(C@8(fKp|h*10ZvYYsWSiqXmwiKR;VZ!NhHj502< z&jFD)C$KrZ?ULm-%z%(j?k{0KTy#xrx?U^BWw<&FlX}Zl0-Z>71@!7&TsKw3AeJ^(Ysi$2T-G7K5L&1B)t_nkuVx_A)pR7d^E0h_mw!uk zTCnt@^d+?5ctEVC=<1J7OiW5SuJzb567}7`-bk7+J=Ph8$0kHpP;?MMS5JaF!a6ek zSl~r zn?nr2bGZNEg!)gvEF;7qGAN17vJ9dWV;C=>rj6J{^Fwa&2M$VgnPaZF$zXvO^%W+G zo0Huii9eDwj)Z~yuDo7cJ8MI3B{lt@z&wByz_-^ZecU40Bu}bQiFUj2WjsXl&na-J z`QXw7`+#C|bOIY(F2+}tK$A?jiz$VPU(CowOokT9bN>loY5cUx<>?YF$Gk{I|ZE<719 zJno0&tT;zEz(2P_B~?9hYA^W?#lw7bo8ME^tJ&NfVtZ=ySRJuG9d{d0@9Aup4e`%S z=-uxAD%AaX73#F99Jy9HQ1O=8*Th0@TJP2Y4hsv9oNY$6T`b}7cwB_Gj<3Sys++GoM+ld_Q0cW5jWV*=) zwu(O66cApRih0tBh+*IMRi6NvwZpO|0RFLWy69!%G+c-1X9`p^o@%S85k&JgU$({T znikK*JkldqVA+KHfYK?2qD$m9Ca^1cLPw5oI)NMuMx4S+I?ig2Z&A?IdF#h>12P0R zt&}GF=|xHvl2?<#u4qp&PVsndMUFWVY&@jSCX0(p z;G~iZSUYggKxePZ(G}MxCFWifgevgeZWCBl=;dxhb6h4(smt{64zPpJFfDA-<{E8| z+)Ucy`Rznv{Uqd7&$`d=2>#0y70Vcjj6mJy++morsvH(#bwW{nNRPW=)XK*AAJxJo zOv){#MJUlCw`!SF^XWAlCQ$k*E#7$rOv||hw8Rk51_-jfIZW6~4vRqNOmn z8KOlg{X)jkmz%HxAxzc4p=>6;Z=hfgmCj)sy*d!&nYYrd?c`~Q05cXfC&r2a139c0 zKCBix%$rE%*y?;8d@75wcu>)JX$!yf!>q)hm%f)az4RTqaZew;cF`!%Rk6`myXM5? zoGvZv*w=+z5k#?d3P-s^8Zeel@=e+?r!2Q&dJe=c>^knS^=K+p7x)P_yH!!RgW#n5R_PQr3QE%HmA6 zS8co2F|SthtRA~U29fjg%=z(!bjHkL)erY!Wj^&yk**hJ!+}i?iy7x3IGP+%uvfHa z{+Qo>EGo<{)p!@aVasf==%cnEUUnN1b&7GVIM|^7nwJ*W-C&tCC~ICy$%IxU=O(~Q z^j7v^R*cH)4=$+eue-V52;te02^L*Xco_B(d+LpcznjsRBKDbj{|*n;&)4|?9GM;n zbG%M>OrT^JqPL>!L+49{Pi8ss(~Rby5{)o}0Lv(T4~pWPSY4TGc+U}vkKA)I30FNZ zr!IOcvwswFtHRui6VVci0w!(8ynQ_&&%BOs^y19~7RO(d45?T$yN(2K_uXk)b(R-Yg$n8Tnm=KjD_m zilGhh40-p75*4-_`<_uT>qUpKN0{=479dx+DU|t$MEpo?W9Qx|6B?VWI=iCH zLa=|9$Cr6^^wBApU~hm_JRqr(2*PSq@RIV9VxGbcjLW#=YnT zed;zCov;@HlOs&AIaQZCftLptMLHE0H0nA-zZ?Q0hmJE?b`_2AQ>$f7oK=+{ZSfK$ zw4hfYy8YM+)PwbI)lRqLAI+Nfs|FqO4nSAO)4Km)C-;7evHj=bw!<7oBIksre6UN? zi?nB61=j-kT*dY?Ixtiul$BBb8{bA!+VCAEMF&(KJGAoZO{c#1iJpyb`3>azx3C=g9s#Kp#M+-*2r<<+CT;AfzV{Lx@NeC2{!tHqWmRi?=KySa_%=x3_e93VV1X+|--wlNIg~u69#wwPl_YLG9Z2CbyL) zYeee5T7M^Tf0MYWAyu|(@}xP|RI?v;!t8K7;^(nHj9tH1KnqS%G;y7Fbox~V56}5v z&8QopfFgt?j~$>BV|yS)X~{%rMJViqWP+|49oTSB1{+vSP17#+cCej|a2u0)9(L0` z@Eovui{S;Dw^v0K@J!=LQa{U*!wD16g#k9LkfwtCQ{MfC-kvvUBu`4$R)fHazVQu2E&{khka7_?p`b? 
zuj3k=b7_K-bvnndz1eAbM{%SRiU>t}sNzP6ZCR|G4n<>qG4wjO!5k<)lFe4&gGp(} zI^}r)L~>hZC|gr)q6kH0vp6c+YNX-sg&iMn67`l$jvP*${6j>}Wr+zS5~u9vBglyy zlDV@N95{E!=wMzuNpuIj@P5AUVbE^ZrDQ1^P78Nd=qAj{2;cz&e!Qi#*6BJe8-R)P z@0b*&k{HC?1M}BJzM|&;EI%tp<@}zL2F>zYlW|sXDM`;bXNgvpu?0O@Bl_23=k^lD zB|9;QaK$4xHuj^Ag5clRkZxicY)IKvAkCUAJB?kqBR7rtWeQLjH{vue;u-5Frok2! z8+6ysvwlJsLnO866=557KqTTLmb8wKl8MCvRFg^tWi};LB`lmpIT{u-u`<0F0bx&6 z)E+;HHl|qyVHp#A4E^-a>K|UeTuv(5Y48(sh~&en|Ic=%5Y0xj;^fqH<1`tJCY9X! zoB<(6B0>p^k3M|b2iXTOJDHiQD^$S+s-vH!FZxzu7u8a)%e!LurWl+1l4IC~bBvri z|N2YI;;CzhcqNCCCDrK$VZHh$OooM^Zx9~U1$TGwfMHJ`(`Tqm@jGIcW8uui?;KY3 zuK`s3w&9V0>bC>UfOHh5nwFF9E8+EwZYWt_id#sV<+zPSHv;%0$IQg|b_(=DatL!6 z+3(@2t`TeWHB2*_|4^}nLDbEFD6?O!EjX4UFJ0vKVL`!%csw2c)2m_7UEIE+@^~dH zD4wU+vl0XB$R1K0v@QH?-?i?h+wA%*u@Vo(4C-m530)8AsH=r)<<P4BLy<@40uO zyNn@Xb)+1Jo!kwYKWT_SxA7;$3edPqU{Mh#@RQoMR8BhlU>`uO-s^ZYuv6O;J`ud^UZLL@+Ptag<+AuQ@Uq`J*42Af)&Zi{j z(&6D_Muc-dJa*S+etfLBL#gk6`bBWQ?KDxLqU!5K;Fb1etiJy!REkFq=i+&Rqr$MF zU5_&uwshKWW(!2}!z6r}O~S{Qe)ejL0;VF~jIaj(Mcs%}tV!gEfl=sg`Yao+uV==m#(0*hLngO8YaZC+fbX-eL0NHTHOHz+XfbZOKrtmvbs|v7~!Q2T5{Q0~F+s5|5 z&Mb!|2AmAX*xo;`gLNA{uX~=c!m`f!W__k>E#svn+U|bBJu$Vk?H^Sk7S$%%mMN>< zl~^l9|FB)*)EAw|e&k(KC4GBspZ&r$boOg+;uYM5cLQXHs=oWlYk5YFxS^e&qlZ2v z?rGFE3pY$Cv<+b52%B&9w;YDj4wI!F{O(e2(JUjrC&Qh+k!6(9IorT3KE-p0Gn1wt zp#1#)!;E-Ox>Oj?`@o9)o(B2%{Lf4BKihnU?33KsGF_ezsz*%_%Uor&RlF z0>ama`}yT$%%Kwb_X}2eV~E``+f#`DVjETSpBNn!ici!&VpoXfi!)Q~?0#;_q%9Sb z@rAopab87d%np;x@wveHjhUw7%hW41w^zN7p4T zPBPXGc2L=MrVdc!L=|l(k}<(5_DnDAMRy&AVUeIY_(X`}Xf;nwX*X^^E<34BDjSTQ zq(MLIIvR2G$N)ae2LtpeS9z$*bFVP>k;O=FfZU9+9ZM--Tql@NSB*=z^eg#g(@AWc zp2F^FZ?s*8iH&w!UbR}wf~^WDz4Ytt$Ue1USDht?#za_Kk>qrYDtvT+>a^dz{+Czn zo+;N2kt8@e9?(J=5P_4g(x!$xm|UNc?A{r4Nkl?{6=$z$DD<(@Rk||yB1^(g#o{s> z@owiTne;$b18X_eI(QP4LoG7Jb)kDnIuY)7nDz%DYS6x9a$SgCs_NOa4%8p6kJz5V zm|(&ztYtGCqf;htt)fzq$>o8G#R;dC=sWe>KGoZ+J}{~{ABkSV^(u!1FjBFX;1w<~ zk6j@?wyq^Vi~+Q$rBAzBJwdN9pAV;IqsQaTZ3=1M?zQ#Hos+bzhUEqBxLxJxv2uLc zaGOBwLIcYZy0mq$Rj#silKfLYsqq}xJ=X^)g$4Wdg1ar>^1S0!>IMP)gK?>{+q)-? zVXQomel_TqtLffb(rd7;$+r}dA6qoUDjdEHhAwE_g5#*y{N-B!wO87(zvuK4Bt45qG=m$|c zISU0^W%D*lblphkZP}BNt$U*GDTX+NK=;r#x`#F$s=~~BgfPeSIoEt|u+EJEr(SMX z)9tNmlU^mgd6815ndS)(v}hjy16~2wP1h*N4#;6WT0$n+%{idr|NVc`c2`$=|MB4a zgLivx+L#S5BpnZc=>z@r@vz;iO%xFGM*!@2i{x=IvQAruP4Ya7ENo7J(o~q`#;{Q) zkiI}rbvuvi!cts661)e$3GKfYb~pu}p|5%sK4#@EN9H5vrwMXzZYBI{I37|+vxvZPNw%9 zUsSHB*7W2!=tV)x;(J1+d#j50EQ5CZrIE7nJT2GQUB}kAcAuYxx64@Sz z(C|mT^pd|6GgJ#EJLwh&5!;YY8{Pj75bh_76`0`z7WmB_j| ziv|G#-AMjTKupd!WH6MU-W%^+DVOAP>M$7;&gawJ=g)6%Z-I-$&Zd*u#q$vtte(F) zc)9=XaDNjzOjSOOM{qQfnM8@sfoJ!4p%2ALPdLhNQ8$xc%nCMwhaTXSJXwRGgT-w= zD*y_nIh&Pdi@Ap^(>oZmE-<0M#~XWx$-&`9^4;Fy!C?1s(9BhRP}ofW*Z*swQLJ(- zNGP}#YhnK`8fz8y!M&Q^o8Obl>9H@y`N%%q+Yf9@gh9E?i}Jd#@W+d5SiW!ujHfk? 
zIEqH7sInUIpBBY}*y=yik4P*OSh5uKZ@7u>Bd{(TAl;GO%z6+Q1RMCB1EsJA6L4Ul zB?@i>gPH4TIie*ZSEbqXYzI4xXG{I-a>%3r#h(Ve*cf6=i(xj=Ygo@Ks-)H8`Z}N8 z)6$pfhx{sDleW;=;@s^boWp2KGL!)j_VGMeXSB`EGkI#NO8Qu}FNSP#8r>VZ3ZdQ5 zEcC>L*6>4y4^1^KH);2ieF4?&Lv$LOK#27q4cGi$3xJq!*{-lL7weU&JNm^kh_%Im zv03N&7?HSk9r8S>)-FzjX#L_#W>d{QT2J0*>y_nb#A}wt9XJ_bNVnTHt5CGd^@^e^ z=bA;~l$k7MgCeurcfEQroAb5H%5H~O*$tg_LV&WWVbj-P*zQ%pTNDqd(AdE#oU`bV zVJYr;CRf_nL;2ZMSy?{X`uRm>mU&~NGT;C+O{GJ6e3n<`z>u$OwisuZlgU-JZgsTQ zT5#f!HEV=Tty|9qS$$x6vaD&Ur0J_+LfI5&of^xCur3G7pd2IDKX0|NgLfcE?7hj} zygxj|@Lt6?t?b|TKfX6Fw$fGxym+>Mcy#b~?+ApCe$pO*6pij4T365pSV#LG_edBHxjNB?Txn`q^& z?9<^sbx1ylM;w;^2tCd|?tcJjZZA9h^fr6{0ZY6@r?Za*iEnTOL&^8=_OtH~zTf-q z-;VYvq}k4w$y5DDhWzf&pY}iP0~ka^n}<6l_8T{Pn@b&i+&g$DO;Hi{p`-DA-2dSt zASHYK{v#mz_5R2GcQ5y|x9?w(@e1(2WXVlA1IzJwF+-(zy9Gkj0cL?5-6!5?T@sR+ zNaVPUZ;R0=P5v~w#n=Us4TthOjZosa2Vk^^#sB3RlSJBR?jVC+75A9ueoB=nv3Kl=6jN^lIDj;hTNO0y5q}?>MgS** zBC5kI!-xPrKP>IBLQAPiMtNS;4-}hds&c@=M-v_)0+WjVttxuFeU`K7wA^W}){9{!>s zl+Uu!xXdeOgn&5t9vk?V;vRmzdH)?8w}(gjADr=bb(zCTKR$79NHXZNd?0S~%CEPa zbBN+u->}Rn=B27K^{Jyg;Ry4zEIqfG1W8e5mN=WpsGwI?=hZ)sjsdpo!QOXo_78i_ zV?5~w3ki-oL{o$x=!6kSU_C)m_Ueq3x7bhdP8iN2tupL!=3}I^unKNUwD{x*IRu42 z8_5wJ#SC$!C_k ze2BraDoVw{=hK7_)|VeXCHXnJsTG)E1;)pbXJENr!=f(+3v&6RmCd=U$N?4&hA^~( zEkOvvj!qY|>4Xn|K5lc>85qJPa}cahXFi{BgJB~^z)d0;t(VP*`P7ASa4fIG2WH}) zNm5Eqltm$pasO5!a&Wff3>8PD9%rtRiL90_2F&R}bBA4dNy-!UyT+FA>-Ls>bn$Aa z!>~`9fC4;&9-X%ZM=?Qk;IMeY1pQ{39H}#ox0}iK=KRv1mfh$;`ZNx%v-5~LFV?O@ zc5Qo3fdcr!O*H<@E*F-01 z&6roB_qwU7jn0fskNI}Ibz%&!gE}@U2Oq{7ft~hSleQ^>K&;OV7=1_^EJ!ox%LhKhhDU zW>aDL%h_aH{*~4Z^`XV??3GArrYKnsl3oMJ$QUCU;kgSm=DNU?S$^robO=r1ISxk( zRv^laQtTOKvw~)e;Zr<~NJhH`DR6e5G6V75$M<`$UhW+p5lJL*wz?w0vE&ZBl(VWq z6t)-DqT;-hlL@w5@_9Fd?tuuL+?09p@xx0R;GQ+g2mI^;89U7m&_553MLxMu&vELc zL0a+HpBM>5BYw*Iik%}AGJ$f2!O#dL>7XbBXR}E@#9&6W<%9{F%&Lyh)*jm#x60v^ zRlnrATn?(vO^U2{MyFq=yGERsHY~6qU05I=^#z%=yvyIUlP{7RVzRp_9k?#^`3+JJ zi{{j>Znw%LG|7qYULj)$_0Q>ek|C3MVkI#-?J{!lL@I;dBa|$3Bg%2L^1VMp;Dly3 z=CH)h(}8D|k-@}A+SnB%3}z@@3=<2rvK}JL#faUx&0|IUhZFUocgQ``8mFkF$eElT zva&M;P^XdQgyWK)*@Mm^2hC93fQX3)US7C*TdLDtQ_6mlI5tw?7K#Dc#@#E2Dh8IN z&FJBKSe}cAY%w=J4rl5YG{MeVUW87` zNR}NQeE)9mO^}Q1kn9v#r^R^%aj$UpN3vAlyq0*Y`+TyP<6mCGBT7diszJ7P05XiR ze$Pt>u8#gL=^``GKM+pg?2TgXeA$l9c6GvUmIc zdc=A`SgM;C{QdBVR92fi78{!VKLpvMqc0EBEd1QvU2`=<58h8%uf~Y3WGYZ+$YE+v zO;e{5*~x^^Jk_o8wnS5%Q?DoNUq~=cd&#MCR>#NDq@uruJ3sA7rp8n6KC@!zuicg- z_WhLB_H{8w%ATB-l)`O60sPLPNqTSleadSOo4Z9_Gd7}%0A;CAd`02|W4kEE6zU=| z&^-_d8r2qPE?!A4&Qqp>rxl-_u)${rgO28T#4W(3>k2N6BaT)&10fUb(tM(ficgxU z9CMmpT+ZS6Hbg}%c_?HD->7i+=I81vSxs8<10|UB=PXrtMLKipTEr+Zw8c-uC8sM@ zq~BX-GEx!n9cl;Wt{}HefyN88={jEG6D1G z8qu$}?+$>uHbF)&`DRJ2Dxz3={S&=9tp@q|`D8R?G~&oi$F$pQNOA}>hf?*FB?fd* zGgaj}jhN(oN?Sc2-wPi`xm_H|=6+CALqt-zF99tEEYCr1XIIEH-iS}swDJhJRzxlv@z!Z_eU0u%BFaIjOXMNofIvnP2s~>5rD;pfnGmE%rh!~6M6xo~ zdw7+>A4F4Rw!J7>=LRD_<*V)7oGvJ#MmbA}o+KTjB3+`wLNwUm9tLHYnQ!|T;QcDu z7Wn?;0}pe)7!kI`^cOovwhkq#j8JGhRbi#X3?emo4pFPjVZ2V2%*IJy(Pu2{3`VsG zGbEJ;?-VD{#|w(^JF*7R4~AEWe$<;VUclokqD9oMjefTXZSEebgUJ4)v6Fn1FDc~k zqYAm$yEfW6=d+;oQah8KG;SmE+Rm%gGUd64xB7jSu6EqkW!onrUYQpLpslb9n*B|n z>=TvD-ZKl2VOw9q%hZgu;~)<5{hhT|bS661?zpuVxCAin#o7G5!-gD#uaQh1%QGP8+!xZq|?w zzphoNKJY)mWg-#|^TU1e$cBDUMSQ~HKdKv1MLg7RSHMvZ`zz?8BOd;v-(8P@gfC)< zlA_@Y8>Bt2oJ~}QybWd5j!+ypPJddI&?1T(p|nRbG91k>&1DkFy{pI%*yOtNZ?tw0ag zIu3`5^q~{9s_@r6F>QFB4=FQpNBmI2q9f(#(1l8vbJ&QcIvwNTj^A)|nB_E|r&Jtq z-)Xqtiq(C{OnPmGz+8BxeyxF&^Q3oFyoOpeFZO~*`RzQIC&<#oZ>d{uMap5}X}5gEmwze{xaOmQJ~y8a^ts`Xpl?f0 z3fy=7;JNX<;7z^a(6A>)nAEit^(oNU%D2=LF6jC(qce%6dJwZPkMF#f*pN~QFE4X%ZBuIV 
zTW-70?q@0eSGJ>sp<>`&7$Ck-HKnh8=M9%0(XS3R(bv7{>rVAK5L5^#9p=R~U;#Sx9j@~ciA-uIfow5w-$`CtP#@RsE0zQf{danWgiQBj*3$`=*-#@i|O z2BM;;sAl_##?G$V{wk6%H#I0qx*ND$%!iZPv0M`L2>nwm>$TlgSv-+s$>V)ljC7X` z_BiO6LzdtEA+$N?ED6x?7nRunkrtVL;);l@D{95tmwaD>YDI5{20QlGZ&fzb!rSJ> z88^=Papa<9BDYywQSf87jEP@W18YOmE!k7L zM9?Kp!s)Jk4hY|JM2m1nC-gc}9~tP%dlo^ff>+vNYR%uQC5K$A@C>tFN#CK-{1OmM z=8?>AC!&>_qeT?)D%+_t88PWYcsn^gIxd8rG#TZ&V&3p@ zC?~a0adChW-QIXjvLak}I`E!MBPGRXE^8RMk?#A$1M{Qq|s)}#~CN&l-agOaU;eoWK_&jiHB!bQR5EXZN)gl0}n2hCWj<61%u}T zS{hcsr^Pg)D;l)ggYvpa>2+t#8cVxqFv2#>RJkqGhc0(QqMQzh*$T+Zlrwd%r#_vJt!9Rlc;s7VtCJ~u%SOe0C> z3t)G?7Hr(nSrGVJ5Zq3M3F)zGr}H|Ix>I4>(yhzYaZ zY{EW0a<}=+e`LEoV5JGHB}?t|t(?{CCRjDXb{o#-VaujImx@7(&*gg`v1?4G9Ivti z-At?!t3=Tjs&3ZOXPCA*Bl}Ai80T;?7(h4dx?&3r#10UI?ymEY^tcioQzuo>INi$! zEZ^z6y6Fc@@A?{!xI?lEk;F2V&T}C&s9aRM*s^Pw2@q^U5?}Bgm})XP&r1~H66NmD z49{wlX?&?9C(D#{u1SJF)4I{hpv}!IQExN3vXBI+^5kQ6JuWFE9OYbzWj1mw9U(YF zcrf9AtkT+gKzz^IbkFn9Z6gkRW`nsH!L-w=pN`Cf^(1OcKgRWwOC!Dt=_G9|WbxEs zmoa(WMl5jUp-98(WQU!tu#jvrZY(n&?r*~+)!6|ST&uo|j+26%{+{1<5c~uag^#oA zs{d8;H2HdqJo>k`x-Lo91bD;oO*w-mQGtdmdUp8X&B0Oj^8MQn=<2-p?p3z`ulp}Q z9qqR%Iy8m4-EJ>if}{)(cSH2g+hwl-3{r=NT9|7B6? z1~?#LR%zHs<6Q1rBwKP4zO>)lmWYL{ijXtY(O%c*Vt=SNkNykV;QN&UQBo^254n**@b`K1v9PdIz&j2|<#=mM? z4%e_2+q>&1n_o|6AM76$)Kh;h({~!9qqPQByGv+A zmBmplaa29*kE-7^$QAnGz$sGU!u?dnQj8g4tXd5^;ef^ZjN(MYjnjI9g}Jzb8N)rb zH$Mak?IE`vWV1$A$-<3Sbbp7I^SN@`Cx0maP6O|*%4=X{X4VG2)@DYXLe~7<{te6u zwsuPFE+r5#uJz8ttFk_IE_F3ulemVf&~en4GTkv_ycJBQ7jiOB5;FU2THmLkmsH@Q z6uL?gz&-8{_0C0WfCY(3z1TqLVv&OgJttpR=jvoSWBi(v3k_K81@$u!~)!k^H) z519RRM#a}eUdtTN02^}L0NcOsNNfglCA6^~IQEdDjon@?dVuvsF%Eok-*j!<$B_>q zF6@ZR!Gr2-!z3Luk&#@~A{qn(%r7t+v_leM0%i^XEF?O{2#xRNW|U+HKP!tHb|1=b zCgm_e94I@m&@LibRx1E6qdRTyTqc(EcnK=JKoFK8UBDn*Bpb(4uRz~^zTET)>sk2kYfAU&c7H9ig5W^ zW%8mmE8d-M9!l(v0NI99QE{Yis4e=tl}e}ChjBJqPm}L3V=@P&x{GqV`B+&)D{7pxnwKV$fzGm3@yJsw^N$tdJrG}Zu6D||eb&8)b{gv(s#Gq& z*kF3uD#rR3#!|l#R+~DER-sOk_uugq!HDGN*HXgEVX^E?u?=(5mmCzLG|#;E2KMD)QWGG|2D zDJT$75sV`MRe>VBKmUjp3B=7)m}Ik;I3|Pz^mYQ2e1b75j8$({5Sf9|6sEF&uA~-L z&yphqwIM$E8{W$2fEA1^y2mYt{cTL^X7m@s5_Z;XXjCCc7O$_NK2YV+y(gitp+_2w zuEXjD{x50Hr#u0{o+F`!A`+Rv2t@oafVH-XK_?*$LyjI=9TT0b#}OGhLMoY?0Uekv z29z_2b?SKOOwMY<;-rdlnDz1tzA%vh-xlQj}oI{ zXq2kP4JCUiAKsI@4Dq^@=Z6%(BkOsEnW08vs2lsz4MZ zY2Z{5xl@VvewrAb>b}4<8FcN-D&EjByp5aWrQIYpq`Znfk3iDo!-&G8VKn&~3>y|V zwQEhf$Nr(Ei5rKvgQ%bqo~8!q)P#{2r8A!`r+nq^MSurX|^V+9$?D6*5teB307}-tbZAZV9zFCOA zwLD7qg$?!P*Cc~pxzQa?wQo+53$aif+5U{8@)zQK?s?9LU8H6kG# zed66jx}8ZHbknWE*ab%<_DQ{mbr9}&!*j|t7fjR-u#$|*M}$$04nbXCrbQk#|KQRG z;o?c%qvY6;U8rj0!(lds|ETz#=IF1@ReT%xz3~G&OHiHPm>qpR4umK+zzhk5C?vnT z7okz|faOrXXR`m$CGE}RiWm{m(iA}=gld6uILKc-xXT(_f`sV09va*W19Ej!%;s$9$%6)l zDUwzvP4K?cW-hSN+$HA7C}~mE zKb~`(V2;O!Onr1x)FdQ!U<2{a&~dax~T<&8|4FHejWQb%CFCcd2+X##H7*Wp**KLNX^~} zu349}q%jvLDgU#7mSYCG-6#-$2yN*H6qaA^`!5e=fqSttu#qpm+4)?o@qZz9_;+r& z#tp|g+Zv;LuGQvD6- zt($VDw`5_qjE50)Jc5P&?KxPtjp+u!FlCx`H4L7~>M$&{bNfo|DPrYOT;CucT%z-^ z<%4XTffcN#*%r%HNl^iOMcu$=C4PJ;@ zD)g<6k~alkr79NM6emzlE%aWv=pbyHt(x8wic?-+; zxt5p5ZPOcyqUhJqA64ZrI$dN1%ddZUEvsheT^=O2C{2Wjx{meNDQDHa!ICkVTF9>J zh=ufSiVosLWAUq9+v|eU7Z{NR7H#7h&`P9jC&^Pv0cLQgh{jsEQH{b z9vN%JsAr7SPu8M@7a22VSi$z@hbz zC%efpPEbCt>}EIkmSSpYn(Ar;!`t?^x{V@Vw)Gndec4t-?A!YN`s1MSPrQAkyTDF{nN+BL}XY*U9w- z-;nOb=p3vjL#Sg0LUlHqe3nRj8n`?^WTs&jo|wnhrCZLmz?L^I(2q~QV^Z+Cbkpbt zx{*jv(%4dM#;zxry#q#c*PRokgiXbZB+6kkpqx*%%D1eJiL*ut=ups$U%&V{-N?Ay z5n;*q?VzaHLb-ZE>9x&JfD8{ti=nx(vtlWFh+;tx65~9OAZrs|Q#5~mX?tCH`!flZ z%LU7|gv-w;V6H1<#`fvo%%G><^y6%K0uTL%G4%CL_|E+6^YALFdj0MuM#9%+;UCAj z|NR(tafl(d?zoy`D@uw6@XGv-=SO<~f(ura!r 
zwI^wZ&VDG1IV~xJq`PiD?V1H9j`fuGg=fV&V2$o+sU3`w=PkJhLLZ+m#`23AHsKEt zgSCY@swS|kQ1TeHEC(N^w^N8mD-=pHsoc?B&fCVWUB^SCYlD#VMmfqHZIC8%=>o}%(U0gH8D8Cw2pRO7Bgyz()AgmH1u0} zO+`$jJ;~3?gX4%TLo?Euf{1YRT$6E>OV@~9blouy<)=8Cr|d!}wobruRt3$XWf6>n zjLk>t^4zk~m6t&P#BW%FNbB;7SyP@s#3@4@lKqs<5zmJTi@Jwq6d#2-(x(g6I3hlk zKT@O#`fX;*V!^mf#zSSqRIe&B2J)YOnv5hUiHBKk$B&!L)9yGsmkrAniRj0@m}O2~ zA9PFKmvK%8c3+YNwZ&Ma4CI{bVqb6TacDnMr^g!TCV%WFJH4ds^4@Oa;@U|JN0+v> zD!&;{AK>_59W{1HlWje?VJUjI3IVXjJV$ky4G@6etX1=x)ws7vihx(%&6-`eE$8{f2wSQvRM7-cW< z*Rp=1q%P)6j@;UW|6Do0raZXe*pUD|do9ceo1D;0b97cDQW)BwF`8G3dW(IiB&jtj zoFr+WNXI9R9M#9CiO~-jdJqjolk@TpnP<6~XS-YE!Hia6?s6HM6EbbOm!-zcov4Sw zW8Ix&?9K@_ML7)|Xw}=3hFIRQ7L8!6far_VaJqgqBE&%V5omB-i69~DX9EiCh=Ree zQdYW&0c@!hM%e+PuWB^sp=+p*;Y!t-R@gg~6gI+)l;b!x-#%fVRhTE_#kjD`aU*&J!&noSM88d z9+r))N?8$ELJcxERp(pU$rWCp!NMT!eFzL~FS!6*s95S=KYI^med3txdK%o2o-M<( zD;r?B#vav!sRgb4Q@@EmevT>F_NIUudfWtPmf=z84+rief2ru=ebk&%E0#B9HRBXc zQZ0l0+6+@GYriy^w_yIE(>cQGvp7+;NMi z=X6Lt32{=_(%{s%gKQrgBTjh>;WBDt8jMeDv*`TZhwp zaFtaU`Dsj7`%Me-aRju!NV@)p5DN}X73&?=yT7-zBnSGJeeD?=8oJd~$824drz@rt z!y6&(0X~@Dsig6fG2?48i-m?20CEM6;YsL{8@0QrP@>fFEp3p3P3*$nnBLDX@$SWJ z5#Z1{G;ReQtp>C53?+oi$*n~FrS+r|9Z!affm5-;jD$Cq! zs8R5Cw&EEe0lOK!1R_J99Pjo?6eo-LHG6fsaL%mh*7;>%gb8G9X7#x#9XJWgaZh_2TAe7GNNzSNXDMBmUmU^cg zy!&cJne}ZihXt~}_{0Oo1bILAg50e#2gC_-HSNj%B>90dCi_0hWhK@h9F6*HGQ3aB zJ+Tb{&i97`<<#^N7p-X8OEh{kkR${+PdUf9#`HaDayaSvXnmp%0J_p)Vw#4R>OB;l zIU9H{S($mWQQvaFwx#mc?1+{`_);tAB;=%e1fR&mq$4ryW|rfMyKC*&UR z^)8tTZsO$>;By3Mr3>JR=+W>}ag$umd8YAhpPnrkrV-*yPHNx$;{R{&UAx;ju5{si z#;@q1)+_)r*di&*nPCPwLravcHI}H5bTo4qzKtf(B-;W37!8PGy!7AS+PA7*)zttf z$&PbIT!{sstLt97_VwBGqHxyZIewPar9>8pABfHaY4S$ZWv0CX@K&&dqWO(qFcpla zS9pzMg*$ko0`&`Ag_utEBd5r@N4R_jkYY)EICr`pM4dBs8j1>@^f1ApJ^}Jmfb0R5 zaeF=I7t?q4YW9@2%|0)yxjuEVG0tF1JHmCMxhLT6+ITCwYjC3->{EzmhnK5-@s6VTh0cBE@V+@dc%8j?_4>!t?D;G6;q)kb^F3rw`{C&5^?vI@EE7wu>0@#|LtC(CULfb4~I z3X+b1xedy(;8*0Bku-3&R3ywM5RQ8Lfk-0DY4-M|qC}RLbnzf?HZnmKQ8`v0BuC<4 zTPeTW3H#Vu9riuWue}pL-c2qzUhCsqn$OPt60Cj9Ia?niqp&0*gLS*G>86s65P8;Z zedNi+!-hAQf3Vh^!jO8nO^3l9|FR~G1drW)+M|sdfEH`xS^dcJ$U65X3_eM?2Qj$+ zsSTU^fAJ!XlO!85*F=%-bq{YsJ)+n1MzALLxuaVjM^Tz?>3dvNzt-&h!>1-xQ8ZPw z38NFho4-Na<{6?oqte*(US<&qVV}saG^h&#?zFqIe~CW`O7EtiuCkY_#Slw4(#`6> z5C+>VRit4#F967o5;k;C>r2R&c_^0r8je_4A49|GD$>OBk^{HP@ojEIwJN_XR*!Tq zKGM3wt-z%%rc+Gh1>!r2r&kfr#?t zr!~GyNyCBMSw-uCJ`>9-tc(?|sG~xx4U!8M0#sj40D;%#p5#ddf81KfD(C`Sf%)8d zh0f+?M}v1Q^dt=_>qxOzwzR1Uk@QFbjGsv53O?9iH)AJnXf92@ zH&nR?irEX5GlU}jtTivH_4YZS&HBCo~ zX6}5Q!p!Q+n$)+xip)7D)so|;7wM}|>v@sCwNdtl8jc{5t=Szgh&;K?5fn9Q7t)#EtPz!`7-N_6(hAxtDIx4$ zxE9Tdeh8jT1%aIJs-3r^yl1bFYKyMs3O6`C{c3m<*OEa7+cvTuz zsjNq@W{6yc-=PBamBIvj30RVDJX1N5b~~l+!dFlI$|bL){Xv!Vnl%Nz@7hF){Q)Z} zeo#-Shvo@=h?-S-GGIR-D`9A9W+e1RKPe*<7w;i(3C9^Gpu)PLDj9iaT@C%9Lfau) z>ufR2XkDRKJbRdNlRD^ZK%Tn-yjmq*ilW5r7KEJ^*BIOC3G!2@d?$sISutcUejF3zh6C>;@J8N+QiDc&_zJ<;p${z&z>|44$fdq}QGcyV70&iF(%B{?x(iR|N zAyK2q@RP(ro)^it1zZf8dvGEhOglyOYK(Ei-3emB!bnTubzu=_x;8j9g)rdDl7ew@ z7-YTUz7eP+7%AtJZOM6()u-6?PGTojhHqg4t#j4DZQxjjMz@43FHo?093V$m`=PnTb;sVctWE^g0R1H6412)X0wjdfmIr78sv8ZcORBI<;)}N8ri2rWHREUvoXTGE z5Q(q$OE{J5j0PpE1)~ENg_Qt#f(M2YU^~YIfM>+x4!W5M2oQgA_ay|jNOdUFX)ieE z?xx%&J`!nNqsv#z8&U>vQZYhr6#hggR|X;1)`)Bpndzi`i2AY?ka8LpG={=IM#V<*VMaRl8| zMdJMG%F58Ex)tK#`Dd}IWxtfI1wsMfSI?IMr;h>Hp@3m~DG>UUz5uH+fk_!@&HVyl z(Hh@U2sCzuRnv<_iZ3D4)F7Sp=*`wWO?u+bA?Q5nTo@^s;K!B|x8p!CnvGB~iL&Ip zgt1nTm~=`Z(?)n>Vr*;T0PeK4399ayv~~WKnuXBPgJ+%V4HpQPsPpk#{9Zd8`3sN$ z{4M3K8RZ4#_Z+g;A@TD3tK6NZF8*HhboT<#QMbNH`vc8+i2}(vFK7Xa0a70}~wF&4TaEcbUQwXTP0#$JG9464^WF^#X 
z8c8Mb6^wlco}AG&?Adjv*TDl+K&#ZK=J}*DlQ9*V~Yz6j?1bV+?bjN=Zg69Jdx6xD8JcV7p;O84^!{dzN#Fpp z0wGwis=_yXz%PxjYGJh4T0h`d1}GSm6&wKtGfhgEMA#FtP((x*qyj;tT^!Dv)@u1)|5>_`Ov3vi-o9Jz9w5;i3{p&5A9-UiUW&fDUy$KC6K0!jC;TogAd zkiZ%+DF1HC*i9)$!$y+1t^_4&pW29M+TNxkN#~ajT}BLT9@e#)#eg`E?k4$7IZP-o za7O_vfHr5?O?mA5nq2FVclFj*jny3t`Qq9fz2_bwPEr4rz;7{MG^W6o^bMq5vuqFO zEV28-^U^n4#Up;59JNHX1>SYAF4T!_+;jjq!As^L{rcE$0<97Q5q7h|2*z9i&$l^N z3@og1g?rGNz@?}bQtsLbE;27ZWrI=EXx$|vEWb>_zCWip%eS(JY&P@y+@=~hj%(rB zymuXbulJLS>2zG=6N)uYfD9Uvhz`LJ5Rg#WW^4~jB+!8~Tx-}9Ky?`88v~O8y1~4Z z#=$|t8jw%&&RyUzq0t1v39ttn456_!v;KWC1}{1(=w?MRQrkQY9YNbM3MUheQ{NDkheU(UbQ#0Sf;abl>^hhZz6ds-kxfAAI-f8tG0#|< zDm^pZ`qbDPpvA}zv|}9|cR)zp)>`B<`@V=u78uE1CBB^11OQAye(9y1` zU3RErXVh3hdg!vJXxwIR;Iap3NKH_87+T)EfmWT8wq4-Sw4hhPoM@NkV!5ctiZQCM z+q_uI`rc!ya}SUMezLXD{H?Et%boj9z)%BC?D`tG+_`fWZs}CPv00>Yrg13Xas;<( zZ4=CQy#3lzxcNs$)h+@htSV$1nL3s?h=JFLz1|L-&mBG(!0vz>Jw|CJkPk1Z@w>Vr z9_ioOE`i?P+UNEP2#%bWGEcJ`tuQGchNmibaf^MWuaD;9ZqqcBu^nluBeJR)WB>%A z1a!tU@Q!%8XWt&2KK(vBdGUV_v;R8$pI%eFT?-wfSO%}dYZ;2x+P?gFatJyt>IjU& zuh6YR{h5sQo~bc|I&!w-gTRD)Yr~gfZ@G@dDMn+tXSTi;D-5nzOA4`im z!8%axgwP^&tUG1%tsD)eD_hWMG>Zs6;~K4M3=pu0o7+9t7ILAjy`m6w0g6 zOSLhNqhBsmnFO^EcM<3#8{*-DE~&t--^S~+aeO))tZleM%@F$TOiVMJqj)rY z_O8s6z$vxu$Vh{>cfVB+*fRfi+P-q-gp0%Ul~fi6pIG?TA^%Y9J&Dh0;z75EHA*>k7_56|4MA zJFJVsv2|CdIe|ne&p0yb66m&N%TNuXEM}0)!nW0gc>KN1S|D@P?NlX3YH9<92Gs)% zc8x&}%;uESrX@hQYp5i4H*y$UXKziGclErc}Hg0 zaaAQ;huicPJdn*gf?V|E$5&4;akoZjk@Sy^4NGbyfW?Bd z{`95x&O?4Hu5)~lyi}$sVMNx0p9lrSqxn-Oj&Oc$`L4wZq|!T1nZ`e``B^LDlZ@TC(}0 zTRVn2WE1`o#)d7&#MVT&smjFTc1k2O;iqn>47NuG%mr8->zhk)Rf3?RhwAf3;qtdu z>`<@az;4=L>#WN6Fq%1yC{j(gp5{-nXw*9yh54o1P&z2dJ!q<$xU$$QDL!g^k=U~6 zi}yc68C@av*J^H<>FYZ@oD@Jq*wXQR z=xpxhqC!s}T4HXXj@hSf1Wap@3-}$0x0?V7$wUjms0e;%e$QPkwjz%edVmpTh`(Jr zr(v^$&dPIWi6gBUKPb$9h`zgWO+8R6f>(cqEP%H(fU?K^R&BWh7RuQE&sK!ypVu5P zc$~Rs-qCXORrNzC-I~Xd&)2sYU-La=Mr_)7)B(^iXPaG#Igy zd4Ir6@{WcLt&5L{3i(i z%jKj;`P0y^h@v%cz}I3(5}Za`~--V2u5k9*|5KJ zbQAGA@3CL{K>uWKa;U@Zo@a&2LZTt{!b6%}P?kPNJ7!aushv%ZVx%6+80kKmD)*t< zukM+|X>1de`4^}-sA8S{#$qxTB(zY#i-xlj?9pqSKonisM-h1p4X}AK?a9^$G-SM~ z;9$ZHGh0%5$oOS$vGPJV#|JU);?Tz0To)YZTB1r~4xzL}NR~G@s6tJnjKW4)Av%0! 
z2sFK`iRX$14e58|0(<+njO}0bYUniM-$&p!N!}?F%3WwSU(8!(mDZS)-*2-#w)}%H9RovUvMNqYh8LmKbZ3j&&9F@pBel_iFhmYgb^c_e^-VUA^s%q zUKD11wOE8f-Ml1i-hFdbOf9T76lqJ5dn2+ zzE2iK!HgZ^Ll7?cMKR35dT8$et_)L{n^!M1h&R{)E&=ut1#yuluIHd z4R)y8{pl>_RFFvMc?V(gm5haL-sZd$`5T~6Lhy=gNc=H1TfF0Uuj@7?tY;=A2=rHc zO*|0Ci@Yj_S$McAb-E6mpP^M0b2FluupMpep`I!SYG|p+S40>CrbT- zi6kmCOL~l6Kfc>t9CDGoKOE{JtzETeJX@Bz5r9qcuX;c-(yyY`F;4Hr`MoBNlspT z_u|!Q(s_D%{KNnEw9EM==B!0E66nv%a>0@8m<^JVqX9r4olh(hdNRFSAlegv8&zn) zGA#fNJjJH_H>@Fb8BOZtJveV_Y~ zUyM~F>EhvQ_U!o&>+ipnb@_vEORf$6$}14u0PXLxyw1BB#d?%Z$)}4JQ2Rd{Zg(nA zl}nsLg8B}?r2c2RWMB?q5-IA$RHt}Pz+tz2bNw3Bo2A9C2s4b;-J@cWmt*uo5OhKU zk6;}axM6@!F$aECfWF7NVS6Nmw-^NtwU3{PAcfpuax2AoJ09o3$42utUr z09cu!nIouzkvQln>FH_bb=A1E1`lEVBt~_kfm$<>8ZRtIWNg@`!_Y8a`t}bVsn&Qi zFmqma#S8qSBQAZdL`JU>_ml6Zx4@Y7(6<%Jn=1rVjuOi0jhVnkCA2SayO!Oj=MSB` zm309Ki|K@+D}O>0wqz>;q<*lZO+k}YqGhidE|GK!AyBniza}GFjqh+4YcrDi{*0(O zvZyZu3v0(4p9L74#sWd|>`yM2g~JbSipL#L{M(?yeHZK2z~6L<*A;TM!lkpVJNi{2 z!9`yW6mIFogm<=YPZXd*Avn2|w*Fl$8yB1>gAR9sTJ14Hlaa}~194R_<&QYe4W#`V zu+9W?E(~y|4A?)R0>Yf4^RA;19ug&Gj$niw@&DIWRRl3?CQkT@VfBmh9W52Yl{zvt zNUZy^{&6`mRg{+{FdNk>^7fQr-^S%l$(g?4dfkrvLGj%*+@P3K=yp1PiyHjgQ~P(o z1cokZ=DCq!SXoG%>9@y62hW}!oSeF7<2`RCE`9pMWZ~h%S4XE0AF>`U2R->{dwRJ| zvCxGm5~+>O;Apuxx;zFx5VeyuU4T^a0#mM;k^{3f4<;j2uM|GW4_&s_%Lr$$sC*|` z$Qcg%(GfY~XOqs$S1(RZyRq5CQ#=4NMVesVu+Jrx`kcN0o)VA}uvzx#*bq>H%w%p$ zBN>2yX`0WkZu0jX#??%AJ%{;UST<>H=cv2!*_+2LOgeh3@{JlvHuq<4C@FrpT_<#`NNWTY&`}2j$P(na^^2I zE6W~Wn7XohcvEP7NGfYO`1Gkmy;U~O??5F<=&=7p)A&p1Bgyfz`wd7MZJKWLygqJS z_5&JJ;1rvGUfUB;V{F_b+4XQgBhrFl%faAa_*`BWv}kcy3b_ux$mCS^e=V4}DMqX(ONhcw|BFr`MqsN3jSLOVlbO0zhkc^L3mJBFb-c>Thu zCe&;B0$4;)`X+A(zuHJk7JCn*`BKr-9gf;w~j;8q5W8Q?XTV zC2BPJaxsPMdn7Tlwgy31tV{!NK^H|aku@2qed+=Oh4FK_WM5N0QqIw6is<}sfFjCP z9v+{w?btWKu!H_ap5Ttm>h*Fln%+*tz{Vs1_ZQ_&6|lnjy?H;&`pq;w+nn`_@OMhW z0>+$kIVw`2;mZedbq#=`fq<8TKQ@I}b_kSS;AIQ=t`t5tfiEVb#r9AOyD_^a+}d@4 zWU8M$-fMv~$;gm0{z53HqRTYD?(Xb}icH|hWau65n)S>TF-D#5*&9p;IXb;sI{K6k!W*`)&quA zPLArqp}|2nl%q}8e48;>rOq?%`gy?dG_gFK-ZtcVE>kv)g0s>6*&fYt*O(d&aJZk; zxfM$3SYh3W6xRD%4#u!p?>I@T>{fA<^a zWuF7Gk0pH{N#?L7=5|=~BR*8zd!fTN;X&VKJ`jI}=j9lvJmgiSmoJ%(u#eevqM0n9OqsOAC$Qn{)uYk$0FX7&(5;9!Bg3vvKxGNd?UQ<(hGCADx> zKwz%xB4w}5>GJ!Q94J)EorDSz-`msK8{nF+>P{GtmmJs_BsI^$)9Zx0h10Z+4Axwd z9rvum-w0z6^{ve{nR&4h@91Ja&~|w^hpzRE`qt@oNi{Nn$DF_&vr(60;`ek0WXXum zX*V!C_yYVEk&P=So@!HHIIHynpIP7~YbI_w$&b-@S^z8Yctz0Y!qN*R1#u+m7xxZ* z;5>hE}p3d7oip(iUx&;Jgr``F#ND zQDJ@I5T8P+@)E5q(+Q81U+we1d{;GW2p!KYBT#F>3QJU@-Vc8A)OZ;$Z)&^< zHPYx~L0c>SjJ8|+naFjg^JP=l)jiPM`E`XNmrAr8Jx7<#=Elej5brbZm<X$ZbBq z$c)O@Fu^;IG3?Ugits@7|_|QnO3~rv9&BqBa z+!W4;h$@igKxf9<$wdVZChKBNgVn-Lzq?A{}<(geC@S? 
zQy+?{VW8%45S70ynM2bA49JGTnBJzzwkR&m-3v>j0y~~cdWSki$2Et*s9KT;2~?CZ zXv?q~WYiM@2Uhn8XmCMbwuO?9%cF`xb4y23@sDo#P@_!I|UxH&0)ozS{J+N0rmw zwxKH(fj>KHRwQSSd&$l@Tl160gt}bKrV|pdP>OvuQ1Qc4={I)4ojx{*!x7kt)#7TP zh-5}hner6si(SyFryPE=)AHSmzV=qBo4!Ote?PO4Iy+^KXps;XBjdQO}P=CO=C>MG+N@OPTRPUz4{9-vK z2stRroH(&csMnbc(MNU^jkDKS?E?WvzDD^&b4mFaqGaxB!S;-kED%x9un*!T`imIP z+1&`a*mBUq7TcL7a)XGOQI=N09I{8FazkgG+Fj?Z@^}Ph5Wi=~hoDnCe3rdFI6ioJ z0#K$W&HV*fwK{8wESez>3@6ELG=NVUlX`!Umhz>M}_S<*;H?mBk}q1~HTSbqPHw2>3z*Pb!m5{a21?;8;l>@XQ)@H_mwS(#pk#vX9H^hU{|I*_q@eyKXImN?0G!w32o9_2XKt*Ry$^m3IB*whf#AE5PWMMS!P| zegWwBkq?K6Xnw%1StRY6I!^Tj#7_-TT591VsfIDb> z=f~Wl!Q3X|}o ze0hZt`}H()pIy`MCSoQY<7hzHOA#+a2+8BPDR5q(dFK1}2a15f z&{m-Z50W5~k91KN1x$!If0)^Jt^{E`1Kzn@S3bfB@}HNb83b7LDGMUpRpHbm`Ni~| zhZ+lm@M795q<$J3Uzh>`3j)!~&5a&=f%Ks=d9QQK9cT{cWE-zqLAS4LN_N{-U^Pm< zVQel1{xF?ej9U!p)3ehY$rsuym4|Ss7Pp^z0qfUiwxt#5!`a^4YElaa-!%YkDXv6x z1z2S)rlRkw%#1q6guX(83gA6E!S^Cq9Ieap#88OSAT z%Y&4cS?MEg`bZ*HrM?H=9y&#=twPo%ziKeqM8_3$l({Dxaq2I=L{I@f?aI#jJpHTb z;st1afSwkkx-jAqXyn0c_!l(Bb&^}VxCVF(SQ{nuf!A53T>O(?_zKg6I(Y&8TzJcO zRuILeE!t|_hHLg)T}SDTIJxo5`rQtzTa9g5M>ExS?kU5i_^pG z`O$Iq;??uRo7KS0^eb>jSx%=39VBZvCF2YM4-N-oECip-P3(>#< z$RG9^UDFwp2w&0Dg~!}(v($$K78r1yDdY%HpNi2l@bgp$w)3`^WWLuIFa=ak=bZHOZm{&~<5;P7HBorSnmsg6yXcsn&2tx!?KcTzL;9kqsq(|p)!@Bj5F9jOrjo)QU z1OK|{e($KA(T$`DYv=_g=%el%DYRon;$wyAsp6r5La_&}o;p@;kWGPW7}WdEXL3^9 zW;IysaB+3+>$;j;RUwdCq(g}z&P58>`&9C0kIkULFLmWvv)!w`ww4wkB^Zv)Oa}A-lcESk2%JdYo*R15^+9IiBqE%% zt`F`Pj4BhAncrT&J~YKy5j>iL+F^zYFOLvXULkm3rk842qE<(s5vqvOAy%}H;aPMc zl@n*?A3CIZRc{jxxNcgo0@hj?+3Q}v>pUb_=gE*PBj4W0vmBcvnjJTb5LyUQ0xFzP zMF}?os!RpRhETL_^7r(drcq#hLDmvVujJ=f zjloA&4u@j3*l3PVt<;Q~U!Bg!z7i6sl@{6OjJx6B5I78QfOS#)y{+0AZqen@({DJJ zD|BsS7ei-LajJ1_3$?}OGl3Wll>h1@4edkgtV2AN?Ne+Hzq$0c-fZ2s zX8W?O-Tu<{sfdFdgRF=UhI+67O9I1Uty3mPwl-FHUOOGAMr}SEA9o_a%jDNR5YcX{ zeIM@hW+~8uOAqQhm3>(v9$yE^)R&v)v_f14$Kh35CU-&_izVu&bi? 
z59JPH;l)PZPMohEg&whVOH8jy*eop_0Xuz*k=a9k44fpx)?Jjyd0&$FlqfN?CR24c zxdvTF5hw?PHp`I%DaIsq5;ToSB;uhH_~D2tV|4;K8xQB3_wZ=*H?T9h^yz6W&3YvN zw%7QI{M*uxOT`N}z~XFac{Utd{)O}~yw z*Au+HF1?<<5q%bgi8XEETQwcSCp!fEs&YU5ulMbC{lkqdcyQ@vI^Q0f4!K z?GFJFwlX^aj(4}gJC6wWI-Y3UzJ=LZ(}t(+-fDV1>U$DPh#ekzs(wWH#uQmhV~=+u>bV+kNZ3A zZUC?j7o@#P4uv z34iq4J#hrJaB2R|th{8?0bGd=jg|aggqUF~RJ9?Et87q-+%=fYl$@|j!U96vAOdI* zPjCDgyb;|pv$Vw-Sn!4$xgZ&rkIapYV{v@}QFYKuf1{F;$^H?q5Arg?T?pPm$@7VX z<*Ujhf~>x}c@_H|dM6F=+qoO7x8M*ghFcy~WPF<=&DSi0RgX{Up{+|W{rzHT@_hlo zoN9Gl#w|(rPC%c0$}GwS#ser!L~#{MZN3=8gV{6SQcmB4)I2@?Z=^Apu)4MJ@e)bf z(nFgoo>PlvG6+*7^RO=jcos4HR0g%TUV~6m23763*qy|J@3Fv3K%!C?JA2vKe-zBUn90mjW2B z480maCy)Hsu`SR6!Gd!V^dEPl-ZEz^R3*TcxRR|PPsv&tkmj#dpSXT(25J8Azn%#i zrzn7S*R$tD_120B41ZvqHKj_i$xilTqT9uMnvb?MP@OLoU4WesR@IG^2ELmbt~w6=SrB)k4o>l{yJDBF*w}T~fp{SZZuspt zFlaVDasS$>Y^|QahArB#p~E>_;eLTL=Mvt0_T8!?%0GR-hLvr+V+9H(wqcw0t6XBh zD@Au*^QPTo&)${Ac$5)yX2F}ZAKP!UhuK`bdK;5f7Vp_|)ozw)-jp7DQ>?#y-aQ_l z>gcbEp~B}p>|f_>C^UsnKO13X8=Z~&R=4(`)xGkG`*RTrfh&ICg0EfgRl;!r(gTc1 zR~5+=x9Bir(eF4JfK5V# zh$hKPbLi;~%$%V1pfLekt+-Q3Q4gPvi=6NrFUm!jn0$rzx2nSCETM#76-Qt|ptJ|1 zLg@&>d?jfnbaeRxhrtk;m3mx8LpDLbGyR5)QsKR1wj&!!o1>jn!XoTbBqk!3A*rGN ze#*p+^hEwwPYvYfl{se6v454I={DOS^gM+QP~mY)(WU{1bh^Ztg>cg9Bx7}d{al^* zT-Hd3n|ugrT0(yp_Jj9=J^EGB@wQOU^NEVOFSK;Pae-*XqH971CutApmx0nOhRGF_ z)l3};kCOUUCaSCQ@H#4|@&Zk8F;{pZ+&8ELE@VsbyU;(69t%`0l6oGsVorn9vsDA1gK2t5jfU4Ro|Gx&KZc7)hX>WVm5Lri zYEVsL5;50mr>o8wJ35Ucc;s%{x1M^_L$He>}{NUY{7D>HPvco?M0Afe&s5sTtH`i>a!-R&g)UHEOaY z{@yF}82etdFSwTSNCC1uQCM-}^x6w=jc`qqggredu1SCj&1UM=h?w3<-a*EODZ``f zL(1+llKmL*`U0|L->dnl1+|`?x3rkbD4ljlXXmk}d`C|WvpacL&dp5-o_g)m=eQ)_ z9UnYBeE#DP*~#}mo<2Kz^9o(5+xKu)Z`+m#h`1dRW!({fXyH>>_F8rO;T6G29_Mb{ zkP=l+E;UfpM<8UIWjKQ1t$ z@u~7u|EPY42X$T4J1_2!6Uc8mNmg!`Wb0q6t#*e+K?ZugUB#2KwQl_1&CUIL zxg5&^##pVokZNdI`95H&%r)G8MBs6z+@fKRL(^VQ`@y<(y>VH)8X^x*O+%Prs+bJR zqUxLhMLG|Ouk)PqvSF7az<^rZb!^l>5NPdZ7f>78MCvLtBlPwc$!cCCJ`8*Ryo;QgqI=REe(O~@n!k6NElN4IYKX(Unhd@}9;S$lE9e~IqBHjsh#e(bEn zlWv?6wP9||TZ#hI3x2p)P?^j+roWA^XV;$zPi}#Y$(RF#q_Ujc-#PB{EsWPUk17jR zj%E1vBegIs^b2~`46n=LGR|HBx{Wsq46Rhqrod#Oc6UVle=PX)zkr-i;!(LI(Lb2|9B8VE@8iW;uE~N4S$nL66zFDe?&pL_^?@%6DOx)riAw zAa4x~>(*)LSdVM2atqn zR0Wua7x|E(kxp5GPG_*Px$n>(9@I1>TqT?KC{U0Q>J93^%!!3;$SbJPnpYdyFvHBl zrU1j3#3Y#4nwH9661%$%N59benrf}WM&-d{I{V9q@UcgReXWg`O|b8Ggfug{M7w8d z4;Xj-JlrrYFl}URo;%5VY5RlvL=3OyOq9;x7StrUe^p_!IQhdLkZ-VS0Co^gWKbUj zBU64f%v&bjj`_IOM+$4#o})okk|rELIAKeZQz6FRUX|}O9x;OI;zYQi2bIZ0ct5W` zv5`&M4V5d{s@n&#D{nH*kZO5v>gY$k&?$vL*4hb{duH|uiF z-kt59uh#KC03g=Ml!$}u4Ola>MD)Z>6{0~{&N0PD6c?#Mb*VC>ScnHY8iD1ZHo2G< z7*eh1Re(hgDvH0$sbnN0@aJA=ebYqbJNr14Pulk6x5b+4(SlV<;8lwgQ$j<1XR5*T zh#Ki1q7n*;htQ0&dtOsHDN1qpRALTU7=rzyS4|V7a>zRO-h_;4(MP% z_Qo9jqcV5i0zJ%zkYHL&K?5~DEa2pNSik*iU6YxCMDptj#PD4 zO6IeN$UjTKxTk;&rhM4ilt`!z($Lvb9=Kff2zY?u7s#k{Bo^yq3D{pJiP1Y9|{eG;Ahy7GI!|)yQE1l>||wl z8RFSAGw{Gp6Xp`|1|-zwgiP}PUxC+4DXnO%YFr8H^0`uV$QNwbOx zq#-(kAb$;IySggoetkE~@uI|>9@NPS6Xwl%Y9JXhWFaoz6$moiO9qw*4K(^T*2R!4 zmpXCSv=kf$IA&s zjimKGa#PH8I*tdF!D<}5=<;Y>7L*!1Q5g-*l3igSSbUG{kzAuGNhhRNluB1|MoEW* zxDM&JZqUWK+g7%9XvXE58t0H_pcpx@ECkZRZz>(u1IpXuNDLOmtXDrT%SD0#P?V8b z!)3!bni=W z@R~+quG{0>9GtYRRCRpr>Lca{dWKft_cGnx#+5=SlAGD$PGZw^cp79Dn%qH7do9?J zx;+cX1h6MSYsgMYi7ulB!wKaq2$q1xA!!B%R<6wZ>*+59hb)DM@{wa{8Em&Ish zCq9TkR#_p>8Lr^9?M!_$Aw%hT(pe$xV`QIuNOAYEq;DV>sTpaMh+M7%%X#lNc~R6@ z^j7O5<)XS&6xJP+dUU?(Mk3K9=}3^Nbg|WHq4$0xke~MePwUp!)_0Ke-6BOY3YEEz z1uHVZD8#5v7t`rI+{f&ll+yW#Aq_c2kYqF%#4^cR-}u3CaW$s}mnW1pN@v)mE_hfb zuKz2cKIuh;C=%d_gm8?^w+lGa%C2wTd5LTd+&y*Ey$w_F+R!J0!|?}OVZM~0s+5!U 
z!c~TzaQEp#IKY+7V=}srYQ~!|zYSPszk*ayTq*e-IaoP{pxHyN9j>6)ds_kQo*$DAK7R&K>agr4x=zcR90uGqL+IGqJmVCO&E+!ft=sB&?vM8>g{H#`o{#tby!8 z_-UuBFm3;->F-CmKQQq<6y4h(EIn{q9dP`{@GH<-U;rj%P305+Y0eWZ@Xw?GT^NA1 zsr#-Vf*hU+J4xY>@~blwnCiOG2rvQGoMy4sI$BT7ajrOizZk;q^7}XmVi}cDqv(|V z-^1gh4E(IlHEzAvpUjKojCsGRoM-)bRA0-%{?UqXL6KRqAQv4KYKiqjdi+&?UH3Yw zFOMFn27RX9gPMTUQH9QS*yPKig!{-YaG7VNzu2a>agg=~Af_>2t(Ed142l*>-~~5W z(4K}XtomnGl5~Ua!yLm`Np>tnn7m*Xs0>sST;9ufLHJi0Wk~~lHC9*q6-UYTUC?ms zz*e$g`sQrGiIVS~*zo|G#tX9|SjlsR z|27g7r9l1>^QlnW9Q|9;Ax%Z3Hybi79wCu;KZ149&93HNbjc_YQF&=5qb9X(0C!~l zRL9dRvjNAMAs=35W3BNfU`DL6sd|BNo-pYDnODK9qgUC<>Cx-gFJ66@oxJ$&)xi%$ z+gz3a|Li;bzmCrGIO-*hE62x;{^=2D(nZ??M!}IwzJk^|SBvg_X~N2d`dv|lh%ZF= z)2ES(&7D{r@zCDo(E~UpjIYPz}|-p^+QOY^G?(8w*4OW+Dl~Mv{PZFrqjXkE0eF1-B#-}xUN4)zP$r`Hv*nYvT0Sizf1la@<{nH zP!0*f*+fJB0}9tbV=iMPPA@M(Jrp9r(m$f%u+7EY8JnD|^|`D$3vb58#FtU2S0fU7WQ0 z92UGV8xF?W>v_`?vNDW|_r8r4qtLL6;u5l$Gr|x=-!10l1^Tkt7{yge@fVjWGh0;@ zHx7m$QSi84aPPvYv&wAbT!IbV#6ydp*XQ*g;Rrd#`D zdE*+Q#O&NxtO(t+G}5(M$I)^~lz{MT;RUSm1g`m`nc-ytM}Ps7!FP*XK$uO)D#J3c z*Ml$&I1y85U5(lFeww0^6Y^p{J%&KS08sFQ(Ar#9?$?Lnt^;BM!MI(C<95Iv^@x&$YHMy9M?rg>eiheTFQ7$BNp}1Yz zH^jgq5PXsV9WfdA6&NJ(or}_WO(@ut-weCy753 zQZz6<7zAmKBEzdCi>09mDO}GW^X@$c2lL_`yUd;lkIlIjBQHu0RlFMbWSDcq$>BTV z(Pc)DyHC9-5dG>WmweA_gJpEtKJJ0cypjdfQ|ni!UaBJ!tp<&0*Bs1rcO{bx0hp14 zCogCYez(P_T~EEMIx84~s)NkA699cCNG)n$sr;q@D&;2^P1Ud*vJ59-WjbF%*tmF| z*Lnb=fbEUje_Sl2m#P~z9^ond@Z5aoGO{60=lSq0BtPJ^j)>VNKFXZdEn!!8z7NoI zvx%hj#L18cyg^q|pC&4L!Bv53g+5tec8Ka$6?4X0xY4;6>KALOtcK?7Yar@64uB!% zW7E_&qUdg$o2t0N@pAE3N2iCKYFu0thQH|UQzSrPE)d-KI#x0UphIcCSj_kD1+q{#h5z!wQ1Dsu%LfkOtC&&nVwr#Wup)Dip)E0j;cT+r z7K2GLn?_|I&kDFt{EK?SAyLg&Yry*W9wDkqZ=@L^?A4dwd*_N!Eu!Vz5pta^Ch@+y zp}tq=H#7gk|7RQSn$0?eL+x%fhnBD*d}CE(0F6MAt8^!ZEX|7w^bO2GTZ~AX?SZ_t z9AgNXMFfnv;zSj?C|$@dt5}r`o|#6g!a2;uiJxFn5~w(1aPpTC)xh6azSQK%95ODW ztvbkLTA)ntiI-7E{kj(6M?`G(T6YDTWqGTFnX5xe#^QQKsiZNL8uV(_sgxl)Nyuyf zfPxzJurq+o>$8}vYBT(q&wbi1DS%M-l&7oDomEeq)`36iN~O4#%8UV4W|6La-yN?#yA|<5=8gT= zJ-Y+kXua10H3}b2+130nuSE#UYr$Bn6w=>R1x-a2OhhZvKrL5a(+sbwcz^wzES9se zI|Y#e)F(r;o$FrcIk}Dp7`}nyBKc%-$73oh*?amPQac4CEXmQU_d!6x%t7aMPJ{Hk z{svbBK`5N;6`*q^;Tksd1;G=jbQ=^iwmj$Nq4bgVSF0O#2 z?#K5Q==h|*=Mi_y!iiE|t&g|Piq40}E^_8#$0g)c-k57q;%!*N+TlmSz0fyy1lFop zr1Pc=66B~3Q?sxYPCUO*`@I=Y9xyb_8elMzZ&mb?)4DKIvg3fEv%f1AXbyLe5fQ`~ zGfy+QO+DQ zv7OznKeSIC91jm@{zr~Z-&hZlH-@LcV9TVlmpp79H^OCNBL(G8DBfS^hQctzZR?9K zG*Ce3x7EF+K{8APW$dk-d9$VVFq)71`xwTFF{Js}C55D+N>BHbi(NBK=Dmx@uE`gd z4c!D`B`@I`yPdi!a#KaV=Tf3VXLk3KGy479f3RmBP@Ct;_Ma0ltlX#F>v>UxEt~(V z&ch9ARp(&xsrK*DwfwQ^+HUaRG4vVd$o4t?r{3aC{f_)vb2LkmwdbIvvpvGidXrag zkqgG4O{M2^5`}#l8ZlKd$ODPPzzQ)TursGaoJ}?^&4f3Uc9EvBwvr7E zQz2M02$|u%*>1EB>Ahzcc7~lIo;deL^fZ7q*GzjcfwVS}Ix&kChS_)aMC>DdTI05} z3F+nLtVt}&bg`U6=E=-DGigS64e#eHFMcHv2h9yF86s`NsW-!ZS3#qJJHL~i2KJmF8U#{JjN+VY$a zft8mcg$dw}N(5G>aH%41uamYKxWquD8CVXD%rqjH*SPt}%75ajbA|U=bn9R8k*n@g zuGqLjBDU)2XwF%&+Y#x#;c|fQ`XcDn8CJJyFR+d(3B&l2p%%&m7&C?<%`o#Tz6lwD zfwvf5=aZ|#a^5wxGd>WgU^>SxRsrlQd5MX>;wP1|)gn-TMuS8RARFMy^M^hl!u zRO-{JaH`V^1gs)1gLfl&c44{=vo)Q9L+}E;J*Jm->&jZ#Jyvs4--q?-sSNW{M6I+` zFG;mc$ITN;^G*4&0U2ir;3ldXtj7z}gg{MNdxZ_8?yKbB^$VwFP$}y0oz6o{yR7oh zogx&}ETJURw1@3TUD7#@1H`vIDFQCsA1AY%(4<98x4APP3 zM?lQr(nL2)OmT(@G$__UsGY+&VnPz}v{QK#>rW&_V#LT)zZ1RLQPvz*bS==&8~#zh zdQn0_Xb_bMmRo<7p2^%db5Fuc81lugR(Nq)E$(#=UTMi$*r{H^lIn>3*680Vg?pgo zvt4-qsJjRkAQYcE*is+=D=6FqyjU3Kq z$*}(ZJvgoPKChm}Gvd^3J7&jtZmtjWzed~41`fw|_dF{W2qZ0ceFHXAX%5&Bv8&}t zgY7H+eh-cFbklZ~79}`{$jXWTI3CIr`9*U06;Eqm;Y`V&K7Ip|u;1$$efw~zzK z1Gp%HZUhE9pTHi!nU2cKyOsj_W{+M0=HeN?MYqvp1{^rij$k5SBAd38qwPN5@X_R* 
zPn*#5*e>6gCDH7K-S@8nUr!V-#Kw+{q&Pt+!l~0gC}X=a=6Ug*9_2!6r4;M|9|bOE zBh%YEMtyNPIlI6UwtQ}g3lW5BJ-|ZKZzkA=xqmFW8r<~R6^6H99rztV|ImaWk21s7 zYRzD+{hm@}HRQ_$uD;)?zZt%vK)$RurG=Vz(uDXckocX*7>p=3QQfn8lQl@W-b6T< z+?%qhFgXZhOYp8`nfL9m`%XxK-6t_}w2}*-x^1w>awSfYCo|~g7lj_86Km%A1Q-K{ zI6`|(WCAt|VW~eSJ1vis^S9{2NI;#lvz?wl1?K>}hClC^j}Ma$)&*+VuBxrTxIAPM z^akvjchRWtb?$6;|2%lvZfVyc^Q0sDMbyi&mr_hGga=IbBrtY*ueb#>T*)GHEeY;l z7_b7xKsL>+P~WL7wIHI{I$EJ7&NgO`cWbS5U?nm8vNEyO#DN-N(a$^KIrSI%BZM?{ zf<4}TwOHB6pu_+e{jUym(UR;Fc#CyFhFXZr0T@h99rvyC%;wzD6!#Ow4RIjDf+!I)zegn~*s31vr&J<6DZb`naG z0vUOj1LLa78}MudjulwXNwu7-U5dI*&>9k6Gy8iGje?ja$f2GVX^-nNJGnqF6!fy3 zzXQ$kw|A(6BqkVFWaRI2zARB;4V~XhE-G!&2<5$4Pga7%UqLybhq8_bTyGIz)&v z;qjFVmob-g#y8HFhKa68?Z^(APgJ=&+s1ES+KS?SeFVT_am?S)9|< zYC^-_oS8152F_Ng2Jlrs;TORp*ZuxyL&}d{9cJIX`0n7_|2aKe8%g`i2XXbmDWq;2 zvWd9rEOfFRZ!x`C8Xms9DHql&Qy~_DWL|QlEGScislZFBx`FWSISmGK&n86gnKG+t zQ1DB>E8i89{l87xx~DdvCI&U%OWLM*`qQ+Wbn40L+sXEJ@?XW>K8Es~Kri++lqKXf z>(<`V8PCGyk!|3;bl!E)Az+DuGJweg;q9g{{nMk(g+^Q(YBGY12M7QQ?~omNK;^~! z@6$f1>l#0#-@nH1Hhc(FXZ%}Fha1AsIS2QAo}PuVfI;)Zivt&v53_{LT!I zv0)>Ch%mDOV-QPMDK`fM!GcLj1~zPVKrVr1CsAxY=WR$JM+nArhS1CM;=Wf2QUnT_ zqk69MiX2V2`m_~Uqy6LnyG$mp1-mBZoL*--zheZ9A>{=|g%{5O0(C9&8%R@9EidE^ z$Q7fiqV`M?$XGpsMitXn`_dI$MqF8FeOd1+eu^!2zC~B)PUG3 z_SB;)(TN0Ti!2)N+8U;UnE3^(^h0RvwxEPF@Yc1&|x~8Ccyf0 z2Qk+y?`X^9VCd`=q7=+Tn)g*zmxjw3=?S3ysJ4{ZA%@8)*d7o$GW*H5)2X=(A-xvh zd(W4}peKFUM$%L^t2*vC%mVL!nE`LfY^NR2 z_5;%&74<$4Cypu^DA>j0<>V=TGX1Wm%6hUygR!Z^p5v5W!JSAS3BRmXRmWy^@4J`n zZ+IJ2pmx^XlWX0*U^77U?h)k6SvLH9gLI*&zVP+QkIhH{TZWv0TRPXZau?X!`TF0m zBhRJQKkV%7JbAqPhh*osn1H3Zb>~U)hkQ007c0t{Vwaa|25J86 zzyBnENY%6KatQ)Mma(!l$uFwu7%;==d#j+aYPtT*-#Oya!jy<;_h#2isr;AfkrR>uNk29k2QP zhlB4Z)OMQKqH zHK4!ks?O%%Y^!Xq`FkkQT%a=i4EZ)mZi1kn6W~_H_vt3R;d)suMx)}L?t}TIu?LMw z2ew|Q#*HYCkVXVHP>i-jxOJ^`)xG1zoU#YQfB*2RSnv(*doQRbE{^E>ZdT0kCN0dp zRuKmb93iozrD2QA^55q3 zkvEXb%Pcqa;vEJYfxpB3G|Gp+@0Rz|J>QsyLDXAFHRIdf*%cj{le7{1g^6SBRb6Yld~<}QCntOdDFm0+8p$w`-F?Q72&+Lyk&yIyc$#&&h6H@ zbxy4H*cy2-^c$uZK_M>3`BlXv+P)keSwjU-wqswg@y}rkY9)EmSfxrCOL@9n8w=nj z`7P8LUGYy?1^(8JG={yQw_KkTV6LIXR}eOy9?c5fZFF##*70XFRQB<6I-Y{-f{pN7 z(eY^4ZGEV{>M9(m72O2`KNy!Zl||AC1u^v9`oie`{_7DUgmF$XXRWKS-VDNo8NBL7b+bsZ)P1k zre1OhnA~8&?|%)#tC5&s@F>R#i^34c^QGik?39%S5ha@&T_J_uvTtuWVnOG}R4?{* zC4m%4_bq?Z4=)V=l`nR`Q6=Oji?BVP&Zf%+m*8*up;E~Ves@JK zO56ZkfWIjm+NLfm7R&7HjFMh)A@vJU8)lT8Qsd!68kKYTbkBTt$^O2ESsKc1G%7Dq zhYEN<%8ZtoJNR+ya1LP z`3;ep3>$&Yy71HEz0@Zl`}Dh=?gZ91_322o4Cb@2ET>_TE^=J?var!yuma3FE;P^% zVdYhZ(Z4a1TM%-WEF`j*BH*Pp;$-{c#1ad1&08Dqi#*{v9Hg^=CQ^mT*|wr^s)h5^ zu!Uxa7v`V=YgG_bFYUHy$IK?%yjwKPc(1U?O<|7a4r5e+^k*Ac)iItu=@;+zsFZhFR}U?y=R8vJ+mMx0+qS2KQd>C`#t8ON4@u12>pFnyS0S_L(qo0_Od2*a= zSg{=ONVUz|zUX?jy^Cvz)*XDT+EKKlVdPenb%%>nE+WiHj`RIi+Osl=Wxz4+yj=J{ z($3~b^d%@@wwF9MAJs!gz~H(2=86ba=nH-DADMCRBlF2UVlqdt3-HN*WGPtRBXX_~ z?{Gw7#ZT^$-N}e}#-H6|-g)3pNuS&!yP6OQAfMf1wR+%i;lBBb5j0%YFd`W-9S_qr zv+%6)z*E9|6Tk*n-RRAiDk_PYckLyPkL7eUSvf-Fi7Wa*zoAQVcpI7t6@*CB;Tspj zXNUr9eRvJM;`m2XRrYiIVzr!5^A)MPy{}mcsmzUJ-|x~YKeG}InH#~2 zOGe2yWRMz=cq}PnH`ltUvh(HDZeKqujihX3R1jNt%yuNwiEb=EK~;21z5SC8K#sl) zTw^j4D_p#m$N-Tj&!$yL2>ke(b&|#`D?s4w7V*!}im?Og4<{XJnghSCU@W+|wuzje z_1}#`9J4V&16|KQZ7;m5h4f?NJC8bP!gR@gvfv`bEtFIB@(JC_s2OX#c#(E&a#UQs zbO`uVAj+;Cv_UiP*AH8Y5#ZLD5dT;606 zje^yb_BFXo`Nv*M%S2pvUwu}7oQs8d30yU|E=r|R#;R4+5Cl|jX4SO|Y~r92g=U)Fo%-e4W6UzUIDIcNo~#c}JDMie@M)-r{@XEps-w1qNqYEG%2do3U!JqwL+(&O07|>sFIqif?^u=DSk45 zguLxL9!F_i;ZE-h%cEc5JGvj^f{^`z|ERP>mJPC>+{1t;^1phm8ZHU{+x3pR=)T+# zb=bz@G8NM(0qS?Q#9Dw0)HekVemp(Ojt_r4IkZYu0wk5p==cTDCvaHwbv&KEUCy!v z)U4Pxn>G??nQ!3jJbWm9?s`&(%%Najx%hu8U^p76?TNOCK8mJ{{;K$TyGwwvw_jA| 
zmTU$-RwGDjF31#UlDj3z%W@7hqet1RyK;0FvGo`o3)e zY26|=$vXM-4goj2>Dc2xp}U4@dxySQ4lzF0SB=-*26++qzDbeKK?U;@NzC?EGv&qJ zSwkIaym15G5VaUMtI6hSisYQlj4DlJh8MaP<9BjtL0~fG!8y8xNwl&;4*CUKM@D zWhFCOjR5K8UVFVR46&F4$<&bd;pMkFGJOI-3_^J|I0_n=nfj=^&t%xhLmTw`<7GNj z-xaq`Mg#$jZ3!08Qn1%p)Zf=KT3kQ1^P?d!j(0=5G@C>gO=Izpok6AyK~i0o006}fN_gU56qsr zif+`{YB53-k<~t%lkwthiewLc zy|`ikM3o;~+PRZ8IMaFdHopSMM}7kyX7(Y!aZye$iuppl#J|W}{8hau8uGv7FQ=pB z*k-PaM~d#+_y+E@`LNEk@a6vm)s*cfnj&*h3ObEoyL5BEc-&5`z9;`wpsP9UiZON}&=U!nW zMcx*3zbJg%IzQW@XTb}cr>ly;!cT*DM$@vPym=CoU6hM)(NLP61uwM1c+bvIvgDcC zdsJZ;rt0tIOrBNvu;QmxcfaPrhs{@*ZUFqWqOlcrxcf3K@feJ)pYzI;IXkaC{m`oY zWXjUwHnv)hOl4+cG&LqGy>GWdLrGrXeclBPrC^QraW3P}aSBZd{I|Es5Y^nC;oO|3 zl&hg5gRe*O+5;j*RVCee6L4<6>LchO9ZR62_Q1wZr-12s6qgh^@Ebch+V4^Z z@rz(yDFIr%8&DWuwa$bbHJCA3CDzAB@-tMPHzs z=umxKuL&N=hOPJmiZ>!PHVm|OX{j;ek9>Pb&1fBdZGP*hSM(6{#neyQFvhmu^-=WH zZY?K(dcB9VUZ`1 z1k^OxlV`iVy-nU=&i!l)TtN|37#p?UR=4Z#;1B1~>7@ZwB-1Gd-`ZG6B6KXG$S^=^kH=oT`zQ& zzpQ(S1ifa-$8D2_RnhBuzD8bYEI~YhmD-lY(@dRQCca7JsQBtn0ZG((lD@Wrx zPg)THpBK9(rc1y1hwl1Uu{Tx*o0c2A!X1;BNFZ+oH^5{;r$TAL;=QV7;K9oW$dq^o z_pK54oY;?*1r2O2;}G4y&7ia;&z^{Q_H%w-7f>Id-5A;iIz+B|FOUCn5UU)l&)PMA`4l`qpo^c2N%#U`@ z8>$PsD!bBjETTrZ+wCQHdukG>)t>i#d$>?)2R*@Y#2J@vc0uG{HIX6I)>31LKux z+J(ECsk%aA(iOTHG}MTg0-ddwP&j!zncj|zk@+P1a;w`~H6d{Pc;jy+TZF}i$LZ{D zcmHnhtKG=Pud_`nYL&|BGiIT7Z!sD3OnpK50^02XfLMViJK?p=mqq8}U z__myWdw6`hf=rmoV^kux=kO&n?&4;)xTA=jt@Ud|0|_Q;_0ry#3Vkdk`ek0 zs`M*Srh8GRPohv8Q>kBvQhf@wYS_P<60FrbJ6?G+-HKk)Y~-Q?iP5!_u+pY;g)2H=#S}w>BwIW?$wVCuQ%Jsvlq{wA08jRI!z8< zoxC|bK3UzGIv8hRv$YKqoR;;xz1Rpt0m#%>Q(SNbzPdzn@T9IFCZ6bwqLkwDq^+SM z3nG1uqn89`0iAXy#xgG9^?I~Y?Ty-}(&ae8aOih;r)f37Bfl=`qW#t5wnxZ=B zA%q12xooA6MCe@@5i)Ry!swUD7oF0tqf_E^0cJU6IU+C{c1ylFesOvzL?U_dDtZ3m zhr<(Sf6Lc4i7x@KYopF-r6<7<)mqwMU$@M&omrR(XO+(@M@o7LeziJ^2)A^ZA|h#L zYkTYAzwLar^|wic2HHYgli6Wk@yB&?vh&TJ;6MKHkB@f%|MBa`Prms7{T6>QNJfC*HK(1g zP2L4XW@&r}&1g)umUD1C+qzyXX8VsGU5^(#dw)!eVUe1NO|ReXKQWW>i0p2*0-NjU z4v?(i^(-FrTCt{nEpg`ftbQ^!ug_%T;@-9_qLy0 z7NhO2pA7Tui;Loq+ZTV#cQ3vk?f&~^@ileu1A8-9v+1NN5CVW^GUX4Qt=C58=c&_l+N1SLAKmJ zeoU-uBH7YZ$$UIE^;gUBqWb*Y5O$oaZ(REXlVmR331496(i7b)$(GIS8pqRhULz-%&!^z%>(&wl(cHdDa z1~p^ORkQ7CFLWV?9E$h6y!l17nCC-aa%0+D9KoOWcH*j{i+Pd1jlI^EYTNNHx#!fQ zc=fT->o=n>nT7DD3(bY`vb30faW)lp8+v4>LO( zlei@DUr%XK}Ctv>|dGba6`+cnc>;L+0`^Bs0N578zZ+?61um9f8H&6cfMgIRS z{$3V~9Ekh&U&;A-KiNxnTdz?1b6cThpW-HIolt1}K12%w_XP~c5I528zo)J5r#Hp+ zEC<9)PzzN1j~-naQDu3N4yQMd?(#{#{eEfwp}n?^FSZVri)+Zj1&{g5c5#!JWAoGd z-9Kafbi@pehRUJ1KMo5>$xr^2@r%2kSX7`gn_aCR<=o#47 zT9JLU13fYe_I5PAowV+M{!fm6Jbrq30_Kok;r#D@z4uLU{&&Cr`tg_Z|6BYWA09k= zd6?deT9sk@X6XS30{H#xuB9}<5*-`-Y);`M_z7dhTMf$N)~ZXTwPwdfpW7_bciFg{ zysa7wE~bF}^sX2OB_6RuQ_~RTv4wMqKc1T7cEhm34Ml2!$_?cJ0_D<3o>SBPcnR?C zUsDNtmd5&n*_@gd6rLfEzg(2#D)v;8_+%H$ay%;Lu`=XO)>uWuT+%ZmXOus$Xg>@R zZzy93Y;zru22SNys~7kT$*%ri}`M&w$9Ew-)&oG51M7!7l&}i{7YTNW=fO^Z!{zpmqA6-9IY*|IQ!b{68^|znuTy z;Ln1ynN?p*r{k*Ca&9GCRyzbH$~n8?UX%THJe?R4;dG9MQe0kRz&AjGxe8PKNZ7aF zqSD`TdyXAu7